# NOTE: The two lines below are a dataset-export artifact (corpus table
# header) and were not part of the original Gramps source file.
# text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
# |---|---|---|---|---|---|---|---|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Gary Burton
# Copyright (C) 2012 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Manages the main window and the pluggable views
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
from collections import defaultdict
import os
import time
import datetime
from io import StringIO
import gc
import html
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".")
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.cli.grampscli import CLIManager
from .user import User
from .plug import tool
from gramps.gen.plug import START
from gramps.gen.plug import REPORT
from gramps.gen.plug.report._constants import standalone_categories
from .plug import (PluginWindows, ReportPluginDialog, ToolPluginDialog)
from .plug.report import report, BookSelector
from .utils import AvailableUpdates
from .pluginmanager import GuiPluginManager
from gramps.gen.relationship import get_relationship_calculator
from .displaystate import DisplayState, RecentDocsMenu
from gramps.gen.const import (HOME_DIR, ICON, URL_BUGTRACKER, URL_HOMEPAGE,
URL_MAILINGLIST, URL_MANUAL_PAGE, URL_WIKISTRING,
WIKI_EXTRAPLUGINS, URL_BUGHOME)
from gramps.gen.constfunc import is_quartz
from gramps.gen.config import config
from gramps.gen.errors import WindowActiveError
from .dialog import ErrorDialog, WarningDialog, QuestionDialog2, InfoDialog
from .widgets import Statusbar
from .undohistory import UndoHistory
from gramps.gen.utils.file import media_path_full
from .dbloader import DbLoader
from .display import display_help, display_url
from .configure import GrampsPreferences
from .aboutdialog import GrampsAboutDialog
from .navigator import Navigator
from .views.tags import Tags
from .uimanager import ActionGroup, valid_action_name
from gramps.gen.lib import (Person, Surname, Family, Media, Note, Place,
Source, Repository, Citation, Event, EventType,
ChildRef)
from gramps.gui.editors import (EditPerson, EditFamily, EditMedia, EditNote,
EditPlace, EditSource, EditRepository,
EditCitation, EditEvent)
from gramps.gen.db.exceptions import DbWriteFailure
from gramps.gen.filters import reload_custom_filters
from .managedwindow import ManagedWindow
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Fallback category pair (untranslated, translated).
# NOTE(review): usage is outside this view; confirm at call sites.
_UNSUPPORTED = ("Unsupported", _("Unsupported"))
# Wiki page targets derived from the versioned manual page name.
WIKI_HELP_PAGE_FAQ = '%s_-_FAQ' % URL_MANUAL_PAGE
WIKI_HELP_PAGE_KEY = '%s_-_Keybindings' % URL_MANUAL_PAGE
WIKI_HELP_PAGE_MAN = '%s' % URL_MANUAL_PAGE
# CSS template applied by ViewManager.change_font(); '%s' receives the
# selected font family name.
CSS_FONT = """
#view {
font-family: %s;
}
"""
#-------------------------------------------------------------------------
#
# ViewManager
#
#-------------------------------------------------------------------------
class ViewManager(CLIManager):
"""
**Overview**
The ViewManager is the session manager of the program.
Specifically, it manages the main window of the program. It is closely tied
into the Gtk.UIManager to control all menus and actions.
The ViewManager controls the various Views within the Gramps programs.
Views are organised in categories. The categories can be accessed via
a sidebar. Within a category, the different views are accessible via the
toolbar or view menu.
A View is a particular way of looking at information in the Gramps main
window. Each view is separate from the others, and has no knowledge of
the others.
Examples of current views include:
- Person View
- Relationship View
- Family View
- Source View
The View Manager does not have to know the number of views, the type of
views, or any other details about the views. It simply provides the
method of containing each view, and has methods for creating, deleting and
switching between the views.
"""
def __init__(self, app, dbstate, view_category_order, user=None):
    """
    Initialise the session manager.

    :param app: the Gtk application instance
    :param dbstate: the database state Gramps is working on
    :param view_category_order: fixed order in which the view categories
        are accessible in the sidebar
    :param user: optional User object; one is created here if None
    """
    CLIManager.__init__(self, dbstate, setloader=False, user=user)
    self.view_category_order = view_category_order
    self.app = app
    #set pluginmanager to GUI one
    self._pmgr = GuiPluginManager.get_instance()
    self.merge_ids = []             # UI merge ids added by the active page
    self.toolactions = None
    self.tool_menu_ui_id = None
    self.reportactions = None
    self.report_menu_ui_id = None
    self.active_page = None
    self.pages = []                 # created pages, indexed by notebook page
    self.page_lookup = {}           # (cat_num, view_num) -> notebook page num
    self.views = None
    self.current_views = [] # The current view in each category
    self.view_changing = False
    self.show_navigator = config.get('interface.view')
    self.show_toolbar = config.get('interface.toolbar-on')
    self.fullscreen = config.get('interface.fullscreen')
    self.__build_main_window() # sets self.uistate
    if self.user is None:
        # Build a GUI User that reports errors via dialogs and progress
        # via the status-bar progress bar.
        self.user = User(error=ErrorDialog,
                         parent=self.window,
                         callback=self.uistate.pulse_progressbar,
                         uistate=self.uistate,
                         dbstate=self.dbstate)
    self.__connect_signals()
    self.do_reg_plugins(self.dbstate, self.uistate)
    reload_custom_filters()
    #plugins loaded now set relationship class
    self.rel_class = get_relationship_calculator()
    self.uistate.set_relationship_class()
    # Need to call after plugins have been registered
    self.uistate.connect('update-available', self.process_updates)
    self.check_for_updates()
    # Set autobackup
    self.uistate.connect('autobackup', self.autobackup)
    self.uistate.set_backup_timer()
def check_for_updates(self):
    """
    Start a background check for add-on updates if the configured
    check interval has elapsed since the last check.
    """
    howoften = config.get("behavior.check-for-addon-updates")
    if howoften == 0:
        # Checking is disabled.
        return
    last = config.get("behavior.last-check-for-addon-updates")
    year, mon, day = (int(part) for part in last.split("/"))
    elapsed = (datetime.date.today() - datetime.date(year, mon, day)).days
    if howoften == 4:
        # "Always" checks unconditionally.
        update = True
    else:
        # Minimum elapsed days for month/week/day frequency settings.
        thresholds = {1: 30, 2: 7, 3: 1}
        update = howoften in thresholds and elapsed >= thresholds[howoften]
    if update:
        AvailableUpdates(self.uistate).start()
def process_updates(self, addon_update_list):
    """
    Callback run when add-on updates are available: offer them to the
    user and re-register plugins, rescanning if the dialog requests it.
    """
    updater = PluginWindows.UpdateAddons(self.uistate, [],
                                         addon_update_list)
    self.do_reg_plugins(self.dbstate, self.uistate, rescan=updater.rescan)
def _errordialog(self, title, errormessage):
    """
    Show an error in a modal dialog (GUI counterpart of the CLI error
    reporter) and return afterwards.

    :param title: dialog title
    :param errormessage: message body
    :returns: 1, always
    """
    ErrorDialog(title, errormessage,
                parent=self.uistate.window)
    return 1
def __build_main_window(self):
    """
    Build the GTK interface: main window, CSS font provider, navigator
    sidebar, view notebook, toolbar, statusbar and the DisplayState
    (stored as self.uistate).
    """
    width = config.get('interface.main-window-width')
    height = config.get('interface.main-window-height')
    horiz_position = config.get('interface.main-window-horiz-position')
    vert_position = config.get('interface.main-window-vert-position')
    font = config.get('utf8.selected-font')
    self.window = Gtk.ApplicationWindow(application=self.app)
    self.app.window = self.window
    self.window.set_icon_from_file(ICON)
    # Restore the window geometry saved on the previous quit.
    self.window.set_default_size(width, height)
    self.window.move(horiz_position, vert_position)
    self.provider = Gtk.CssProvider()
    self.change_font(font)
    #Set the mnemonic modifier on Macs to alt-ctrl so that it
    #doesn't interfere with the extended keyboard, see
    #https://gramps-project.org/bugs/view.php?id=6943
    if is_quartz():
        self.window.set_mnemonic_modifier(
            Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK)
    vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
    self.window.add(vbox)
    # Horizontal pane: navigator sidebar (left) and view notebook (right).
    hpane = Gtk.Paned()
    self.ebox = Gtk.EventBox()
    self.navigator = Navigator(self)
    self.ebox.add(self.navigator.get_top())
    hpane.pack1(self.ebox, False, False)
    hpane.show()
    self.notebook = Gtk.Notebook()
    self.notebook.set_scrollable(True)
    self.notebook.set_show_tabs(False)
    self.notebook.show()
    self.__init_lists()
    self.__build_ui_manager()
    hpane.add2(self.notebook)
    toolbar = self.uimanager.get_widget('ToolBar')
    self.statusbar = Statusbar()
    self.statusbar.show()
    vbox.pack_end(self.statusbar, False, True, 0)
    vbox.pack_start(toolbar, False, True, 0)
    vbox.pack_end(hpane, True, True, 0)
    vbox.show()
    self.uistate = DisplayState(self.window, self.statusbar,
                                self.uimanager, self)
    # Create history objects
    for nav_type in ('Person', 'Family', 'Event', 'Place', 'Source',
                     'Citation', 'Repository', 'Note', 'Media'):
        self.uistate.register(self.dbstate, nav_type, 0)
    self.dbstate.connect('database-changed', self.uistate.db_changed)
    self.tags = Tags(self.uistate, self.dbstate)
    # handle OPEN Recent Menu, insert it into the toolbar.
    self.recent_manager = RecentDocsMenu(
        self.uistate, self.dbstate, self._read_recent_file)
    self.recent_manager.build(update_menu=False)
    self.db_loader = DbLoader(self.dbstate, self.uistate)
    self.__setup_navigator()
    # need to get toolbar again, because it is a new object now.
    toolbar = self.uimanager.get_widget('ToolBar')
    if self.show_toolbar:
        toolbar.show()
    else:
        toolbar.hide()
    if self.fullscreen:
        self.window.fullscreen()
    self.window.set_title("%s - Gramps" % _('No Family Tree'))
    self.window.show()
def __setup_navigator(self):
    """
    Show or hide the navigator sidebar according to the current
    configuration setting.
    """
    # Pick the widget operation matching the configured visibility.
    toggle = self.ebox.show if self.show_navigator else self.ebox.hide
    toggle()
def __connect_signals(self):
    """
    Connect the top-level window and notebook signals.
    """
    # Keep the handler id so quit() can disconnect it to block re-entry.
    self.del_event = self.window.connect('delete-event', self.quit)
    self.notebook.connect('switch-page', self.view_changed)
def __init_lists(self):
    """
    Initialize the action lists for the UIManager.

    Each entry is a tuple: (action name, callback), optionally followed
    by an accelerator string and, for toggle actions, an initial state.
    Commented-out entries are kept for reference.
    """
    # Application-level actions (attached to the Gtk.Application).
    self._app_actionlist = [
        ('quit', self.quit, None if is_quartz() else "<PRIMARY>q"),
        ('preferences', self.preferences_activate),
        ('about', self.display_about_box), ]
    # Actions available whether or not a database is loaded.
    self._file_action_list = [
        #('FileMenu', None, _('_Family Trees')),
        ('Open', self.__open_activate, "<PRIMARY>o"),
        #('OpenRecent'_("Open an existing database")),
        #('quit', self.quit, "<PRIMARY>q"),
        #('ViewMenu', None, _('_View')),
        ('Navigator', self.navigator_toggle, "<PRIMARY>m",
         self.show_navigator),
        ('Toolbar', self.toolbar_toggle, '', self.show_toolbar),
        ('Fullscreen', self.fullscreen_toggle, "F11", self.fullscreen),
        #('EditMenu', None, _('_Edit')),
        #('preferences', self.preferences_activate),
        #('HelpMenu', None, _('_Help')),
        ('HomePage', home_page_activate),
        ('MailingLists', mailing_lists_activate),
        ('ReportBug', report_bug_activate),
        ('ExtraPlugins', extra_plugins_activate),
        #('about', self.display_about_box),
        ('PluginStatus', self.__plugin_status),
        ('FAQ', faq_activate),
        ('KeyBindings', key_bindings),
        ('UserManual', manual_activate, 'F1'),
        ('TipOfDay', self.tip_of_day_activate), ]
    # Actions usable on a read-only database.
    self._readonly_action_list = [
        ('Close', self.close_database, "<control>w"),
        ('Export', self.export_data, "<PRIMARY>e"),
        ('Backup', self.quick_backup),
        ('Abandon', self.abort),
        ('Reports', self.reports_clicked),
        #('GoMenu', None, _('_Go')),
        #('ReportsMenu', None, _('_Reports')),
        ('Books', self.run_book),
        #('WindowsMenu', None, _('_Windows')),
        #('F2', self.__keypress, 'F2'), #pedigreeview
        #('F3', self.__keypress, 'F3'), # timelinepedigreeview
        #('F4', self.__keypress, 'F4'), # timelinepedigreeview
        #('F5', self.__keypress, 'F5'), # timelinepedigreeview
        #('F6', self.__keypress, 'F6'), # timelinepedigreeview
        #('F7', self.__keypress, 'F7'),
        #('F8', self.__keypress, 'F8'),
        #('F9', self.__keypress, 'F9'),
        #('F11', self.__keypress, 'F11'), # used to go full screen
        #('F12', self.__keypress, 'F12'),
        #('<PRIMARY>BackSpace', self.__keypress, '<PRIMARY>BackSpace'),
        #('<PRIMARY>Delete', self.__keypress, '<PRIMARY>Delete'),
        #('<PRIMARY>Insert', self.__keypress, '<PRIMARY>Insert'),
        #('<PRIMARY>J', self.__keypress, '<PRIMARY>J'),
        # Ctrl+digit jumps directly to a view category ('0' = tenth).
        ('PRIMARY-1', self.__gocat, '<PRIMARY>1'),
        ('PRIMARY-2', self.__gocat, '<PRIMARY>2'),
        ('PRIMARY-3', self.__gocat, '<PRIMARY>3'),
        ('PRIMARY-4', self.__gocat, '<PRIMARY>4'),
        ('PRIMARY-5', self.__gocat, '<PRIMARY>5'),
        ('PRIMARY-6', self.__gocat, '<PRIMARY>6'),
        ('PRIMARY-7', self.__gocat, '<PRIMARY>7'),
        ('PRIMARY-8', self.__gocat, '<PRIMARY>8'),
        ('PRIMARY-9', self.__gocat, '<PRIMARY>9'),
        ('PRIMARY-0', self.__gocat, '<PRIMARY>0'),
        # NOTE: CTRL+ALT+NUMBER is set in gramps.gui.navigator
        ('PRIMARY-N', self.__next_view, '<PRIMARY>N'),
        # the following conflicts with PrintView!!!
        ('PRIMARY-P', self.__prev_view, '<PRIMARY>P'), ]
    # Actions requiring read-write access.
    self._action_action_list = [
        ('Clipboard', self.clipboard, "<PRIMARY>b"),
        #('AddMenu', None, _('_Add')),
        #('AddNewMenu', None, _('New')),
        ('PersonAdd', self.add_new_person, "<shift><Alt>p"),
        ('FamilyAdd', self.add_new_family, "<shift><Alt>f"),
        ('EventAdd', self.add_new_event, "<shift><Alt>e"),
        ('PlaceAdd', self.add_new_place, "<shift><Alt>l"),
        ('SourceAdd', self.add_new_source, "<shift><Alt>s"),
        ('CitationAdd', self.add_new_citation, "<shift><Alt>c"),
        ('RepositoryAdd', self.add_new_repository, "<shift><Alt>r"),
        ('MediaAdd', self.add_new_media, "<shift><Alt>m"),
        ('NoteAdd', self.add_new_note, "<shift><Alt>n"),
        ('UndoHistory', self.undo_history, "<PRIMARY>H"),
        #--------------------------------------
        ('Import', self.import_data, "<PRIMARY>i"),
        ('Tools', self.tools_clicked),
        #('BookMenu', None, _('_Bookmarks')),
        #('ToolsMenu', None, _('_Tools')),
        ('ConfigView', self.config_view, '<shift><PRIMARY>c'), ]
    # Undo/redo live in their own groups so their sensitivity can be
    # toggled independently (see __change_undo_label/__change_redo_label).
    self._undo_action_list = [
        ('Undo', self.undo, '<PRIMARY>z'), ]
    self._redo_action_list = [
        ('Redo', self.redo, '<shift><PRIMARY>z'), ]
def run_book(self, *action):
    """
    Open the book selector dialog; do nothing if it is already open.
    """
    try:
        BookSelector(self.dbstate, self.uistate)
    except WindowActiveError:
        # Selector window already exists; ignore the request.
        pass
def __gocat(self, action, value):
    """
    Ctrl+number callback: jump to the requested view category.
    The '0' shortcut addresses the tenth category.
    """
    # The action name ends with the pressed digit.
    digit = int(action.get_name()[-1])
    index = (10 if digit == 0 else digit) - 1
    if index >= len(self.current_views):
        # The requested category is not present.
        return False
    self.goto_page(index, None)
def __next_view(self, action, value):
    """
    Select the next view category as the active category, wrapping
    around to the first when the end is reached.

    :param action: the action that triggered the callback
    :param value: the action parameter (unused)
    """
    curpage = self.notebook.get_current_page()
    # Find the category of the currently displayed page.
    cat_num = None
    for key, page_num in self.page_lookup.items():
        if page_num == curpage:
            cat_num = key[0]
            break
    if cat_num is None:
        # Robustness fix: the original raised NameError if the current
        # page was not registered in page_lookup.
        return
    # Move to the next category, wrapping at the end.
    if cat_num >= len(self.current_views) - 1:
        self.goto_page(0, None)
    else:
        self.goto_page(cat_num + 1, None)
def __prev_view(self, action, value):
    """
    Select the previous view category as the active category, wrapping
    around to the last when the beginning is reached.

    :param action: the action that triggered the callback
    :param value: the action parameter (unused)
    """
    curpage = self.notebook.get_current_page()
    # Find the category of the currently displayed page.
    cat_num = None
    for key, page_num in self.page_lookup.items():
        if page_num == curpage:
            cat_num = key[0]
            break
    if cat_num is None:
        # Robustness fix: the original raised NameError if the current
        # page was not registered in page_lookup.
        return
    # Move to the previous category, wrapping at the beginning.
    if cat_num > 0:
        self.goto_page(cat_num - 1, None)
    else:
        self.goto_page(len(self.current_views) - 1, None)
def init_interface(self):
    """
    Initialize the interface: load the available views, open the
    default (or last-used) page and build the tool/report menus.
    """
    self.views = self.get_available_views()
    # defaults = (category, view, current-view-per-category).
    defaults = views_to_show(self.views,
                             config.get('preferences.use-last-view'))
    self.current_views = defaults[2]
    self.navigator.load_plugins(self.dbstate, self.uistate)
    self.goto_page(defaults[0], defaults[1])
    # Disable file actions while the menus are being built.
    self.uimanager.set_actions_sensitive(self.fileactions, False)
    self.__build_tools_menu(self._pmgr.get_reg_tools())
    self.__build_report_menu(self._pmgr.get_reg_reports())
    self._pmgr.connect('plugins-reloaded',
                       self.__rebuild_report_and_tool_menus)
    self.uimanager.set_actions_sensitive(self.fileactions, True)
    if not self.file_loaded:
        # No database yet: hide everything that needs one.
        self.uimanager.set_actions_visible(self.actiongroup, False)
        self.uimanager.set_actions_visible(self.readonlygroup, False)
        self.uimanager.set_actions_visible(self.undoactions, False)
        self.uimanager.set_actions_visible(self.redoactions, False)
    self.uimanager.update_menu()
    config.connect("interface.statusbar", self.__statusbar_key_update)
def __statusbar_key_update(self, client, cnxn_id, entry, data):
    """
    Config callback: refresh the statusbar when the
    'interface.statusbar' setting changes.
    """
    self.uistate.modify_statusbar(self.dbstate)
def post_init_interface(self, show_manager=True):
    """
    Show the main window. Deferred from init so that ArgHandler can
    work without the window being shown.

    :param show_manager: when True and no database is open, also open
        the database manager dialog
    """
    self.window.show()
    if not self.dbstate.is_open() and show_manager:
        self.__open_activate(None, None)
def do_reg_plugins(self, dbstate, uistate, rescan=False):
    """
    Register the plugins at initialization time. The plugin status
    window is opened on an error if the user has requested it.

    :param rescan: force a rescan of plugin directories
    """
    # registering plugins
    self.uistate.status_text(_('Registering plugins...'))
    error = CLIManager.do_reg_plugins(self, dbstate, uistate,
                                      rescan=rescan)
    # get to see if we need to open the plugin status window
    if error and config.get('behavior.pop-plugin-status'):
        self.__plugin_status()
    self.uistate.push_message(self.dbstate, _('Ready'))
def close_database(self, action=None, make_backup=True):
    """
    Close the database and reset the GUI.

    NOTE(review): the *make_backup* parameter is accepted but not used
    in this body — confirm whether callers rely on it.
    """
    self.dbstate.no_database()
    self.post_close_db()
def no_del_event(self, *obj):
    """ Routine to prevent window destroy with default handler if user
    hits 'x' multiple times. """
    # Returning True stops the default delete-event handling.
    return True
def quit(self, *obj):
    """
    Close out the program: optionally back up the data, close the
    database, let pages save their state, persist the window geometry
    and quit the application.
    """
    # mark interface insenstitive to prevent unexpected events
    self.uistate.set_sensitive(False)
    # the following prevents reentering quit if user hits 'x' again
    self.window.disconnect(self.del_event)
    # the following prevents premature closing of main window if user
    # hits 'x' multiple times.
    self.window.connect('delete-event', self.no_del_event)
    # backup data
    if config.get('database.backup-on-exit'):
        self.autobackup()
    # close the database
    if self.dbstate.is_open():
        self.dbstate.db.close(user=self.user)
    # have each page save anything, if they need to:
    self.__delete_pages()
    # save the current window size
    (width, height) = self.window.get_size()
    config.set('interface.main-window-width', width)
    config.set('interface.main-window-height', height)
    # save the current window position
    (horiz_position, vert_position) = self.window.get_position()
    config.set('interface.main-window-horiz-position', horiz_position)
    config.set('interface.main-window-vert-position', vert_position)
    config.save()
    self.app.quit()
def abort(self, *obj):
    """
    Abandon changes and quit: undo everything done in this session
    (after confirmation), then exit.
    """
    if self.dbstate.db.abort_possible:
        dialog = QuestionDialog2(
            _("Abort changes?"),
            _("Aborting changes will return the database to the state "
              "it was before you started this editing session."),
            _("Abort changes"),
            _("Cancel"),
            parent=self.uistate.window)
        if dialog.run():
            # Suppress signals while rolling back every undoable change.
            self.dbstate.db.disable_signals()
            while self.dbstate.db.undo():
                pass
            self.quit()
    else:
        # Too many changes accumulated to roll everything back.
        WarningDialog(
            _("Cannot abandon session's changes"),
            _('Changes cannot be completely abandoned because the '
              'number of changes made in the session exceeded the '
              'limit.'), parent=self.uistate.window)
def __init_action_group(self, name, actions, sensitive=True, toggles=None):
    """
    Create an ActionGroup, register it with the UI manager and set its
    initial sensitivity.

    NOTE(review): the *toggles* parameter is accepted but unused here.

    :returns: the newly created ActionGroup
    """
    group = ActionGroup(name, actions)
    self.uimanager.insert_action_group(group)
    self.uimanager.set_actions_sensitive(group, sensitive)
    return group
def __build_ui_manager(self):
    """
    Build the action groups from the lists prepared by __init_lists and
    register them with the shared UI manager.
    """
    self.uimanager = self.app.uimanager
    self.actiongroup = self.__init_action_group(
        'RW', self._action_action_list)
    self.readonlygroup = self.__init_action_group(
        'RO', self._readonly_action_list)
    self.fileactions = self.__init_action_group(
        'FileWindow', self._file_action_list)
    # Undo/redo start insensitive until a database provides history.
    self.undoactions = self.__init_action_group(
        'Undo', self._undo_action_list, sensitive=False)
    self.redoactions = self.__init_action_group(
        'Redo', self._redo_action_list, sensitive=False)
    # Application-scope actions are attached to the Gtk application.
    self.appactions = ActionGroup('AppActions', self._app_actionlist, 'app')
    self.uimanager.insert_action_group(self.appactions, gio_group=self.app)
def preferences_activate(self, *obj):
    """
    Open the preferences dialog; do nothing if it is already open.
    """
    try:
        GrampsPreferences(self.uistate, self.dbstate)
    except WindowActiveError:
        # Preferences window already exists; ignore the request.
        pass
def reset_font(self):
    """
    Reset to the default application font by removing the CSS provider
    installed by change_font().
    """
    Gtk.StyleContext.remove_provider_for_screen(self.window.get_screen(),
                                                self.provider)
def change_font(self, font):
    """
    Change the default application font via the CSS provider.
    Only applied when genealogical symbols (utf8) are in use and a
    font has been selected.

    :param font: font family name from the preferences
    :returns: True if the font was applied, False otherwise
    """
    if not (config.get('utf8.in-use') and font != ""):
        # Fix: return an explicit False instead of the implicit None.
        return False
    css_font = CSS_FONT % font
    try:
        self.provider.load_from_data(css_font.encode('UTF-8'))
        Gtk.StyleContext.add_provider_for_screen(
            self.window.get_screen(), self.provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
        return True
    except Exception:
        # Fix: narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt.
        # Force gramps to use the standard font.
        print("I can't set the new font :", font)
        config.set('utf8.in-use', False)
        config.set('utf8.selected-font', "")
        return False
def tip_of_day_activate(self, *obj):
    """
    Display the Tip of the Day dialog.
    """
    # Imported lazily; the dialog is rarely needed.
    from .tipofday import TipOfDay
    TipOfDay(self.uistate)
def __plugin_status(self, obj=None, data=None):
    """
    Display the plugin status dialog; ignore the request if it is
    already open.
    """
    try:
        PluginWindows.PluginStatus(self.dbstate, self.uistate, [])
    except WindowActiveError:
        pass
def navigator_toggle(self, action, value):
    """
    Show or hide the navigator sidebar from the toggle action and
    persist the choice in the configuration.
    """
    action.set_state(value)
    visible = value.get_boolean()
    if visible:
        self.ebox.show()
    else:
        self.ebox.hide()
    # Remember the choice for the next session.
    config.set('interface.view', visible)
    self.show_navigator = visible
    config.save()
def toolbar_toggle(self, action, value):
    """
    Show or hide the toolbar from the toggle action and persist the
    choice in the configuration.
    """
    action.set_state(value)
    toolbar = self.uimanager.get_widget('ToolBar')
    visible = value.get_boolean()
    if visible:
        toolbar.show_all()
    else:
        toolbar.hide()
    # Remember the choice for the next session.
    config.set('interface.toolbar-on', visible)
    config.save()
def fullscreen_toggle(self, action, value):
    """
    Put the main Gramps window in or out of fullscreen mode from the
    toggle action and persist the choice in the configuration.
    """
    action.set_state(value)
    fullscreen = value.get_boolean()
    if fullscreen:
        self.window.fullscreen()
    else:
        self.window.unfullscreen()
    # Remember the choice for the next session.
    config.set('interface.fullscreen', fullscreen)
    config.save()
def get_views(self):
    """
    Return the view definitions (as loaded by init_interface).
    """
    return self.views
def goto_page(self, cat_num, view_num):
    """
    Make the requested view the current notebook page, creating the
    page lazily on first access.

    :param cat_num: category index
    :param view_num: view index within the category, or None to use
        the category's current view
    :returns: the page object now displayed
    """
    if view_num is None:
        view_num = self.current_views[cat_num]
    else:
        self.current_views[cat_num] = view_num
    key = (cat_num, view_num)
    if key not in self.page_lookup:
        # First visit: build the page and record its notebook index.
        pdata, page_def = self.views[cat_num][view_num]
        self.page_lookup[key] = self.notebook.get_n_pages()
        self.__create_page(pdata, page_def)
    page_num = self.page_lookup[key]
    self.notebook.set_current_page(page_num)
    return self.pages[page_num]
def get_category(self, cat_name):
    """
    Look up a category index by its category name.

    :returns: the category number, or None if not found
    """
    for index, cat_views in enumerate(self.views):
        # The category name lives on the first view's plugin data.
        if cat_views[0][0].category[1] == cat_name:
            return index
    return None
def __create_dummy_page(self, pdata, error):
    """ Create a dummy page shown in place of a view that failed to
    load; *error* is the formatted traceback to display. """
    from .views.pageview import DummyPage
    return DummyPage(pdata.name, pdata, self.dbstate, self.uistate,
                     _("View failed to load. Check error output."), error)
def __create_page(self, pdata, page_def):
    """
    Create a new view page and append it to the notebook, falling back
    to a dummy page if the view fails to construct or display.

    :param pdata: the plugin data describing the view
    :param page_def: the view class to instantiate
    :returns: the created page object
    """
    import traceback
    try:
        page = page_def(pdata, self.dbstate, self.uistate)
    except Exception:
        # Fix: narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt.
        LOG.warning("View '%s' failed to load.", pdata.id)
        traceback.print_exc()
        page = self.__create_dummy_page(pdata, traceback.format_exc())
    try:
        # Probe the display early so a broken view is replaced by the
        # dummy page before being added to the notebook.
        page_display = page.get_display()
    except Exception:
        print("ERROR: '%s' failed to create view" % pdata.name)
        traceback.print_exc()
        page = self.__create_dummy_page(pdata, traceback.format_exc())
        page_display = page.get_display()
    page.define_actions()
    page.post()
    self.pages.append(page)
    # create icon/label for notebook tab (useful for debugging)
    hbox = Gtk.Box()
    image = Gtk.Image()
    image.set_from_icon_name(page.get_stock(), Gtk.IconSize.MENU)
    hbox.pack_start(image, False, True, 0)
    hbox.add(Gtk.Label(label=pdata.name))
    hbox.show_all()
    # Fix: dropped the unused 'page_num' assignment.
    self.notebook.append_page(page.get_display(), hbox)
    if not self.file_loaded:
        self.uimanager.set_actions_visible(self.actiongroup, False)
        self.uimanager.set_actions_visible(self.readonlygroup, False)
        self.uimanager.set_actions_visible(self.undoactions, False)
        self.uimanager.set_actions_visible(self.redoactions, False)
    return page
def view_changed(self, notebook, page, page_num):
    """
    Notebook 'switch-page' callback: record the newly active view in
    the configuration and activate it.
    """
    # Re-entrancy guard: goto_page triggers this signal again.
    if self.view_changing:
        return
    self.view_changing = True
    cat_num = view_num = None
    # NOTE(review): if page_num is absent from page_lookup, cat_num
    # stays None and the lookup below would raise — confirm it is
    # always registered before this fires.
    for key in self.page_lookup:
        if self.page_lookup[key] == page_num:
            cat_num, view_num = key
            break
    # Save last view in configuration
    view_id = self.views[cat_num][view_num][0].id
    config.set('preferences.last-view', view_id)
    last_views = config.get('preferences.last-views')
    if len(last_views) != len(self.views):
        # If the number of categories has changed then reset the defaults
        last_views = [''] * len(self.views)
    last_views[cat_num] = view_id
    config.set('preferences.last-views', last_views)
    config.save()
    self.navigator.view_changed(cat_num, view_num)
    self.__change_page(page_num)
    self.view_changing = False
def __change_page(self, page_num):
    """
    Perform necessary actions when a page is changed: swap the active
    page's action groups and let the new page update itself.
    """
    if not self.dbstate.is_open():
        return
    self.__disconnect_previous_page()
    self.active_page = self.pages[page_num]
    self.__connect_active_page(page_num)
    self.active_page.set_active()
    # Flush pending GTK events before and after the menu rebuild so the
    # UI is consistent when the page's change handler runs.
    while Gtk.events_pending():
        Gtk.main_iteration()
    self.uimanager.update_menu()
    while Gtk.events_pending():
        Gtk.main_iteration()
    self.active_page.change_page()
def __delete_pages(self):
    """
    Calls on_delete() for each view, giving pages a chance to save
    state before the application quits.
    """
    for page in self.pages:
        page.on_delete()
def __disconnect_previous_page(self):
    """
    Disconnect the previous page: remove its merged UI definitions and
    its action groups from the UI manager.
    """
    # Fix: replaced 'list(map(...))' used purely for side effects with
    # an explicit loop.
    for merge_id in self.merge_ids:
        self.uimanager.remove_ui(merge_id)
    if self.active_page is not None:
        self.active_page.set_inactive()
        for grp in self.active_page.get_actions():
            # Only remove groups that are actually registered.
            if grp in self.uimanager.get_action_groups():
                self.uimanager.remove_action_group(grp)
        self.active_page = None
def __connect_active_page(self, page_num):
    """
    Insert the active page's action groups and UI definitions into the
    UI manager and update the ConfigView action's sensitivity.
    """
    for grp in self.active_page.get_actions():
        self.uimanager.insert_action_group(grp)
    # The page's main UI definition first, then any additional ones.
    definitions = [self.active_page.ui_definition()]
    definitions.extend(self.active_page.additional_ui_definitions())
    self.merge_ids = [self.uimanager.add_ui_from_string(xml)
                      for xml in definitions]
    configaction = self.uimanager.get_action(self.actiongroup,
                                             'ConfigView')
    configaction.set_enabled(self.active_page.can_configure())
def import_data(self, *obj):
    """
    Import a file into the currently open database and report the
    import statistics.
    """
    if not self.dbstate.is_open():
        # Importing requires an open database.
        return
    self.db_loader.import_file()
    infotxt = self.db_loader.import_info_text()
    if infotxt:
        InfoDialog(_('Import Statistics'), infotxt,
                   parent=self.window)
    self.__post_load()
def __open_activate(self, obj, value):
    """
    Called when the Open button is clicked: run the DbManager dialog
    and open the selected family tree, or update the window title if
    the manager only changed/renamed trees.
    """
    from .dbman import DbManager
    dialog = DbManager(self.uistate, self.dbstate, self, self.window)
    value = dialog.run()
    if value:
        # A tree was selected: close the current one and load it.
        if self.dbstate.is_open():
            self.dbstate.db.close(user=self.user)
        (filename, title) = value
        self.db_loader.read_file(filename)
        if self.dbstate.db.is_open():
            self._post_load_newdb(filename, 'x-directory/normal', title)
    else:
        if dialog.after_change != "":
            # We change the title of the main window.
            # NOTE(review): this rewrites the "name - Gramps" title to
            # reflect a rename done in the manager; the '<=' marker
            # appears to track a previous rename — confirm intent.
            old_title = self.uistate.window.get_title()
            if old_title:
                delim = old_title.find(' - ')
                tit1 = old_title[:delim]
                tit2 = old_title[delim:]
                new_title = dialog.after_change
                if '<=' in tit2:
                    ## delim2 = tit2.find('<=') + 3
                    ## tit3 = tit2[delim2:-1]
                    new_title += tit2.replace(']', '') + ' => ' + tit1 + ']'
                else:
                    new_title += tit2 + ' <= [' + tit1 + ']'
                self.uistate.window.set_title(new_title)
def __post_load(self):
    """
    Common UI post-load work for both newly opened files and added
    data such as imports: wire the undo/redo callbacks to the database.
    """
    self.dbstate.db.undo_callback = self.__change_undo_label
    self.dbstate.db.redo_callback = self.__change_redo_label
    # Initialise the labels without forcing a menu rebuild yet.
    self.__change_undo_label(None, update_menu=False)
    self.__change_redo_label(None, update_menu=False)
    self.dbstate.db.undo_history_callback = self.undo_history_update
    self.undo_history_close()
def _post_load_newdb(self, filename, filetype, title=None):
    """
    The method called after load of a new database.
    Inherit CLI method to add GUI part.
    """
    self._post_load_newdb_nongui(filename, title)
    self._post_load_newdb_gui(filename, filetype, title)
def _post_load_newdb_gui(self, filename, filetype, title=None):
    """
    Called after a new database is loaded to do GUI stuff: update the
    window title, offer the Check & Repair tool after a crash, and
    reveal the database-dependent actions.
    """
    # GUI related post load db stuff
    # Update window title
    if filename[-1] == os.path.sep:
        filename = filename[:-1]
    name = os.path.basename(filename)
    if title:
        name = title
    rw = not self.dbstate.db.readonly
    if rw:
        msg = "%s - Gramps" % name
    else:
        msg = "%s (%s) - Gramps" % (name, _('Read Only'))
    self.uistate.window.set_title(msg)
    # 'behavior.runcheck' is set when the previous run crashed; offer
    # the Check and Repair tool once and clear the flag.
    if(bool(config.get('behavior.runcheck')) and QuestionDialog2(
            _("Gramps had a problem the last time it was run."),
            _("Would you like to run the Check and Repair tool?"),
            _("Yes"), _("No"), parent=self.uistate.window).run()):
        pdata = self._pmgr.get_plugin('check')
        mod = self._pmgr.load_plugin(pdata)
        tool.gui_tool(dbstate=self.dbstate, user=self.user,
                      tool_class=getattr(mod, pdata.toolclass),
                      options_class=getattr(mod, pdata.optionclass),
                      translated_name=pdata.name,
                      name=pdata.id,
                      category=pdata.category,
                      callback=self.dbstate.db.request_rebuild)
    config.set('behavior.runcheck', False)
    self.__change_page(self.notebook.get_current_page())
    # Read-write actions only appear for writable databases.
    self.uimanager.set_actions_visible(self.actiongroup, rw)
    self.uimanager.set_actions_visible(self.readonlygroup, True)
    self.uimanager.set_actions_visible(self.undoactions, rw)
    self.uimanager.set_actions_visible(self.redoactions, rw)
    self.recent_manager.build()
    # Call common __post_load method for GUI update after a change
    self.__post_load()
def post_close_db(self):
    """
    Reset GUI state after a database has been closed.
    """
    self.undo_history_close()
    self.uistate.window.set_title("%s - Gramps" % _('No Family Tree'))
    self.uistate.clear_filter_results()
    self.__disconnect_previous_page()
    # Hide every action that requires an open database.
    for group in (self.actiongroup, self.readonlygroup,
                  self.undoactions, self.redoactions):
        self.uimanager.set_actions_visible(group, False)
    self.uimanager.update_menu()
    # Forget the recent file so the next start does not auto-open it.
    config.set('paths.recent-file', '')
    config.save()
def enable_menu(self, enable):
    """ Enable/disable the menues. Used by the dbloader for import to
    prevent other operations during import. Needed because simpler methods
    don't work under Gnome with application menus at top of screen (instead
    of Gramps window).
    Note: enable must be set to False on first call.
    """
    # The affected action groups, in the same order in which their saved
    # sensitivity states are stored in self.action_st.
    groups = (self.actiongroup,
              self.readonlygroup,
              self.undoactions,
              self.redoactions,
              self.fileactions,
              self.toolactions,
              self.reportactions,
              self.recent_manager.action_group)
    if not enable:
        # Remember the current sensitivity of every group, then disable
        # them all for the duration of the import.
        self.action_st = tuple(
            self.uimanager.get_actions_sensitive(group) for group in groups)
        for group in groups:
            self.uimanager.set_actions_sensitive(group, enable)
    else:
        # Restore each group to the sensitivity it had before disabling.
        for group, state in zip(groups, self.action_st):
            self.uimanager.set_actions_sensitive(group, state)
def __change_undo_label(self, label, update_menu=True):
    """
    Change the UNDO label

    :param label: text describing the undoable action, or a false value
        when nothing can be undone.
    :param update_menu: when True, refresh the menu immediately.
    """
    # Menu fragment for the Undo item; %s receives the escaped label.
    _menu = '''<placeholder id="undo">
<item>
<attribute name="action">win.Undo</attribute>
<attribute name="label">%s</attribute>
</item>
</placeholder>
'''
    if not label:
        # Nothing to undo: show the plain label and grey the action out.
        label = _('_Undo')
        self.uimanager.set_actions_sensitive(self.undoactions, False)
    else:
        self.uimanager.set_actions_sensitive(self.undoactions, True)
    # Escape the label: it is inserted into GtkBuilder XML.
    self.uimanager.add_ui_from_string([_menu % html.escape(label)])
    if update_menu:
        self.uimanager.update_menu()
def __change_redo_label(self, label, update_menu=True):
    """
    Change the REDO label

    :param label: text describing the redoable action, or a false value
        when nothing can be redone.
    :param update_menu: when True, refresh the menu immediately.
    """
    # Menu fragment for the Redo item; %s receives the escaped label.
    _menu = '''<placeholder id="redo">
<item>
<attribute name="action">win.Redo</attribute>
<attribute name="label">%s</attribute>
</item>
</placeholder>
'''
    if not label:
        # Nothing to redo: show the plain label and grey the action out.
        label = _('_Redo')
        self.uimanager.set_actions_sensitive(self.redoactions, False)
    else:
        self.uimanager.set_actions_sensitive(self.redoactions, True)
    # Escape the label: it is inserted into GtkBuilder XML.
    self.uimanager.add_ui_from_string([_menu % html.escape(label)])
    if update_menu:
        self.uimanager.update_menu()
def undo_history_update(self):
    """
    This function is called to update both the state of
    the Undo History menu item (enable/disable) and
    the contents of the Undo History window.
    """
    try:
        # Try updating undo history window if it exists
        self.undo_history_window.update()
    except AttributeError:
        # Let it go: history window does not exist
        return
def undo_history_close(self):
    """
    Closes the undo history
    """
    try:
        # Try closing undo history window if it exists
        if self.undo_history_window.opened:
            self.undo_history_window.close()
    except AttributeError:
        # Let it go: history window does not exist
        return
def quick_backup(self, *obj):
    """
    Make a quick XML back with or without media.

    Opens the QuickBackup dialog; if it is already open the
    WindowActiveError is swallowed and nothing happens.
    """
    try:
        QuickBackup(self.dbstate, self.uistate, self.user)
    except WindowActiveError:
        return
def autobackup(self):
    """
    Backup the current family tree.

    Only runs when a database is open and has unsaved changes; shows
    progress feedback in the status bar while the XML export runs.
    """
    if self.dbstate.db.is_open() and self.dbstate.db.has_changed:
        self.uistate.set_busy_cursor(True)
        self.uistate.progress.show()
        self.uistate.push_message(self.dbstate, _("Autobackup..."))
        try:
            self.__backup()
        except DbWriteFailure as msg:
            # NOTE(review): `msg` is unused; only a generic message is
            # shown to the user.
            self.uistate.push_message(self.dbstate,
                                      _("Error saving backup data"))
        self.uistate.set_busy_cursor(False)
        self.uistate.progress.hide()


def __backup(self):
    """
    Backup database to a Gramps XML file.

    The target directory and compression flag come from the
    'database.backup-path' / 'database.compress-backup' settings; the file
    name is "<dbname>-<timestamp>.gramps".
    """
    from gramps.plugins.export.exportxml import XmlWriter
    backup_path = config.get('database.backup-path')
    compress = config.get('database.compress-backup')
    writer = XmlWriter(self.dbstate.db, self.user, strip_photos=0,
                       compress=compress)
    timestamp = '{0:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
    backup_name = "%s-%s.gramps" % (self.dbstate.db.get_dbname(),
                                    timestamp)
    filename = os.path.join(backup_path, backup_name)
    writer.write(filename)
def reports_clicked(self, *obj):
    """
    Displays the Reports dialog
    """
    try:
        ReportPluginDialog(self.dbstate, self.uistate, [])
    except WindowActiveError:
        # The dialog is already open; do nothing.
        return


def tools_clicked(self, *obj):
    """
    Displays the Tools dialog
    """
    try:
        ToolPluginDialog(self.dbstate, self.uistate, [])
    except WindowActiveError:
        # The dialog is already open; do nothing.
        return


def clipboard(self, *obj):
    """
    Displays the Clipboard
    """
    # Imported lazily so the clipboard module is only loaded when used.
    from .clipboard import ClipboardWindow
    try:
        ClipboardWindow(self.dbstate, self.uistate)
    except WindowActiveError:
        # The window is already open; do nothing.
        return
# ---------------Add new xxx --------------------------------
# Each handler below creates a new, empty object and opens its editor.
# A WindowActiveError means an editor for the object is already open and
# is deliberately ignored.
def add_new_person(self, *obj):
    """
    Add a new person to the database. (Global keybinding)
    """
    person = Person()
    #the editor requires a surname
    person.primary_name.add_surname(Surname())
    person.primary_name.set_primary_surname(0)
    try:
        EditPerson(self.dbstate, self.uistate, [], person)
    except WindowActiveError:
        pass


def add_new_family(self, *obj):
    """
    Add a new family to the database. (Global keybinding)
    """
    family = Family()
    try:
        EditFamily(self.dbstate, self.uistate, [], family)
    except WindowActiveError:
        pass


def add_new_event(self, *obj):
    """
    Add a new custom/unknown event (Note you type first letter of event)
    """
    try:
        event = Event()
        event.set_type(EventType.UNKNOWN)
        EditEvent(self.dbstate, self.uistate, [], event)
    except WindowActiveError:
        pass


def add_new_place(self, *obj):
    """Add a new place to the place list"""
    try:
        EditPlace(self.dbstate, self.uistate, [], Place())
    except WindowActiveError:
        pass


def add_new_source(self, *obj):
    """Add a new source to the source list"""
    try:
        EditSource(self.dbstate, self.uistate, [], Source())
    except WindowActiveError:
        pass


def add_new_repository(self, *obj):
    """Add a new repository to the repository list"""
    try:
        EditRepository(self.dbstate, self.uistate, [], Repository())
    except WindowActiveError:
        pass


def add_new_citation(self, *obj):
    """
    Add a new citation
    """
    try:
        EditCitation(self.dbstate, self.uistate, [], Citation())
    except WindowActiveError:
        pass


def add_new_media(self, *obj):
    """Add a new media object to the media list"""
    try:
        EditMedia(self.dbstate, self.uistate, [], Media())
    except WindowActiveError:
        pass


def add_new_note(self, *obj):
    """Add a new note to the note list"""
    try:
        EditNote(self.dbstate, self.uistate, [], Note())
    except WindowActiveError:
        pass
# ------------------------------------------------------------------------
def config_view(self, *obj):
    """
    Displays the configuration dialog for the active view
    """
    self.active_page.configure()
def undo(self, *obj):
    """
    Calls the undo function on the database
    """
    self.uistate.set_busy_cursor(True)
    try:
        self.dbstate.db.undo()
    finally:
        # Always restore the cursor, even if the undo operation raises;
        # previously a failure left the GUI stuck with a busy cursor.
        self.uistate.set_busy_cursor(False)


def redo(self, *obj):
    """
    Calls the redo function on the database
    """
    self.uistate.set_busy_cursor(True)
    try:
        self.dbstate.db.redo()
    finally:
        # Always restore the cursor, even if the redo operation raises.
        self.uistate.set_busy_cursor(False)
def undo_history(self, *obj):
    """
    Displays the Undo history window
    """
    try:
        self.undo_history_window = UndoHistory(self.dbstate, self.uistate)
    except WindowActiveError:
        # The window is already open; do nothing.
        return
def export_data(self, *obj):
    """
    Calls the ExportAssistant to export data
    """
    # NOTE(review): other methods check self.dbstate.db.is_open(); here the
    # check is on dbstate itself -- confirm DbState exposes is_open().
    if self.dbstate.is_open():
        from .plug.export import ExportAssistant
        try:
            ExportAssistant(self.dbstate, self.uistate)
        except WindowActiveError:
            # The assistant is already open; do nothing.
            return
def __rebuild_report_and_tool_menus(self):
    """
    Callback that rebuilds the tools and reports menu
    """
    self.__build_tools_menu(self._pmgr.get_reg_tools())
    self.__build_report_menu(self._pmgr.get_reg_reports())
    self.uistate.set_relationship_class()


def __build_tools_menu(self, tool_menu_list):
    """
    Builds a new tools menu

    :param tool_menu_list: registered tool plugins to show in the menu.
    """
    # Remove the previous actions and UI definition before rebuilding.
    if self.toolactions:
        self.uistate.uimanager.remove_action_group(self.toolactions)
        self.uistate.uimanager.remove_ui(self.tool_menu_ui_id)
    self.toolactions = ActionGroup(name='ToolWindow')
    (uidef, actions) = self.build_plugin_menu(
        'ToolsMenu', tool_menu_list, tool.tool_categories,
        make_plugin_callback)
    self.toolactions.add_actions(actions)
    self.tool_menu_ui_id = self.uistate.uimanager.add_ui_from_string(uidef)
    self.uimanager.insert_action_group(self.toolactions)


def __build_report_menu(self, report_menu_list):
    """
    Builds a new reports menu

    :param report_menu_list: registered report plugins to show in the menu.
    """
    # Remove the previous actions and UI definition before rebuilding.
    if self.reportactions:
        self.uistate.uimanager.remove_action_group(self.reportactions)
        self.uistate.uimanager.remove_ui(self.report_menu_ui_id)
    self.reportactions = ActionGroup(name='ReportWindow')
    (udef, actions) = self.build_plugin_menu(
        'ReportsMenu', report_menu_list, standalone_categories,
        make_plugin_callback)
    self.reportactions.add_actions(actions)
    self.report_menu_ui_id = self.uistate.uimanager.add_ui_from_string(udef)
    self.uimanager.insert_action_group(self.reportactions)
def build_plugin_menu(self, text, item_list, categories, func):
    """
    Builds a new XML description for a menu based on the list of plugindata

    :param text: menu section id suffix (e.g. 'ToolsMenu').
    :param item_list: iterable of PluginData to include.
    :param categories: mapping of category id -> (name, translated name).
    :param func: factory producing the callback for each plugin action.
    :return: ([ui_xml_string], [(action_name, callback), ...])
    """
    menuitem = ('<item>\n'
                '<attribute name="action">win.%s</attribute>\n'
                '<attribute name="label">%s...</attribute>\n'
                '</item>\n')
    actions = []
    ofile = StringIO()
    ofile.write('<section id="%s">' % ('P_' + text))
    # Group the plugins by (translated) category.
    hash_data = defaultdict(list)
    for pdata in item_list:
        if not pdata.supported:
            category = _UNSUPPORTED
        else:
            category = categories[pdata.category]
        hash_data[category].append(pdata)
    # Sort categories, skipping the unsupported
    catlist = sorted(item for item in hash_data if item != _UNSUPPORTED)
    for key in catlist:
        # Escape the translated category name: it is inserted into
        # GtkBuilder XML (consistent with __change_undo_label).
        ofile.write('<submenu>\n<attribute name="label"'
                    '>%s</attribute>\n' % html.escape(key[1]))
        pdatas = hash_data[key]
        pdatas.sort(key=lambda x: x.name)
        for pdata in pdatas:
            new_key = valid_action_name(pdata.id)
            # Escape the plugin name as well; an unescaped '&' or '<'
            # would make the UI definition invalid XML.
            ofile.write(menuitem % (new_key, html.escape(pdata.name)))
            actions.append((new_key, func(pdata, self.dbstate,
                                          self.uistate)))
        ofile.write('</submenu>\n')
    # If there are any unsupported items we add separator
    # and the unsupported category at the end of the menu
    if _UNSUPPORTED in hash_data:
        ofile.write('<submenu>\n<attribute name="label"'
                    '>%s</attribute>\n' %
                    html.escape(_UNSUPPORTED[1]))
        pdatas = hash_data[_UNSUPPORTED]
        pdatas.sort(key=lambda x: x.name)
        for pdata in pdatas:
            # Use the same sanitizer as the supported branch so action
            # names stay valid (was: pdata.id.replace(' ', '-')).
            new_key = valid_action_name(pdata.id)
            ofile.write(menuitem % (new_key, html.escape(pdata.name)))
            actions.append((new_key, func(pdata, self.dbstate,
                                          self.uistate)))
        ofile.write('</submenu>\n')
    ofile.write('</section>\n')
    return ([ofile.getvalue()], actions)
def display_about_box(self, *obj):
    """Display the About box."""
    about = GrampsAboutDialog(self.uistate.window)
    about.run()
    # Destroy the dialog once it is dismissed.
    about.destroy()
def get_available_views(self):
    """
    Query the views and determine what views to show and in which order

    :Returns: a list of lists containing tuples (view_id, viewclass)
    """
    pmgr = GuiPluginManager.get_instance()
    view_list = pmgr.get_reg_views()
    # category name -> list of (pdata, viewclass) in display order
    viewstoshow = defaultdict(list)
    for pdata in view_list:
        mod = pmgr.load_plugin(pdata)
        if not mod or not hasattr(mod, pdata.viewclass):
            #import of plugin failed
            try:
                lasterror = pmgr.get_fail_list()[-1][1][1]
            except Exception:
                # Was a bare `except:`; narrowed so KeyboardInterrupt and
                # SystemExit are no longer swallowed here.
                lasterror = '*** No error found, '
                lasterror += 'probably error in gpr.py file ***'
            ErrorDialog(
                _('Failed Loading View'),
                _('The view %(name)s did not load and reported an error.'
                  '\n\n%(error_msg)s\n\n'
                  'If you are unable to fix the fault yourself then you '
                  'can submit a bug at %(gramps_bugtracker_url)s '
                  'or contact the view author (%(firstauthoremail)s).\n\n'
                  'If you do not want Gramps to try and load this view '
                  'again, you can hide it by using the Plugin Manager '
                  'on the Help menu.'
                 ) % {'name': pdata.name,
                      'gramps_bugtracker_url': URL_BUGHOME,
                      'firstauthoremail': pdata.authors_email[0]
                          if pdata.authors_email else '...',
                      'error_msg': lasterror},
                parent=self.uistate.window)
            continue
        viewclass = getattr(mod, pdata.viewclass)
        # pdata.category is (string, trans-string):
        if pdata.order == START:
            viewstoshow[pdata.category[0]].insert(0, (pdata, viewclass))
        else:
            viewstoshow[pdata.category[0]].append((pdata, viewclass))
    # First, get those in order defined, if exists:
    resultorder = [viewstoshow[cat]
                   for cat in config.get("interface.view-categories")
                   if cat in viewstoshow]
    # Next, get the rest in some order:
    resultorder.extend(viewstoshow[cat]
                       for cat in sorted(viewstoshow.keys())
                       if viewstoshow[cat] not in resultorder)
    return resultorder
# Note: the handlers below deliberately omit an explicit `self` parameter;
# when invoked as bound methods the instance is absorbed by *obj.
def key_bindings(*obj):
    """
    Display key bindings
    """
    display_help(webpage=WIKI_HELP_PAGE_KEY)


def manual_activate(*obj):
    """
    Display the Gramps manual
    """
    display_help(webpage=WIKI_HELP_PAGE_MAN)


def report_bug_activate(*obj):
    """
    Display the bug tracker web site
    """
    display_url(URL_BUGTRACKER)


def home_page_activate(*obj):
    """
    Display the Gramps home page
    """
    display_url(URL_HOMEPAGE)


def mailing_lists_activate(*obj):
    """
    Display the mailing list web page
    """
    display_url(URL_MAILINGLIST)


def extra_plugins_activate(*obj):
    """
    Display the wiki page with extra plugins
    """
    display_url(URL_WIKISTRING+WIKI_EXTRAPLUGINS)


def faq_activate(*obj):
    """
    Display FAQ
    """
    display_help(webpage=WIKI_HELP_PAGE_FAQ)
def run_plugin(pdata, dbstate, uistate):
    """
    Run a plugin based on its PluginData:
    1/ load plugin.
    2/ the report is run

    :param pdata: PluginData describing the plugin to run.
    :param dbstate: database state handed to the report/tool.
    :param uistate: display state; supplies the parent window.
    """
    pmgr = GuiPluginManager.get_instance()
    mod = pmgr.load_plugin(pdata)
    if not mod:
        #import of plugin failed
        failed = pmgr.get_fail_list()
        if failed:
            error_msg = failed[-1][1][1]
        else:
            error_msg = "(no error message)"
        ErrorDialog(
            _('Failed Loading Plugin'),
            _('The plugin %(name)s did not load and reported an error.\n\n'
              '%(error_msg)s\n\n'
              'If you are unable to fix the fault yourself then you can '
              'submit a bug at %(gramps_bugtracker_url)s or contact '
              'the plugin author (%(firstauthoremail)s).\n\n'
              'If you do not want Gramps to try and load this plugin again, '
              'you can hide it by using the Plugin Manager on the '
              'Help menu.') % {'name' : pdata.name,
                               'gramps_bugtracker_url' : URL_BUGHOME,
                               'firstauthoremail' : pdata.authors_email[0]
                                   if pdata.authors_email
                                   else '...',
                               'error_msg' : error_msg},
            parent=uistate.window)
        return
    # Dispatch on plugin type: report dialog or tool dialog.
    if pdata.ptype == REPORT:
        report(dbstate, uistate, uistate.get_active('Person'),
               getattr(mod, pdata.reportclass),
               getattr(mod, pdata.optionclass),
               pdata.name, pdata.id,
               pdata.category, pdata.require_active)
    else:
        tool.gui_tool(dbstate=dbstate, user=User(uistate=uistate),
                      tool_class=getattr(mod, pdata.toolclass),
                      options_class=getattr(mod, pdata.optionclass),
                      translated_name=pdata.name,
                      name=pdata.id,
                      category=pdata.category,
                      callback=dbstate.db.request_rebuild)
    # Plugins can create many transient objects; collect aggressively.
    gc.collect(2)
def make_plugin_callback(pdata, dbstate, uistate):
    """
    Makes a callback for a report/tool menu item

    The returned callable matches the GAction activate signature
    (action, parameter) and runs the plugin when invoked.
    """
    def _activate(action, parameter):
        return run_plugin(pdata, dbstate, uistate)
    return _activate
def views_to_show(views, use_last=True):
    """
    Determine based on preference setting which views should be shown

    :param views: list of category lists of (pdata, viewclass) tuples.
    :param use_last: when True, restore the last-used view per category
        from the 'preferences.last-view(s)' settings.
    :return: (current category index, view index within that category,
        list of default view indices, one per category)
    """
    current_cat = 0
    current_cat_view = 0
    default_cat_views = [0] * len(views)
    if use_last:
        current_page_id = config.get('preferences.last-view')
        default_page_ids = config.get('preferences.last-views')
        found = False
        for indexcat, cat_views in enumerate(views):
            cat_view = 0
            for pdata, page_def in cat_views:
                if not found:
                    # Look for the overall last-used view first.
                    if pdata.id == current_page_id:
                        current_cat = indexcat
                        current_cat_view = cat_view
                        default_cat_views[indexcat] = cat_view
                        found = True
                        break
                # Record the last-used view within this category.
                if pdata.id in default_page_ids:
                    default_cat_views[indexcat] = cat_view
                cat_view += 1
        if not found:
            # The remembered view no longer exists: fall back to the first.
            current_cat = 0
            current_cat_view = 0
    return current_cat, current_cat_view, default_cat_views
class QuickBackup(ManagedWindow): # TODO move this class into its own module
    """Modal dialog that writes a quick Gramps XML (or .gpkg) backup."""

    def __init__(self, dbstate, uistate, user):
        """
        Make a quick XML back with or without media.

        :param dbstate: database state; supplies the database to export.
        :param uistate: display state; supplies the parent window and
            progress/status feedback.
        :param user: user callback object passed to the exporters.
        """
        self.dbstate = dbstate
        self.user = user
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        window = Gtk.Dialog(title='',
                            transient_for=self.uistate.window,
                            destroy_with_parent=True)
        self.set_window(window, None, _("Gramps XML Backup"))
        self.setup_configs('interface.quick-backup', 500, 150)
        close_button = window.add_button(_('_Close'),
                                         Gtk.ResponseType.CLOSE)
        ok_button = window.add_button(_('_OK'),
                                      Gtk.ResponseType.APPLY)
        vbox = window.get_content_area()
        # --- Path row: directory entry plus a folder-chooser button.
        hbox = Gtk.Box()
        label = Gtk.Label(label=_("Path:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        path_entry = Gtk.Entry()
        dirtext = config.get('paths.quick-backup-directory')
        path_entry.set_text(dirtext)
        hbox.pack_start(path_entry, True, True, 0)
        file_entry = Gtk.Entry()
        button = Gtk.Button()
        button.connect("clicked",
                       lambda widget:
                       self.select_backup_path(widget, path_entry))
        image = Gtk.Image()
        image.set_from_icon_name('document-open', Gtk.IconSize.BUTTON)
        image.show()
        button.add(image)
        hbox.pack_end(button, False, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        # --- File row: file name from the configurable template.
        hbox = Gtk.Box()
        label = Gtk.Label(label=_("File:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        struct_time = time.localtime()
        file_entry.set_text(
            config.get('paths.quick-backup-filename'
                      ) % {"filename": self.dbstate.db.get_dbname(),
                           "year": struct_time.tm_year,
                           "month": struct_time.tm_mon,
                           "day": struct_time.tm_mday,
                           "hour": struct_time.tm_hour,
                           "minutes": struct_time.tm_min,
                           "seconds": struct_time.tm_sec,
                           "extension": "gpkg"})
        hbox.pack_end(file_entry, True, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        # --- Media row: include/exclude radio buttons with a size estimate.
        hbox = Gtk.Box()
        fbytes = 0
        mbytes = "0"
        for media in self.dbstate.db.iter_media():
            fullname = media_path_full(self.dbstate.db, media.get_path())
            try:
                fbytes += os.path.getsize(fullname)
                length = len(str(fbytes))
                if fbytes <= 999999:
                    mbytes = "< 1"
                else:
                    # Crude MB figure: drop the last six decimal digits.
                    mbytes = str(fbytes)[:(length-6)]
            except OSError:
                # Missing media file: skip it in the size estimate.
                pass
        label = Gtk.Label(label=_("Media:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        include = Gtk.RadioButton.new_with_mnemonic_from_widget(
            None, "%s (%s %s)" % (_("Include"),
                                  mbytes, _("Megabyte|MB")))
        exclude = Gtk.RadioButton.new_with_mnemonic_from_widget(include,
                                                                _("Exclude"))
        include.connect("toggled", lambda widget: self.media_toggle(widget,
                                                                    file_entry))
        include_mode = config.get('preferences.quick-backup-include-mode')
        if include_mode:
            include.set_active(True)
        else:
            exclude.set_active(True)
        hbox.pack_start(include, False, True, 0)
        hbox.pack_end(exclude, False, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        self.show()
        dbackup = window.run()
        if dbackup == Gtk.ResponseType.APPLY:
            # if file exists, ask if overwrite; else abort
            basefile = file_entry.get_text()
            basefile = basefile.replace("/", r"-")
            filename = os.path.join(path_entry.get_text(), basefile)
            if os.path.exists(filename):
                question = QuestionDialog2(
                    _("Backup file already exists! Overwrite?"),
                    _("The file '%s' exists.") % filename,
                    _("Proceed and overwrite"),
                    _("Cancel the backup"),
                    parent=self.window)
                yes_no = question.run()
                if not yes_no:
                    # User cancelled: remember a changed directory, close.
                    current_dir = path_entry.get_text()
                    if current_dir != dirtext:
                        config.set('paths.quick-backup-directory', current_dir)
                    self.close()
                    return
            position = self.window.get_position() # crock
            window.hide()
            self.window.move(position[0], position[1])
            self.uistate.set_busy_cursor(True)
            self.uistate.pulse_progressbar(0)
            self.uistate.progress.show()
            self.uistate.push_message(self.dbstate, _("Making backup..."))
            if include.get_active():
                # With media: full package export (.gpkg).
                from gramps.plugins.export.exportpkg import PackageWriter
                writer = PackageWriter(self.dbstate.db, filename, self.user)
                writer.export()
            else:
                # Without media: compressed XML export (.gramps).
                from gramps.plugins.export.exportxml import XmlWriter
                writer = XmlWriter(self.dbstate.db, self.user,
                                   strip_photos=0, compress=1)
                writer.write(filename)
            self.uistate.set_busy_cursor(False)
            self.uistate.progress.hide()
            self.uistate.push_message(self.dbstate,
                                      _("Backup saved to '%s'") % filename)
            config.set('paths.quick-backup-directory', path_entry.get_text())
        else:
            self.uistate.push_message(self.dbstate, _("Backup aborted"))
        if dbackup != Gtk.ResponseType.DELETE_EVENT:
            self.close()

    def select_backup_path(self, widget, path_entry):
        """
        Choose a backup folder. Make sure there is one highlighted in
        right pane, otherwise FileChooserDialog will hang.
        """
        fdialog = Gtk.FileChooserDialog(
            title=_("Select backup directory"),
            transient_for=self.window,
            action=Gtk.FileChooserAction.SELECT_FOLDER)
        fdialog.add_buttons(_('_Cancel'), Gtk.ResponseType.CANCEL,
                            _('_Apply'), Gtk.ResponseType.OK)
        mpath = path_entry.get_text()
        if not mpath:
            mpath = HOME_DIR
        fdialog.set_current_folder(os.path.dirname(mpath))
        # Highlight an entry so the dialog does not hang (see docstring).
        fdialog.set_filename(os.path.join(mpath, "."))
        status = fdialog.run()
        if status == Gtk.ResponseType.OK:
            filename = fdialog.get_filename()
            if filename:
                path_entry.set_text(filename)
        fdialog.destroy()
        return True

    def media_toggle(self, widget, file_entry):
        """
        Toggles media include values in the quick backup dialog.
        """
        include = widget.get_active()
        config.set('preferences.quick-backup-include-mode', include)
        # A backup with media is a package (.gpkg); without, plain XML.
        extension = "gpkg" if include else "gramps"
        filename = file_entry.get_text()
        if "." in filename:
            base, ext = filename.rsplit(".", 1)
            file_entry.set_text("%s.%s" % (base, extension))
        else:
            file_entry.set_text("%s.%s" % (filename, extension))
| sam-m888/gramps | gramps/gui/viewmanager.py | Python | gpl-2.0 | 69,729 | [
"Brian"
] | cde72f2af4c3fd7aa71276e335ccd453c8b01bc823436987a6ff513c5babf1b5 |
import re
import ast
import sys

try:
    from distutils.util import get_platform
    is_windows = get_platform().startswith("win")
except ImportError:
    # Don't break install if distutils is incompatible in some way
    # probably overly defensive.
    is_windows = False

try:
    from setuptools import setup
except ImportError:
    # Fall back to plain distutils when setuptools is unavailable.
    from distutils.core import setup

# Long description shown on PyPI: README plus the changelog.
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

requirements = [
    'six',
    'webob',
    'psutil',
    'pyyaml',
]

# The Paste-based web front-end is only required under Python 2.
if sys.version_info[0] == 2:
    requirements.append('PasteScript')
    requirements.append('paste')

test_requirements = [
    # TODO: put package test requirements here
]

# Read __version__ out of pulsar/__init__.py without importing the package.
_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('pulsar/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))

# Windows needs the .bat launcher; POSIX uses the shell script.
if is_windows:
    scripts = ["scripts/pulsar.bat"]
else:
    scripts = ["scripts/pulsar"]

setup(
    name='pulsar-app',
    version=version,
    description='Distributed job execution application built for Galaxy (http://galaxyproject.org/).',
    long_description=readme + '\n\n' + history,
    author='Galaxy Project',
    author_email='jmchilton@gmail.com',
    url='https://github.com/galaxyproject/pulsar',
    packages=[
        'pulsar',
        'pulsar.cache',
        'pulsar.client',
        'pulsar.client.test',
        'pulsar.client.staging',
        'pulsar.client.transport',
        'pulsar.managers',
        'pulsar.managers.base',
        'pulsar.managers.staging',
        'pulsar.managers.util',
        'pulsar.managers.util.cli',
        'pulsar.managers.util.cli.job',
        'pulsar.managers.util.cli.shell',
        'pulsar.managers.util.condor',
        'pulsar.managers.util.drmaa',
        'pulsar.managers.util.job_script',
        'pulsar.mesos',
        'pulsar.messaging',
        'pulsar.scripts',
        'pulsar.tools',
        'pulsar.web',
        'galaxy',
        'galaxy.jobs',
        'galaxy.jobs.metrics',
        'galaxy.jobs.metrics.collectl',
        'galaxy.jobs.metrics.instrumenters',
        'galaxy.objectstore',
        'galaxy.tools',
        'galaxy.tools.linters',
        'galaxy.tools.deps',
        'galaxy.tools.deps.resolvers',
        'galaxy.util',
    ],
    entry_points='''
    [console_scripts]
    pulsar-main=pulsar.main:main
    pulsar-check=pulsar.client.test.check:main
    pulsar-config=pulsar.scripts.config:main
    pulsar-drmaa-launch=pulsar.scripts.drmaa_launch:main
    pulsar-drmaa-kill=pulsar.scripts.drmaa_kill:main
    pulsar-chown-working-directory=pulsar.scripts.chown_working_directory:main
    ''',
    scripts=scripts,
    package_data={'pulsar': [
        'managers/util/job_script/DEFAULT_JOB_FILE_TEMPLATE.sh',
        'managers/util/job_script/CLUSTER_SLOTS_STATEMENT.sh',
    ]},
    package_dir={'pulsar': 'pulsar',
                 'galaxy': 'galaxy'},
    include_package_data=True,
    install_requires=requirements,
    license="Apache License 2.0",
    zip_safe=False,
    keywords='pulsar',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Environment :: Console',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    test_suite='test',
    tests_require=test_requirements
)
| jmchilton/pulsar | setup.py | Python | apache-2.0 | 3,694 | [
"Galaxy"
] | ebde0c90b3620fba8b80eb019b0e4650d77195dc6f4a84a2997aa5ec745f3a12 |
""" pycontact setup
by Maximilian Scheurer, Peter Rodenkirch
"""
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
extensions = [Extension("PyContact.cy_modules.cy_gridsearch",
["PyContact/cy_modules/cy_gridsearch.pyx"],
language="c++",
include_dirs=[".", "PyContact/cy_modules/src"],
extra_compile_args=["-std=c++0x"]), ]
setup(
name='pycontact',
version='1.0.4',
description='PyContact',
long_description='Tool for analysis of non-covalent interactions in MD trajectories',
url='https://github.com/maxscheurer/pycontact',
author='Maximilian Scheurer, Peter Rodenkirch',
author_email='mscheurer@ks.uiuc.edu',
license='GPLv3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3.6',
],
keywords='computational biophysics simulation biology bioinformatics visualization protein biomolecules dna',
package_dir={'PyContact': 'PyContact'},
packages=find_packages(),
python_requires=">=3.6",
setup_requires=['cython'],
install_requires=['numpy >= 1.16',
'matplotlib',
'mdanalysis >= 0.20.0',
'cython',
'seaborn',
'scipy',
'PyQt5'],
cmdclass={'build_ext': build_ext},
ext_modules=cythonize(extensions),
package_data={'PyContact': ['exampleData/defaultsession',
'exampleData/*.psf', 'exampleData/*.pdb',
'exampleData/*.dcd', 'exampleData/*.tpr',
'exampleData/*.xtc', 'gui/*.tcl',
'db/aa.db', 'cy_modules/*.pyx',
'cy_modules/src/*']},
entry_points={
'console_scripts': [
'pycontact=PyContact.pycontact:main',
],
},
)
| maxscheurer/pycontact | setup.py | Python | gpl-3.0 | 2,253 | [
"MDAnalysis"
] | 5fb529c06e183957d7c7afc88370e3406609332789cb3eed7b79bf3c04676172 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from __future__ import unicode_literals

import unittest
import os

from pymatgen.io.feff.sets import FeffInputSet
from pymatgen.io.feff import FeffPot
from pymatgen.io.cif import CifParser

# Module-level fixtures shared by all tests: parse the CoO test structure
# once and create a single Materials Project input set instance.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                        'test_files')

cif_file = 'CoO19128.cif'
central_atom = 'O'
cif_path = os.path.join(test_dir, cif_file)
r = CifParser(cif_path)
structure = r.get_structures()[0]
x = FeffInputSet("MaterialsProject")
class FeffInputSetTest(unittest.TestCase):
    """Tests for FEFF input generation from the CoO test structure."""

    # Expected feff.inp header.  The last four (site) lines may be emitted
    # in any order, so test_get_header compares them order-insensitively.
    header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.666667 0.333333 0.003676
* 2 Co 0.333334 0.666666 0.503676
* 3 O 0.333334 0.666666 0.121324
* 4 O 0.666667 0.333333 0.621325"""

    def test_get_header(self):
        comment = 'From cif file'
        # NOTE(review): methods are invoked unbound-style with the module
        # level instance `x` passed explicitly as self.
        header = str(FeffInputSet.get_header(x, structure, 'CoO19128.cif',
                                             comment))
        ref = FeffInputSetTest.header_string.splitlines()
        # Site lines with the leading "* n" counters stripped.
        last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
        for i, l in enumerate(header.splitlines()):
            if i < 9:
                # Fixed-order header lines must match exactly.
                self.assertEqual(l, ref[i])
            else:
                # Site lines may appear in any order.
                s = " ".join(l.split()[2:])
                self.assertIn(s, last4)

    def test_getfefftags(self):
        tags = FeffInputSet.get_feff_tags(x, "XANES").as_dict()
        self.assertEqual(tags['COREHOLE'], "FSR",
                         "Failed to generate PARAMETERS string")

    def test_get_feffPot(self):
        POT = str(FeffInputSet.get_feff_pot(x, structure, central_atom))
        d, dr = FeffPot.pot_dict_from_string(POT)
        self.assertEqual(d['Co'], 1, "Wrong symbols read in for FeffPot")

    def test_get_feff_atoms(self):
        ATOMS = str(FeffInputSet.get_feff_atoms(x, structure, central_atom))
        # The absorbing atom symbol appears in the fifth column of line 4.
        self.assertEqual(ATOMS.splitlines()[3].split()[4], central_atom,
                         "failed to create ATOMS string")

    def test_to_and_from_dict(self):
        d = x.as_dict(structure, 'XANES', 'cif', 'O', 'test')
        f = d['feff.inp']
        f2 = x.from_dict(d)
        self.assertEqual(f, f2, "FeffinputSet to and from dict do not match")


if __name__ == '__main__':
    unittest.main()
| migueldiascosta/pymatgen | pymatgen/io/feff/tests/test_sets.py | Python | mit | 2,669 | [
"FEFF",
"pymatgen"
] | 3ad1ce482cc10e599aa2a99ea1e96dbe9fde8e199028db05256d53924aa3d89b |
import unittest
from ase.io import read
from ase.calculators.emt import EMT
from geometricmd.curve_shorten import convert_vector_to_atoms, length, get_rotation
import math
import numpy as np
from scipy.optimize import check_grad
# Check length can be computed
def check_length(total_number_of_points):
    """Return True if the curve length functional can be evaluated on the
    test molecule without raising, False otherwise.

    :param total_number_of_points: number of points along the discretised
        curve (the two endpoints are fixed; the rest are free).
    """
    try:
        energy = 100.0

        molecule = read('test_files/x0.xyz')
        molecule.set_calculator(EMT())

        start_point = read('test_files/x0.xyz')
        start_point.set_calculator(EMT())
        end_point = read('test_files/xN.xyz')

        start = start_point.get_positions().flatten()
        end = end_point.get_positions().flatten()

        dimension = len(molecule.get_positions().flatten())
        codimension = dimension - 1

        rotation_matrix = get_rotation(start, end, dimension)
        # Floor division: the tuple repeat count must be an int (true
        # division yields a float under Python 3 and raises TypeError).
        mass_matrix = np.diag(np.dstack(
            (molecule.get_masses(),) *
            (dimension // len(molecule.get_masses()))).flatten())

        def metric(point):
            # Maupertuis metric factor and its gradient at `point`.
            molecule.set_positions(convert_vector_to_atoms(point))
            cf = math.sqrt(max([2*(energy - molecule.get_potential_energy()),
                                1E-9]))
            return [cf, molecule.get_forces().flatten()/cf]

        x = np.random.rand((total_number_of_points-2) * codimension)
        length(x, start, end, mass_matrix, rotation_matrix,
               total_number_of_points, codimension, metric)
        return True
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return False
# Check the gradient of the length functional against a finite-difference
# approximation (the original comment was a copy-paste of check_length's).
def check_length_gradient(total_number_of_points):
    """Return True if the analytic gradient of the length functional agrees
    with scipy's finite-difference check to within 1E-4, False otherwise.

    :param total_number_of_points: number of points along the discretised
        curve (the two endpoints are fixed; the rest are free).
    """
    try:
        energy = 100.0

        molecule = read('test_files/x0.xyz')
        molecule.set_calculator(EMT())

        start_point = read('test_files/x0.xyz')
        start_point.set_calculator(EMT())
        end_point = read('test_files/xN.xyz')

        start = start_point.get_positions().flatten()
        end = end_point.get_positions().flatten()

        dimension = len(molecule.get_positions().flatten())
        codimension = dimension - 1

        rotation_matrix = get_rotation(start, end, dimension)
        # Floor division: the tuple repeat count must be an int (true
        # division yields a float under Python 3 and raises TypeError).
        mass_matrix = np.diag(np.dstack(
            (molecule.get_masses(),) *
            (dimension // len(molecule.get_masses()))).flatten())

        def metric(point):
            # Maupertuis metric factor and its gradient at `point`.
            molecule.set_positions(convert_vector_to_atoms(point))
            cf = math.sqrt(max([2*(energy - molecule.get_potential_energy()),
                                1E-9]))
            return [cf, molecule.get_forces().flatten()/cf]

        x = np.random.rand((total_number_of_points-2) * codimension)

        def L(curve):
            # Scalar length value.
            return length(curve, start, end, mass_matrix, rotation_matrix,
                          total_number_of_points, codimension, metric)[0]

        def GL(curve):
            # Analytic gradient of the length value.
            return length(curve, start, end, mass_matrix, rotation_matrix,
                          total_number_of_points, codimension, metric)[1]

        err = check_grad(L, GL, x)
        return abs(err) < 1E-4
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return False
# Compile class of unit tests.
class GeometryTests(unittest.TestCase):
    """Smoke tests for the curve-length functional and its gradient."""

    def testOne(self):
        # failUnless is a long-deprecated alias (removed in Python 3.12);
        # assertTrue is the supported name.
        self.assertTrue(check_length(10))

    def testTwo(self):
        self.assertTrue(check_length_gradient(10))


def main():
    """Run the unit-test suite for this module."""
    unittest.main()

if __name__ == '__main__':
    main()
"ASE"
] | 72d0e30d9019cc29c48b6c3efe3ef0d6016b87f80a310e1e528da462a6dda147 |
class ExampleCtrl(object):
    """Mealy transducer, rendered as a data-driven transition table.

    Internal states are integers; the current state is kept in the
    attribute ``state``.  To take a transition, call :meth:`move` with the
    current input.  Input variable names are listed in ``input_vars``.

    Originally generated by tulip.dumpsmach on 2015-08-13 05:18:57 UTC.
    To learn more about TuLiP, visit http://tulip-control.org
    """

    # Transition table: state -> {env2: (next_state, loc, stage)}.
    # An env2 value missing from a row is an invalid input in that state.
    _TRANSITIONS = {
        0: {0: (0, 16, 1), 1: (33, 17, 1), 2: (34, 17, 1)},
        1: {0: (0, 16, 1), 1: (33, 17, 1), 2: (34, 17, 1), 3: (38, 16, 1)},
        2: {0: (0, 16, 1), 1: (33, 17, 1), 2: (34, 17, 1), 3: (38, 16, 1), 4: (41, 16, 1)},
        3: {1: (1, 16, 1), 2: (2, 16, 1), 3: (38, 16, 1), 4: (41, 16, 1), 5: (39, 16, 1)},
        4: {2: (2, 16, 1), 3: (38, 16, 1), 4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1)},
        5: {5: (39, 16, 1), 6: (40, 16, 1), 7: (5, 16, 1)},
        6: {0: (8, 20, 1), 1: (18, 21, 1), 2: (19, 21, 1), 3: (21, 20, 1)},
        7: {0: (8, 20, 1), 1: (18, 21, 1), 2: (19, 21, 1), 3: (21, 20, 1), 4: (24, 20, 1)},
        8: {0: (8, 20, 1), 1: (18, 21, 1), 2: (19, 21, 1)},
        9: {4: (24, 20, 1), 5: (22, 20, 1), 6: (23, 20, 1), 7: (20, 20, 1)},
        10: {5: (12, 20, 2), 6: (11, 20, 2), 7: (10, 20, 2)},
        11: {4: (13, 20, 2), 5: (12, 20, 2), 6: (11, 20, 2), 7: (10, 20, 2)},
        12: {3: (14, 20, 2), 4: (13, 20, 2), 5: (12, 20, 2), 6: (11, 20, 2), 7: (10, 20, 2)},
        13: {2: (16, 20, 2), 3: (14, 20, 2), 4: (13, 20, 2), 5: (12, 20, 2), 6: (11, 20, 2)},
        14: {1: (15, 20, 2), 2: (16, 20, 2), 3: (14, 20, 2), 4: (13, 20, 2), 5: (12, 20, 2)},
        15: {0: (17, 20, 2), 1: (15, 20, 2), 2: (16, 20, 2), 3: (14, 20, 2)},
        16: {0: (17, 20, 2), 1: (15, 20, 2), 2: (16, 20, 2), 3: (14, 20, 2), 4: (13, 20, 2)},
        17: {0: (17, 20, 2), 1: (15, 20, 2), 2: (16, 20, 2)},
        18: {0: (17, 20, 2), 1: (15, 20, 2), 2: (16, 20, 2), 3: (14, 20, 2)},
        19: {0: (17, 20, 2), 1: (15, 20, 2), 2: (16, 20, 2), 3: (14, 20, 2), 4: (13, 20, 2)},
        20: {5: (22, 20, 1), 6: (23, 20, 1), 7: (20, 20, 1)},
        21: {1: (18, 21, 1), 2: (19, 21, 1), 3: (21, 20, 1), 4: (24, 20, 1), 5: (22, 20, 1)},
        22: {3: (21, 20, 1), 4: (24, 20, 1), 5: (22, 20, 1), 6: (23, 20, 1), 7: (20, 20, 1)},
        23: {4: (24, 20, 1), 5: (22, 20, 1), 6: (23, 20, 1), 7: (20, 20, 1)},
        24: {2: (19, 21, 1), 3: (21, 20, 1), 4: (24, 20, 1), 5: (22, 20, 1), 6: (23, 20, 1)},
        25: {3: (21, 20, 1), 4: (24, 20, 1), 5: (22, 20, 1), 6: (23, 20, 1), 7: (20, 20, 1)},
        26: {2: (29, 19, 1), 3: (27, 18, 1), 4: (26, 18, 1), 5: (25, 19, 1), 6: (9, 19, 1)},
        27: {1: (28, 19, 1), 2: (29, 19, 1), 3: (27, 18, 1), 4: (26, 18, 1), 5: (25, 19, 1)},
        28: {0: (8, 20, 1), 1: (6, 20, 1), 2: (7, 20, 1), 3: (21, 20, 1)},
        29: {0: (8, 20, 1), 1: (6, 20, 1), 2: (7, 20, 1), 3: (21, 20, 1), 4: (24, 20, 1)},
        30: {0: (32, 18, 1), 1: (28, 19, 1), 2: (29, 19, 1), 3: (27, 18, 1)},
        31: {0: (32, 18, 1), 1: (28, 19, 1), 2: (29, 19, 1), 3: (27, 18, 1), 4: (26, 18, 1)},
        32: {0: (32, 18, 1), 1: (28, 19, 1), 2: (29, 19, 1)},
        33: {0: (32, 18, 1), 1: (30, 18, 1), 2: (31, 18, 1), 3: (38, 16, 1)},
        34: {0: (32, 18, 1), 1: (30, 18, 1), 2: (31, 18, 1), 3: (38, 16, 1), 4: (41, 16, 1)},
        35: {3: (38, 16, 1), 4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1), 7: (37, 0, 1)},
        36: {4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1), 7: (37, 0, 1)},
        37: {5: (35, 8, 1), 6: (36, 8, 1), 7: (37, 0, 1)},
        38: {1: (33, 17, 1), 2: (34, 17, 1), 3: (38, 16, 1), 4: (41, 16, 1), 5: (39, 16, 1)},
        39: {3: (38, 16, 1), 4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1), 7: (5, 16, 1)},
        40: {4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1), 7: (5, 16, 1)},
        41: {2: (34, 17, 1), 3: (38, 16, 1), 4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1)},
        42: {3: (38, 16, 1), 4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1), 7: (37, 0, 1)},
        43: {4: (41, 16, 1), 5: (39, 16, 1), 6: (40, 16, 1), 7: (37, 0, 1)},
        44: {0: (44, 0, 0), 1: (48, 0, 0), 2: (46, 0, 0)},
        45: {2: (46, 0, 0), 3: (3, 8, 0), 4: (4, 8, 0), 5: (42, 8, 0), 6: (43, 8, 0)},
        46: {0: (44, 0, 0), 1: (48, 0, 0), 2: (46, 0, 0), 3: (3, 8, 0), 4: (4, 8, 0)},
        47: {4: (4, 8, 0), 5: (42, 8, 0), 6: (43, 8, 0), 7: (51, 0, 0)},
        48: {0: (44, 0, 0), 1: (48, 0, 0), 2: (46, 0, 0), 3: (3, 8, 0)},
        49: {3: (3, 8, 0), 4: (4, 8, 0), 5: (42, 8, 0), 6: (43, 8, 0), 7: (51, 0, 0)},
        50: {1: (48, 0, 0), 2: (46, 0, 0), 3: (3, 8, 0), 4: (4, 8, 0), 5: (42, 8, 0)},
        51: {5: (42, 8, 0), 6: (43, 8, 0), 7: (51, 0, 0)},
        52: {0: (44, 0, 0), 1: (48, 0, 0), 2: (46, 0, 0), 3: (50, 0, 0),
             4: (45, 0, 0), 5: (49, 0, 0), 6: (47, 0, 0), 7: (51, 0, 0)},
    }

    def __init__(self):
        self.state = 52            # synthesized initial state
        self.input_vars = ['env2']

    def move(self, env2):
        """Given inputs, take move and return outputs.

        @rtype: dict
        @return: dictionary with keys of the output variable names:
            ['loc', 'stage']
        """
        try:
            row = self._TRANSITIONS[self.state]
        except KeyError:
            raise Exception("Unrecognized internal state: " + str(self.state))
        try:
            nxt = row[env2]
        except KeyError:
            self._error(env2)
        # Commit the transition only after the input was validated.
        self.state = nxt[0]
        return {"loc": nxt[1], "stage": nxt[2]}

    def _error(self, env2):
        # Invalid input for the current state.
        raise ValueError("Unrecognized input: " + (
            "env2 = {env2}; ").format(
                env2=env2))
| GaloisInc/planning-synthesis | examples/sitl_client/democontroller1.py | Python | bsd-2-clause | 36,424 | [
"VisIt"
] | f449f3a4ad9ccdecc0272298599a40b36353300a48d8b70e66bb6415b417c531 |
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" myhdl _extractHierarchy module.
"""
import sys
import inspect
from inspect import currentframe, getframeinfo, getouterframes
import re
import string
from types import GeneratorType
import linecache
from myhdl import ExtractHierarchyError, ToVerilogError, ToVHDLError
from myhdl._Signal import _Signal, _isListOfSigs
from myhdl._util import _isGenFunc
from myhdl._misc import _isGenSeq
_profileFunc = None
class _error:
pass
_error.NoInstances = "No instances found"
_error.InconsistentHierarchy = "Inconsistent hierarchy - are all instances returned ?"
_error.InconsistentToplevel = "Inconsistent top level %s for %s - should be 1"
class _Instance(object):
__slots__ = ['level', 'obj', 'subs', 'sigdict', 'memdict', 'name', 'func', 'argdict']
def __init__(self, level, obj, subs, sigdict, memdict, func, argdict):
self.level = level
self.obj = obj
self.subs = subs
self.sigdict = sigdict
self.memdict = memdict
self.func = func
self.argdict = argdict
_memInfoMap = {}
class _MemInfo(object):
__slots__ = ['mem', 'name', 'elObj', 'depth', '_used', '_driven', '_read']
def __init__(self, mem):
self.mem = mem
self.name = None
self.depth = len(mem)
self.elObj = mem[0]
self._used = False
self._driven = None
self._read = None
def _getMemInfo(mem):
    # Look up the _MemInfo previously registered for this signal list;
    # raises KeyError if _makeMemInfo was never called for it.
    return _memInfoMap[id(mem)]
def _makeMemInfo(mem):
    """Return the _MemInfo for *mem*, creating and registering it on first use."""
    key = id(mem)
    try:
        return _memInfoMap[key]
    except KeyError:
        info = _memInfoMap[key] = _MemInfo(mem)
        return info
def _isMem(mem):
    # True when *mem* has been registered as a memory via _makeMemInfo.
    return id(mem) in _memInfoMap
# Per-HDL registry of user-defined code objects, keyed by id() of the
# MyHDL object they are attached to.
_userCodeMap = {'verilog' : {},
                'vhdl' : {}
                }
class _UserCode(object):
__slots__ = ['code', 'namespace', 'funcname', 'func', 'sourcefile', 'sourceline']
def __init__(self, code, namespace, funcname, func, sourcefile, sourceline):
self.code = code
self.namespace = namespace
self.sourcefile = sourcefile
self.func = func
self.funcname = funcname
self.sourceline = sourceline
def __str__(self):
try:
code = self._interpolate()
except:
type, value, tb = sys.exc_info()
info = "in file %s, function %s starting on line %s:\n " % \
(self.sourcefile, self.funcname, self.sourceline)
msg = "%s: %s" % (type, value)
self.raiseError(msg, info)
code = "\n%s\n" % code
return code
def _interpolate(self):
return string.Template(self.code).substitute(self.namespace)
class _UserCodeDepr(_UserCode):
    # Deprecated variant: interpolates with '%'-style formatting instead of
    # string.Template.
    def _interpolate(self):
        return self.code % self.namespace
class _UserVerilogCode(_UserCode):
    # Error reporting specialized for user-defined Verilog code.
    def raiseError(self, msg, info):
        raise ToVerilogError("Error in user defined Verilog code", msg, info)
class _UserVhdlCode(_UserCode):
    # Error reporting specialized for user-defined VHDL code.
    def raiseError(self, msg, info):
        raise ToVHDLError("Error in user defined VHDL code", msg, info)
class _UserVerilogCodeDepr(_UserVerilogCode, _UserCodeDepr):
    # Verilog error reporting combined with deprecated '%' interpolation.
    pass
class _UserVhdlCodeDepr(_UserVhdlCode, _UserCodeDepr):
    # VHDL error reporting combined with deprecated '%' interpolation.
    pass
class _UserVerilogInstance(_UserVerilogCode):
    """User-defined Verilog module instantiation.

    Rendered as ``<funcname> <code>(...)`` with a named port connection for
    every function argument that is bound to a Signal in the namespace.
    """
    def __str__(self):
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec and fall back only on interpreters lacking it.
        getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        args = getargspec(self.func)[0]
        s = "%s %s(" % (self.funcname, self.code)
        sep = ''
        for arg in args:
            if arg in self.namespace and isinstance(self.namespace[arg], _Signal):
                signame = self.namespace[arg]._name
                s += sep
                sep = ','
                s += "\n .%s(%s)" % (arg, signame)
        s += "\n);\n\n"
        return s
class _UserVhdlInstance(_UserVhdlCode):
    """User-defined VHDL entity instantiation with a generated port map."""
    def __str__(self):
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec and fall back only on interpreters lacking it.
        getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        args = getargspec(self.func)[0]
        s = "%s: entity work.%s(MyHDL)\n" % (self.code, self.funcname)
        s += " port map ("
        sep = ''
        for arg in args:
            if arg in self.namespace and isinstance(self.namespace[arg], _Signal):
                signame = self.namespace[arg]._name
                s += sep
                sep = ','
                s += "\n %s=>%s" % (arg, signame)
        s += "\n );\n\n"
        return s
def _addUserCode(specs, arg, funcname, func, frame):
    """Register the user-defined HDL code found in *specs* for object *arg*.

    For each HDL, the most specific spec wins: the instance spec over the
    code spec over the deprecated dunder spec.
    """
    classMap = {
        '__verilog__': _UserVerilogCodeDepr,
        '__vhdl__': _UserVhdlCodeDepr,
        'verilog_code': _UserVerilogCode,
        'vhdl_code': _UserVhdlCode,
        'verilog_instance': _UserVerilogInstance,
        'vhdl_instance': _UserVhdlInstance,
    }
    # Interpolation namespace: the frame's globals overridden by its locals.
    namespace = frame.f_globals.copy()
    namespace.update(frame.f_locals)
    sourcefile = inspect.getsourcefile(frame)
    sourceline = inspect.getsourcelines(frame)[1]
    for hdl in _userCodeMap:
        candidates = ("%s_instance" % hdl, "%s_code" % hdl, "__%s__" % hdl)
        spec = next((c for c in candidates if c in specs), None)
        # XXX add warning logic
        if spec is not None:
            assert id(arg) not in _userCodeMap[hdl]
            _userCodeMap[hdl][id(arg)] = classMap[spec](
                specs[spec], namespace, funcname, func, sourcefile, sourceline)
class _CallFuncVisitor(object):
    """Legacy AST visitor mapping call expressions to assignment lines.

    NOTE(review): this references ``ast.CallFunc``, but no ``ast`` module is
    imported in this file and ``CallFunc`` belongs to the long-removed
    ``compiler.ast`` API -- this class appears to be dead legacy code;
    confirm before relying on it.
    """
    def __init__(self):
        # lineno of the call expression -> lineno of its assignment statement
        self.linemap = {}
    def visitAssign(self, node):
        if isinstance(node.expr, ast.CallFunc):
            self.lineno = None
            self.visit(node.expr)
            self.linemap[self.lineno] = node.lineno
    def visitName(self, node):
        self.lineno = node.lineno
class _HierExtr(object):
    """Extract the instance hierarchy of a MyHDL design.

    The constructor elaborates ``dut(*args, **kwargs)`` with a profiler
    hook installed; the hook records, for every returned generator
    sequence, the signals, memories and sub-instances visible in the
    calling frame.  Afterwards the recorded instances are walked to
    assign relative and absolute instance names.
    """
    def __init__(self, name, dut, *args, **kwargs):
        global _profileFunc
        # Start from a clean slate: these module-level registries would
        # otherwise persist across conversion runs.
        _memInfoMap.clear()
        for hdl in _userCodeMap:
            _userCodeMap[hdl].clear()
        # Frames of these helper functions do not count as hierarchy levels.
        self.skipNames = ('always_comb', 'instance', \
                          'always_seq', '_always_seq_decorator', \
                          'always', '_always_decorator', \
                          'instances', \
                          'processes', 'posedge', 'negedge')
        self.skip = 0
        self.hierarchy = hierarchy = []
        self.absnames = absnames = {}
        self.level = 0
        # Elaborate the design under the profiler hook, then detach it.
        _profileFunc = self.extractor
        sys.setprofile(_profileFunc)
        _top = dut(*args, **kwargs)
        sys.setprofile(None)
        if not hierarchy:
            raise ExtractHierarchyError(_error.NoInstances)
        self.top = _top
        # streamline hierarchy: instances were recorded innermost-first
        hierarchy.reverse()
        # walk the hierarchy to define relative and absolute names
        names = {}
        top_inst = hierarchy[0]
        obj, subs = top_inst.obj, top_inst.subs
        names[id(obj)] = name
        absnames[id(obj)] = name
        if not top_inst.level == 1:
            raise ExtractHierarchyError(_error.InconsistentToplevel % (top_inst.level, name))
        for inst in hierarchy:
            obj, subs = inst.obj, inst.subs
            if id(obj) not in names:
                raise ExtractHierarchyError(_error.InconsistentHierarchy)
            inst.name = names[id(obj)]
            tn = absnames[id(obj)]
            for sn, so in subs:
                names[id(so)] = sn
                absnames[id(so)] = "%s_%s" % (tn, sn)
                # A sub-instance may itself be a tuple/list of instances;
                # give each element an indexed name.
                if isinstance(so, (tuple, list)):
                    for i, soi in enumerate(so):
                        sni = "%s_%s" % (sn, i)
                        names[id(soi)] = sni
                        absnames[id(soi)] = "%s_%s_%s" % (tn, sn, i)
    def extractor(self, frame, event, arg):
        """Profiler hook: track call depth and harvest instance data on return."""
        if event == "call":
            funcname = frame.f_code.co_name
            # skip certain functions
            if funcname in self.skipNames:
                self.skip +=1
            if not self.skip:
                self.level += 1
        elif event == "return":
            funcname = frame.f_code.co_name
            func = frame.f_globals.get(funcname)
            if func is None:
                # Didn't find a func in the global space, try the local "self"
                # argument and see if it has a method called *funcname*
                obj = frame.f_locals.get('self')
                if hasattr(obj, funcname):
                    func = getattr(obj, funcname)
            if not self.skip:
                isGenSeq = _isGenSeq(arg)
                if isGenSeq:
                    # Collect user-defined HDL code specs, either as frame
                    # locals (deprecated dunder form) or as function attributes.
                    specs = {}
                    for hdl in _userCodeMap:
                        spec = "__%s__" % hdl
                        if spec in frame.f_locals and frame.f_locals[spec]:
                            specs[spec] = frame.f_locals[spec]
                        spec = "%s_code" % hdl
                        if func and hasattr(func, spec) and getattr(func, spec):
                            specs[spec] = getattr(func, spec)
                        spec = "%s_instance" % hdl
                        if func and hasattr(func, spec) and getattr(func, spec):
                            specs[spec] = getattr(func, spec)
                    if specs:
                        _addUserCode(specs, arg, funcname, func, frame)
                # building hierarchy only makes sense if there are generators
                if isGenSeq and arg:
                    sigdict = {}
                    memdict = {}
                    argdict = {}
                    if func:
                        # NOTE(review): inspect.getargspec was removed in
                        # Python 3.11; getfullargspec is the replacement.
                        arglist = inspect.getargspec(func).args
                    else:
                        arglist = []
                    cellvars = frame.f_code.co_cellvars
                    # NOTE(review): the loop variable shadows the builtin
                    # 'dict'; kept byte-identical here.
                    for dict in (frame.f_globals, frame.f_locals):
                        for n, v in dict.items():
                            # extract signals and memories
                            # also keep track of whether they are used in generators
                            # only include objects that are used in generators
##                             if not n in cellvars:
##                                 continue
                            if isinstance(v, _Signal):
                                sigdict[n] = v
                                if n in cellvars:
                                    v._markUsed()
                            if _isListOfSigs(v):
                                m = _makeMemInfo(v)
                                memdict[n] = m
                                if n in cellvars:
                                    m._used = True
                            # save any other variable in argdict
                            if (n in arglist) and (n not in sigdict) and (n not in memdict):
                                argdict[n] = v
                    subs = []
                    for n, sub in frame.f_locals.items():
                        for elt in _inferArgs(arg):
                            if elt is sub:
                                subs.append((n, sub))
                    inst = _Instance(self.level, arg, subs, sigdict, memdict, func, argdict)
                    self.hierarchy.append(inst)
                self.level -= 1
            if funcname in self.skipNames:
                self.skip -= 1
def _inferArgs(arg):
c = [arg]
if isinstance(arg, (tuple, list)):
c += list(arg)
return c
| cordoval/myhdl-python | myhdl/_extractHierarchy.py | Python | lgpl-2.1 | 12,586 | [
"VisIt"
] | dd820d639d48be45b1176499ac6e8ee7a9f4673bf4c0719a5bcd7007ebe4dde6 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import sys
import math
try:
import cPickle as pickle
except ImportError:
import pickle
import itertools
from collections import OrderedDict
from .exceptions import *
from .molecule import Molecule
from .modelchems import Method, BasisSet, Error, methods, bases, errors, pubs
from . import psiutil
from . import textables
def initialize_errors():
    """Build an OrderedDict of every statistical measure, initialized to None.

    Keys combine a statistic prefix (pex, nex, max, min, m, ma, rms, std)
    with an error-type suffix (e, pe, pbe, pce), grouped by error type.
    """
    measures = ('pex', 'nex', 'max', 'min', 'm', 'ma', 'rms', 'std')
    return OrderedDict((stat + suffix, None)
                       for suffix in ('e', 'pe', 'pbe', 'pce')
                       for stat in measures)
def initialize_errors_elaborate(e=None, pe=None, pbe=None, pce=None, extrema=True):
    """Build an OrderedDict of error statistics, seeded per error type.

    For each error type whose seed (*e*, *pe*, *pbe*, *pce*) is given, the
    extremum entries (max/min) take the seed value (or None when *extrema*
    is False) and the averaged entries (m, ma, rms, std) start at 0.0;
    types whose seed is None stay entirely None.
    """
    error = OrderedDict()
    for suffix, seed in (('e', e), ('pe', pe), ('pbe', pbe), ('pce', pce)):
        extremum = seed if (seed is not None and extrema) else None
        accumulator = None if seed is None else 0.0
        error['max' + suffix] = extremum
        error['min' + suffix] = extremum
        error['m' + suffix] = accumulator     # mean signed
        error['ma' + suffix] = accumulator    # mean absolute
        error['rms' + suffix] = accumulator   # root mean square
        error['std' + suffix] = accumulator   # standard deviation
    return error
def average_errors(*args):
    """Each item in *args* should be an error dictionary. Performs
    average-like operation over all items, which should be error
    dictionaries, in *args*. Defined for ME, MAE, STDE, and their
    relative-error variants. None returned for undefined statistics or
    when an item is missing.
    """
    ndb = float(len(args))
    avgerror = initialize_errors()
    try:
        for suffix in ['e', 'pe', 'pbe', 'pce']:
            # extrema are carried over; signed/absolute means are averaged;
            # standard deviations are pooled in quadrature
            avgerror['pex' + suffix] = max(x['pex' + suffix] for x in args)
            avgerror['nex' + suffix] = min(x['nex' + suffix] for x in args)
            avgerror['max' + suffix] = max((x['max' + suffix] for x in args), key=abs)
            avgerror['min' + suffix] = min((x['min' + suffix] for x in args), key=abs)
            avgerror['m' + suffix] = sum(x['m' + suffix] for x in args) / ndb
            avgerror['ma' + suffix] = sum(x['ma' + suffix] for x in args) / ndb
            avgerror['rms' + suffix] = sum(x['rms' + suffix] for x in args) / ndb  # TODO: unsure of op validity
            avgerror['std' + suffix] = math.sqrt(sum(x['std' + suffix] ** 2 for x in args) / ndb)
    except TypeError:
        # a None entry (undefined statistic) aborts the arithmetic; any
        # remaining measures stay at their initialized None
        pass
    return avgerror
def format_errors(err, mode=1):
    """From error dictionary *err*, returns a LaTeX-formatted string,
    after handling None entries.

    *mode* 1 returns a one-line LaTeX summary, *mode* 2 a multi-line
    text table, and *mode* 3 an OrderedDict of fixed-width strings.
    """
    fmt_1dec = r"""{0:8.1f}"""
    fmt_2dec = r"""{0:8.2f}"""
    fmt_3dec = r"""{0:12.3f}"""
    fmt_4dec = r"""{0:12.4f}"""
    shortblank = r"""{0:8s}""".format('')

    absolute_keys = ['pexe', 'nexe', 'maxe', 'mine', 'me', 'mae', 'rmse', 'stde']
    relative_keys = ['pexpe', 'nexpe', 'maxpe', 'minpe', 'mpe', 'mape', 'rmspe', 'stdpe',
                     'pexpbe', 'nexpbe', 'maxpbe', 'minpbe', 'mpbe', 'mapbe', 'rmspbe', 'stdpbe',
                     'pexpce', 'nexpce', 'maxpce', 'minpce', 'mpce', 'mapce', 'rmspce', 'stdpce']

    if mode == 1:
        me = ' ----' if err['me'] is None else '%+.2f' % (err['me'])
        stde = '----' if err['stde'] is None else '%.2f' % (err['stde'])
        mae = ' ----' if err['mae'] is None else '%6.2f' % (err['mae'])
        mape = ' ---- ' if err['mape'] is None else '{:6.1f}%'.format(100 * err['mape'])
        mapbe = ' ---- ' if err['mapbe'] is None else '{:6.1f}%'.format(100 * err['mapbe'])  # read for parity; not shown
        mapce = ' ---- ' if err['mapce'] is None else '{:6.1f}%'.format(100 * err['mapce'])
        return r"""$\{%s; %s\}$ %s %s %s""" % (me, stde, mae, mape, mapce)

    if mode == 2:
        sdict = OrderedDict()
        for lbl in absolute_keys:
            sdict[lbl] = ' ----' if err[lbl] is None else fmt_4dec.format(err[lbl])
        for lbl in relative_keys:
            sdict[lbl] = ' ----' if err[lbl] is None else fmt_3dec.format(100 * err[lbl])
        rows = ['nex: {nexe}{nexpe}{nexpbe}{nexpce}',
                'pex: {pexe}{pexpe}{pexpbe}{pexpce}',
                'min: {mine}{minpe}{minpbe}{minpce}',
                'max: {maxe}{maxpe}{maxpbe}{maxpce}',
                'm: {me}{mpe}{mpbe}{mpce}',
                'ma: {mae}{mape}{mapbe}{mapce}',
                'rms: {rmse}{rmspe}{rmspbe}{rmspce}',
                'std: {stde}{stdpe}{stdpbe}{stdpce}']
        return ''.join(row + '\n' for row in rows).format(**sdict)

    if mode == 3:
        sdict = OrderedDict()
        # shortblanks changed from empty strings Aug 2015
        for lbl in absolute_keys:
            sdict[lbl] = shortblank if err[lbl] is None else fmt_2dec.format(err[lbl])
        for lbl in relative_keys:
            sdict[lbl] = shortblank if err[lbl] is None else fmt_1dec.format(100 * err[lbl])
        return sdict
def string_contrast(ss):
    """From an array of strings, *ss*, returns maximum common prefix
    string, maximum common suffix string, and array of middles.
    """
    # sentinel 'q' keeps the suffix scan from consuming a whole string
    marked = [item + 'q' for item in ss if item is not None]
    shortest = min(marked, key=len)

    preidx = 0
    for pos in range(len(shortest)):
        if any(word[pos] != shortest[pos] for word in marked):
            preidx = pos
            break

    sufidx = -len(shortest)
    for pos in range(len(shortest)):
        neg = -(pos + 1)
        if any(word[neg] != shortest[neg] for word in marked):
            sufidx = neg + 1
            break

    mids = iter(word[preidx:sufidx] for word in marked)
    prefix = shortest[:preidx]
    suffix = shortest[sufidx:-1]
    # None entries map to '' while the others consume middles in order
    middle = ['' if item is None else next(mids) for item in ss]
    return prefix, suffix, middle
def oxcom(lst):
    """Return items of *lst* joined into a grammatical (Oxford-comma)
    English list string.
    """
    words = [str(item) for item in lst]
    if not words:
        return ''
    if len(words) == 1:
        return words[0]
    if len(words) == 2:
        return words[0] + ' and ' + words[1]
    return '{0}, and {1}'.format(', '.join(words[:-1]), words[-1])
def cure_weight(refrxn, refeq, rrat, xi=0.2):
    """
    :param refrxn: value of benchmark for the Reaction
    :param refeq: value of benchmark for equilibrium Reaction
    :param rrat: ratio of intermonomer separation for Reaction to equilibrium Reaction
    :param xi: parameter
    :return: weight for CURE
    """
    # weight is the reaction magnitude, floored at a separation-damped fraction of the equilibrium magnitude
    floor = xi * abs(refeq) / rrat ** 3
    return max(abs(refrxn), floor)
def balanced_error(refrxn, refeq, rrat, m=0.03, p=10.0):
    """
    :param refrxn: value of benchmark for the Reaction
    :param refeq: value of benchmark for equilibrium Reaction
    :param rrat: ratio of intermonomer separation for Reaction to equilibrium Reaction
    :param m: minimum permitted weight for a point
    :param p: multiples of abs(refeq) above refeq to which zero-line in head is displaced
    :return: (mask, weight) tuple
    """
    # q displaces the zero-line only on the short-separation (rrat < 1) side
    q = 1.0 if rrat >= 1.0 else p
    shifted = q - 1.0 + refrxn / refeq
    weight = max(m, shifted / q)
    mask = weight * q / abs(shifted)
    return mask, weight
def fancify_mc_tag(mc, latex=False):
    """From the usual MTD-opt1_opt2-bas model chemistry identifier, return
    string based on fullname, if *latex* is False or latex if *latex* is True.
    """
    try:
        mtd, mod, bas = mc.split('-')
    except ValueError:
        # identifier not in MTD-mod-bas form; pass it through untouched
        return mc
    if latex:
        return r"""%20s / %-20s %s""" % (methods[mtd].latex, bases[bas].latex, mod)
    return r"""%20s / %s, %s""" % (methods[mtd].fullname, bases[bas].fullname, mod)
class ReactionDatum(object):
    """Piece of quantum chemical information that describes a qcdb.Reaction object."""

    def __init__(self, dbse, rxn, method, mode, basis, value, units='kcal/mol', citation=None, doi=None, comment=None):
        # database-qualified reaction label, e.g., 'S22-2'
        self.dbrxn = '-'.join([dbse, str(rxn)])
        # qcdb.Method
        self.method = method
        # stoichiometry mode, e.g., unCP, CP, RLX, etc.
        self.mode = mode
        # qcdb.BasisSet
        self.basis = basis
        # numerical value for reaction
        self.value = float(value)
        # energy unit attached to value, defaults to kcal/mol
        self.units = units
        # publication citation of value
        self.citation = citation
        # digital object identifier for publication (maybe this should be doi of datum, not of pub?)
        self.doi = doi
        # additional comments
        self.comment = comment

    @classmethod
    def library_modelchem(cls, dbse, rxn, method, mode, basis, value, units='kcal/mol', citation=None, doi=None,
                          comment=None):
        """Constructor when method and basis are strings corresponding to
        qcdb.Method and qcdb.BasisSet already defined in methods and bases.

        Raises ValidationError when *method*, *basis*, or *citation* is
        not found in the library dictionaries.
        """
        # computational method
        try:
            tmp_method = methods[method.upper()]
        except KeyError as e:
            raise ValidationError("""Invalid ReactionDatum method %s: %s""" % (method, e))
        # computational basis set
        try:
            tmp_basis = bases[basis.lower()]
        except KeyError as e:
            raise ValidationError("""Invalid ReactionDatum basis %s: %s""" % (basis, e))
        # publication (None passes through)
        if citation is None:
            tmp_pub = None
        else:
            try:
                tmp_pub = pubs[citation.lower()]
            except KeyError as e:
                raise ValidationError("""Invalid ReactionDatum publication %s: %s""" % (citation, e))
        return cls(dbse, rxn, tmp_method, mode, tmp_basis, value, units, citation=tmp_pub, doi=doi, comment=comment)

    def __str__(self):
        """Render a human-readable summary of this datum."""
        pieces = [""" ==> ReactionDatum <==\n\n""",
                  """ Database reaction: %s\n""" % (self.dbrxn),
                  """ Method: %s\n""" % (self.method.fullname),
                  """ Mode: %s\n""" % (self.mode),
                  """ Basis: %s\n""" % (self.basis.fullname),
                  """ Value: %f [%s]\n""" % (self.value, self.units),
                  """ Citation: %s %s\n""" % (self.citation.name, self.citation.doi),
                  """ DOI: %s\n""" % (self.doi),
                  """ Comment: %s\n""" % (self.comment),
                  """\n"""]
        return ''.join(pieces)
class Subset(object):
    """Affiliated qcdb.Reaction-s

    :param name: subset identifier
    :param hrxn: array of reaction names belonging to the subset
    :param tagl: description line
    :param axis: optional mapping of axis label to per-reaction values
    """

    def __init__(self, name, hrxn, tagl=None, axis=None):
        # identifier
        self.name = name
        # array of reactions names
        self.hrxn = hrxn
        # description line
        self.tagl = tagl
        # mathematical relationships of reactions
        # BUG FIX: *axis* was previously accepted but silently discarded;
        # honor it when provided (default remains an empty OrderedDict)
        self.axis = OrderedDict() if axis is None else OrderedDict(axis)

    def __str__(self):
        text = ''
        text += """ ==> %s Subset <==\n\n""" % (self.name)
        text += """ Tagline: %s\n""" % (self.tagl)
        text += """ %20s""" % ('Reactions')
        for ax in self.axis.keys():
            text += """ %8s""" % (ax)
        text += """\n"""
        for ix in range(len(self.hrxn)):
            text += """ %20s""" % (str(self.hrxn[ix]))
            for ax in self.axis.values():
                text += """ %8.3f""" % (ax[ix])
            text += """\n"""
        text += """\n"""
        return text
class Reagent(object):
    """Chemical entity only slightly dressed up from qcdb.Molecule."""

    def __init__(self, name, mol, tagl=None, comment=None):
        # full name, e.g., 'S22-2-dimer' or 'NBC1-BzMe-8.0-monoA-CP' or 'HTBH-HCl-reagent'
        self.name = name
        # geometry, stored as psi4-format string built from the qcdb.Molecule;
        # NRE doubles as a duck-type check that *mol* really is a Molecule
        try:
            self.NRE = mol.nuclear_repulsion_energy()
        except AttributeError:
            raise ValidationError("""Reagent must be instantiated with qcdb.Molecule object.""")
        else:
            self.mol = mol.create_psi4_string_from_molecule()
        # description line
        self.tagl = tagl
        # molecular charge
        self.charge = mol.molecular_charge()

    def __str__(self):
        """Render a human-readable summary of this reagent."""
        pieces = [""" ==> %s Reagent <==\n\n""" % (self.name),
                  """ Tagline: %s\n""" % (self.tagl),
                  """ NRE: %f\n""" % (self.NRE),
                  """ Molecule: \n%s""" % (self.mol),
                  """\n"""]
        return ''.join(pieces)
class Reaction(object):
    """A single database reaction: identity, stoichiometry (reaction
    matrices), attached quantum chemical data, and plotting metadata.
    """

    def __init__(self, name, dbse, indx, tagl=None, latex=None, color='black', comment=None):
        # name, e.g., '2' or 'BzMe-8.0'
        self.name = name
        # database reaction name, e.g., 'S22-2' or 'NBC1-BzMe-8.0'
        self.dbrxn = dbse + '-' + str(name)
        # numerical index of reaction
        self.indx = indx
        # description line
        self.tagl = tagl
        # latex description
        self.latex = latex
        # addl comments
        self.comment = comment
        # reaction matrices, specifying reagent contributions per reaction
        self.rxnm = {}
        # qcdb.ReactionDatum objects of quantum chemical data pertaining to reaction, keyed by modelchem label
        self.data = {}
        # label (into self.data) of the benchmark qcdb.ReactionDatum; None until assigned
        self.benchmark = None
        # color for plotting
        self.color = color

    def __str__(self):
        """Render a human-readable summary of this reaction."""
        text = ''
        text += """ ==> %s Reaction <==\n\n""" % (self.name)
        text += """ Database reaction: %s\n""" % (self.dbrxn)
        text += """ Index: %s\n""" % (self.indx)
        text += """ LaTeX representation: %s\n""" % (self.latex)
        text += """ Tagline: %s\n""" % (self.tagl)
        text += """ Comment: %s\n""" % (self.comment)
        if self.benchmark is None:
            text += """ Benchmark: %s\n""" % ('UNDEFINED')
        else:
            text += """ Benchmark: %f\n""" % (self.data[self.benchmark].value)
        text += """ Color: %s\n""" % (str(self.color))
        text += """ Reaction matrix:\n"""
        for mode, rxnm in self.rxnm.items():
            text += """ %s\n""" % (mode)
            for rgt, coeff in rxnm.items():
                text += """ %3d %s\n""" % (coeff, rgt.name)
        text += """ Data:\n"""
        for label, datum in sorted(self.data.items()):
            text += """ %8.2f %s\n""" % (datum.value, label)
        text += """\n"""
        return text

    def compute_errors(self, benchmark='default', mcset='default', failoninc=True, verbose=False):
        """For all data or modelchem subset *mcset*, computes raw reaction
        errors between *modelchem* and *benchmark* model chemistries.
        Returns error if model chemistries are missing for any reaction in
        subset unless *failoninc* set to False, whereupon returns partial.
        Returns dictionary of reaction labels and error forms
        [signed error, relative error, balanced error (currently identical
        to relative error; TODO define BER)].
        """
        if mcset == 'default':
            lsslist = self.data.keys()
        elif callable(mcset):
            # mcset is function that will generate subset of HRXN from sset(self)
            lsslist = [mc for mc in self.data.keys() if mc in mcset(self)]  # untested
        else:
            # mcset is array containing modelchemistries
            lsslist = [mc for mc in self.data.keys() if mc in mcset]
        # assemble dict of qcdb.Reaction objects from array of reaction names
        lsset = OrderedDict()
        for mc in lsslist:
            lsset[mc] = self.data[mc]

        # resolve the reference value; a missing benchmark datum is fatal
        lbench = self.benchmark if benchmark == 'default' else benchmark
        try:
            mcGreater = self.data[lbench].value
        except KeyError as e:
            raise ValidationError("""Reaction %s missing benchmark datum %s.""" % (self.name, str(e)))

        err = {}
        for label, datum in lsset.items():
            try:
                mcLesser = datum.value
            except KeyError as e:
                if failoninc:
                    raise ValidationError("""Reaction %s missing datum %s.""" % (label, str(e)))
                else:
                    continue

            err[label] = [mcLesser - mcGreater,
                          (mcLesser - mcGreater) / abs(mcGreater),
                          (mcLesser - mcGreater) / abs(mcGreater)]  # TODO define BER
            if verbose:
                print("""p = %6.2f, pe = %6.1f%%, bpe = %6.1f%% modelchem %s.""" %
                      (err[label][0], 100 * err[label][1], 100 * err[label][2], label))
        return err

    def plot(self, benchmark='default', mcset='default',
             failoninc=True, verbose=False, color='sapt',
             xlimit=4.0, labeled=True, view=True,
             mousetext=None, mouselink=None, mouseimag=None, mousetitle=None, mousediv=None,
             saveas=None, relpath=False, graphicsformat=['pdf']):
        """Computes individual errors over model chemistries in *mcset* (which
        may be default or an array or a function generating an array) versus
        *benchmark*. Thread *color* can be 'rgb' for old coloring, a color
        name or 'sapt' for spectrum coloring.

        *saveas* conveys directory ('/') and/or filename for saving the
        resulting plot. File extension is not accessible, but *graphicsformat*
        array requests among 'png', 'pdf', and 'eps' formats. *relpath*
        forces paths to saved files to be relative to current directory,
        rather than absolute paths for returned code and file dictionary.

        Prepares thread diagram instructions and either executes them if
        matplotlib available (Canopy or Anaconda) or prints them. Returns a
        dictionary of all saved plot filenames. If any of *mousetext*, *mouselink*,
        or *mouseimag* is specified, htmlcode will be returned with an image map of
        slats to any of text, link, or image, respectively.

        NOTE(review): when matplotlib is unavailable, the instructions are
        printed and the method implicitly returns None.
        """
        # compute errors
        dbse = self.dbrxn.split('-')[0]
        indiv = self.compute_errors(benchmark=benchmark, mcset=mcset,
                                    failoninc=failoninc, verbose=verbose)

        # repackage into the list-of-dicts layout mpl.threads consumes
        dbdat = []
        for mc in indiv.keys():
            dbdat.append({'db': dbse,
                          'show': fancify_mc_tag(mc),
                          'sys': mc,
                          'color': self.color,
                          'data': [indiv[mc][0]]})
        mae = None  # [errors[ix][self.dbse]['mae'] for ix in index]
        mape = None  # [100 * errors[ix][self.dbse]['mape'] for ix in index]

        # form unique filename
        # ixpre, ixsuf, ixmid = string_contrast(index)
        # title = self.dbse + ' ' + ixpre + '[]' + ixsuf
        title = self.dbrxn
        labels = ['']

        # generate matplotlib instructions and call or print
        try:
            from . import mpl
            import matplotlib.pyplot as plt
        except ImportError:
            # if not running from Canopy, print line to execute from Canopy
            print("""filedict, htmlcode = mpl.threads(%s,\n color='%s',\n title='%s',\n labels=%s,\n mae=%s,\n mape=%s\n xlimit=%s\n labeled=%s\n saveas=%s\n mousetext=%s\n mouselink=%s\n mouseimag=%s\n mousetitle=%s,\n mousediv=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
                  (dbdat, color, title, labels, mae, mape, str(xlimit),
                   repr(labeled), repr(saveas), repr(mousetext), repr(mouselink), repr(mouseimag),
                   repr(mousetitle), repr(mousediv), repr(relpath), repr(graphicsformat)))
        else:
            # if running from Canopy, call mpl directly
            filedict, htmlcode = mpl.threads(dbdat, color=color, title=title, labels=labels, mae=mae, mape=mape,
                                             xlimit=xlimit, labeled=labeled, view=view,
                                             mousetext=mousetext, mouselink=mouselink,
                                             mouseimag=mouseimag, mousetitle=mousetitle, mousediv=mousediv,
                                             saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
            return filedict, htmlcode
class WrappedDatabase(object):
    """Wrapper class for raw Psi4 database modules that does some validation
    of contents, creates member data and accessors for database structures,
    defines error computation, and handles database subsets. Not to be used
    directly-- see qcdb.Database for handling single or multiple
    qdcb.WrappedDatabase objects and defining nice statistics, plotting, and
    table functionalities.

    >>> asdf = qcdb.WrappedDatabase('Nbc10')
    """

    def __init__(self, dbname, pythonpath=None):
        """Instantiate class with case insensitive name *dbname*. Module
        search path can be prepended with *pythonpath*.

        Raises ValidationError when the database module cannot be imported
        or lacks required attributes (dbse, GEOS, HRXN, ACTV, RXNM).
        """
        #: internal name of database
        #:
        #: >>> print asdf.dbse
        #: 'NBC1'
        self.dbse = None

        #: description line
        #:
        #: >>> print asdf.tagl
        #: 'interaction energies of dissociation curves for non-bonded systems'
        self.tagl = None

        #: OrderedDict of reactions/members
        #:
        #: >>> print asdf.hrxn.keys()
        #: ['BzBz_S-3.2', 'BzBz_S-3.3', ... 'BzBz_PD36-2.8', 'BzBz_PD36-3.0']
        self.hrxn = None

        #: dict of reagents/geometries
        #:
        #: >>> print asdf.hrgt.keys()
        #: ['NBC1-BzBz_PD32-0.8-monoA-CP', 'NBC1-BzBz_PD34-0.6-dimer', ... 'NBC1-BzBz_PD34-1.7-dimer']
        self.hrgt = None

        #: dict of defined reaction subsets.
        #: Note that self.sset['default'] contains all the nonredundant information.
        #:
        #: >>> print asdf.sset.keys()
        #: ['meme', 'mxddpp', '5min', ... 'small']
        self.sset = None

        # Removing hrxn, hrgt etc. do not reduce the size of the object.
        # These attributes are stored for ease of access for adding qc info, etc.

        #: object of defined reaction subsets.
        self.oss = None

        # load database module by case-insensitive name, optionally from *pythonpath*
        if pythonpath is not None:
            sys.path.insert(1, pythonpath)
        else:
            sys.path.append(os.path.dirname(__file__) + '/../databases')
        database = psiutil.import_ignorecase(dbname)
        if not database:
            print('\nPython module for database %s failed to load\n\n' % (dbname))
            print('\nSearch path that was tried:\n')
            print(", ".join(map(str, sys.path)))
            raise ValidationError("Python module loading problem for database " + str(dbname))

        # gross validation of database: these attributes are required ...
        for item in ['dbse', 'GEOS', 'HRXN', 'ACTV', 'RXNM']:
            try:
                getattr(database, item)
            except AttributeError:
                raise ValidationError("""Database %s severely deformed with %s missing.""" % (database.__name__, item))
        # ... while these are merely expected
        for item in ['TAGL', 'BIND']:
            try:
                getattr(database, item)
            except AttributeError:
                print("""Warning: Database %s possibly deformed with %s missing.\n""" % (database.__name__, item))

        # form database name
        self.dbse = database.dbse
        try:
            self.tagl = database.TAGL['dbse']
        except KeyError:
            print("""Warning: TAGL missing for database %s""" % (self.dbse))

        # form array of database contents to process through; parsed items
        # are removed as each section below consumes them
        pieces = []
        for item in dir(database):
            if item in ['qcdb', 'rxn', 'dbse', 'TAGL']:
                pass
            elif item.startswith('__'):
                pass
            else:
                pieces.append(item)

        # form qcdb.Reagent objects from all defined geometries, GEOS
        oHRGT = {}
        for rgt, mol in database.GEOS.items():
            mol.update_geometry()
            try:
                tagl = database.TAGL[rgt]
            except KeyError:
                tagl = None
                print("""Warning: TAGL missing for reagent %s""" % (rgt))
            oHRGT[rgt] = Reagent(name=rgt, mol=mol, tagl=tagl)
        pieces.remove('GEOS')
        self.hrgt = oHRGT

        # form qcdb.Reaction objects from comprehensive reaction list, HRXN
        oHRXN = OrderedDict()
        for rxn in database.HRXN:
            try:
                tagl = database.TAGL[database.dbse + '-' + str(rxn)]
            except KeyError:
                tagl = None
                print("""Warning: TAGL missing for reaction %s""" % (rxn))
            # color from SAPT elst/disp fractions when available, else plain black
            try:
                elst = database.DATA['SAPT ELST ENERGY'][database.dbse + '-' + str(rxn)]
                disp = database.DATA['SAPT DISP ENERGY'][database.dbse + '-' + str(rxn)]
                color = abs(elst) / (abs(elst) + abs(disp))
            except (KeyError, AttributeError):
                color = 'black'
                print("""Warning: DATA['SAPT * ENERGY'] missing for reaction %s""" % (rxn))
            oHRXN[rxn] = Reaction(name=rxn,
                                  dbse=database.dbse,
                                  indx=database.HRXN.index(rxn) + 1,
                                  color=color,
                                  tagl=tagl)
        pieces.remove('HRXN')
        self.hrxn = oHRXN

        # list and align database stoichiometry modes, ACTV* and RXNM*
        oACTV = {}
        for modactv in [item for item in pieces if item.startswith('ACTV')]:
            modrxnm = modactv.replace('ACTV', 'RXNM')
            mode = 'default' if modactv == 'ACTV' else modactv.replace('ACTV_', '')
            # fall back to the plain RXNM when no mode-specific matrix exists
            try:
                getattr(database, modrxnm)
            except AttributeError:
                modrxnm = 'RXNM'
            oACTV[mode] = [modactv, modrxnm]
        for item in [tmp for tmp in pieces if tmp.startswith('ACTV') or tmp.startswith('RXNM')]:
            pieces.remove(item)

        # populate reaction matrices in qcdb.Reaction objects
        for rxn in database.HRXN:
            dbrxn = database.dbse + '-' + str(rxn)
            for mode, actvrxnm in oACTV.items():
                tdict = OrderedDict()
                for rgt in getattr(database, actvrxnm[0])[dbrxn]:
                    tdict[oHRGT[rgt]] = getattr(database, actvrxnm[1])[dbrxn][rgt]
                oHRXN[rxn].rxnm[mode] = tdict

        # list embedded quantum chem info per rxn, incl. BIND*
        arrsbind = [item for item in pieces if item.startswith('BIND_')]
        if len(arrsbind) == 0:
            if 'BIND' in pieces:
                arrsbind = ['BIND']
            else:
                arrsbind = []
                print("""Warning: No BIND array with reference values.""")
        else:
            # one BIND_* array is expected to alias the master BIND
            for arrbind in arrsbind:
                if getattr(database, arrbind) is database.BIND:
                    break
            else:
                print("""Warning: No BIND_* array assigned to be master BIND.""")

        oBIND = {}
        for arrbind in arrsbind:
            ref = database.dbse + 'REF' if arrbind == 'BIND' else arrbind.replace('BIND_', '')
            # register the reference as pseudo method/basis in the library dicts
            methods[ref] = Method(name=ref)
            bases[ref] = BasisSet(name=ref)
            try:
                getattr(database, 'BINDINFO_' + ref)
            except AttributeError:
                arrbindinfo = None
                print("""Warning: No BINDINFO dict with BIND attribution and modelchem for %s.""" % (ref))
            else:
                arrbindinfo = 'BINDINFO_' + ref
            # [method, mode, basis, array name, is-master flag, BINDINFO name or None]
            oBIND[ref] = [methods[ref], 'default', bases[ref], arrbind,
                          (getattr(database, arrbind) is database.BIND),
                          arrbindinfo]
        for item in [tmp for tmp in pieces if tmp.startswith('BIND')]:
            pieces.remove(item)

        # populate data with reference values in qcdb.Reaction objects
        for rxn in database.HRXN:
            dbrxn = database.dbse + '-' + str(rxn)
            for ref, info in oBIND.items():
                bindval = getattr(database, info[3])[dbrxn]
                if info[5] is None:
                    methodfeed = info[0]
                    modefeed = info[1]
                    basisfeed = info[2]
                    citationkey = 'anon'
                else:
                    # per-reaction BINDINFO entries override the defaults
                    bindinforxn = getattr(database, info[5])[dbrxn]
                    methodfeed = methods[bindinforxn['method'].upper()] if 'method' in bindinforxn else info[0]
                    modefeed = bindinforxn['mode'] if 'mode' in bindinforxn else info[1]
                    basisfeed = bases[bindinforxn['basis'].lower()] if 'basis' in bindinforxn else info[2]
                    citationkey = bindinforxn['citation'].lower() if 'citation' in bindinforxn else 'anon'
                citationfeed = pubs[citationkey]

                if bindval is not None:
                    oHRXN[rxn].data[ref] = ReactionDatum(dbse=database.dbse, rxn=rxn,
                                                         method=methodfeed, mode=modefeed,
                                                         basis=basisfeed, citation=citationfeed,
                                                         value=bindval)
                    # oHRXN[rxn].data[ref] = ReactionDatum(dbse=database.dbse,
                    #                                      rxn=rxn,
                    #                                      method=info[0],
                    #                                      mode=info[1],
                    #                                      basis=info[2],
                    #                                      value=bindval)
                    # #value=getattr(database, info[3])[dbrxn])
                    if info[4]:
                        oHRXN[rxn].benchmark = ref

        # Process subsets: any remaining non-AXIS_ iterable whose members
        # are all reactions is treated as a subset definition
        oSSET = {}
        fsHRXN = frozenset(database.HRXN)
        for sset in pieces:
            if not sset.startswith('AXIS_'):
                try:
                    fssset = frozenset(getattr(database, sset))
                except TypeError:
                    continue
                if fssset.issubset(fsHRXN):
                    oSSET[sset] = getattr(database, sset)
        for item in oSSET.keys():
            pieces.remove(item)
        oSSET['HRXN'] = database.HRXN

        self.sset = OrderedDict()
        self.oss = OrderedDict()  # just in case oss replaces sset someday
        for item in oSSET.keys():
            # map module naming conventions onto friendly subset labels
            if item == 'HRXN_SM':
                label = 'small'
            elif item == 'HRXN_LG':
                label = 'large'
            elif item == 'HRXN_EQ':
                label = 'equilibrium'
            elif item == 'HRXN':
                label = 'default'
            elif item.startswith('HRXN_'):
                label = item.replace('HRXN_', '').lower()
            else:
                label = item.lower()

            # subsets may have different ordering from HRXN
            self.sset[label] = OrderedDict()
            for rxn in oSSET[item]:
                self.sset[label][rxn] = oHRXN[rxn]

            # initialize subset objects with light info
            try:
                sstagl = database.TAGL[item]
            except KeyError:
                try:
                    sstagl = database.TAGL[label]
                except KeyError:
                    sstagl = None
                    print("""Warning: TAGL missing for subset %s""" % (label))
            self.oss[label] = Subset(name=label,
                                     hrxn=self.sset[label].keys(),
                                     tagl=sstagl)

        # Process axes: attach AXIS_* value arrays to every subset they fully cover
        for axis in [item for item in pieces if item.startswith('AXIS_')]:
            label = axis.replace('AXIS_', '')
            try:
                defn = getattr(database, axis)
            except AttributeError:
                raise ValidationError("""Axis %s not importable.""" % (label))
            axisrxns = frozenset(defn.keys())
            attached = False
            for ss, rxns in self.sset.items():
                if frozenset(rxns).issubset(axisrxns):
                    # axis values stored in the subset's own reaction order
                    ordered_floats = []
                    for rx in self.oss[ss].hrxn:
                        ordered_floats.append(defn[rx])
                    self.oss[ss].axis[label] = ordered_floats
                    attached = True
            if not attached:
                print("""Warning: AXIS %s not affiliated with a subset""" % (label))
            pieces.remove(axis)

        print("""WrappedDatabase %s: Unparsed attributes""" % (self.dbse), pieces)
def __str__(self):
    """Render a human-readable summary of this wrapped database."""
    pieces = [""" ==> %s WrappedDatabase <==\n\n""" % (self.dbse),
              """ Reagents: %s\n""" % (self.hrgt.keys()),
              """ Reactions: %s\n""" % (self.hrxn.keys()),
              """ Subsets: %s\n""" % (self.sset.keys()),
              """ Reference: %s\n""" % (self.benchmark()),
              """\n"""]
    return ''.join(pieces)
def add_ReactionDatum(self, dbse, rxn, method, mode, basis, value, units='kcal/mol', citation=None, comment=None,
                      overwrite=False):
    """Add a new quantum chemical value to *rxn* by creating a
    qcdb.ReactionDatum from same arguments as that class's
    object-less constructor. *rxn* may be actual Reaction.name
    or Reaction.indx.

    Raises ValidationError when *dbse* does not match this database,
    when *rxn* is neither a reaction name nor a valid 1-based index,
    or when the datum already exists and *overwrite* is False.
    """
    if self.dbse == dbse:
        if rxn in self.hrxn:
            rxnname = rxn  # rxn is proper reaction name
        else:
            try:
                # BUG FIX: dict .items() view is not subscriptable under
                # Python 3; materialize to a list before indexing so a
                # valid 1-based reaction index no longer raises TypeError
                ordered_rxns = list(self.hrxn.items())
                if (rxn + 1 > 0) and (rxn == ordered_rxns[rxn - 1][1].indx):
                    rxnname = ordered_rxns[rxn - 1][1].name  # rxn is reaction index (maybe dangerous?)
                else:
                    # index within range but not matching indx: still invalid
                    # (previously left rxnname unbound, yielding NameError)
                    raise IndexError
            except (TypeError, IndexError):
                raise ValidationError(
                    """Inconsistent to add ReactionDatum for %s to database %s with reactions %s.""" %
                    (dbse + '-' + str(rxn), self.dbse, self.hrxn.keys()))
        label = '-'.join([method, mode, basis])
        if overwrite or (label not in self.hrxn[rxnname].data):
            self.hrxn[rxnname].data[label] = ReactionDatum.library_modelchem(dbse=dbse, rxn=rxnname,
                                                                             method=method, mode=mode, basis=basis,
                                                                             value=value, units=units,
                                                                             comment=comment, citation=citation)
        else:
            raise ValidationError("""ReactionDatum %s already present in Database.""" % (label))
    else:
        raise ValidationError("""Inconsistent to add ReactionDatum for %s to database %s.""" %
                              (dbse + '-' + str(rxn), self.dbse))
def add_Subset(self, name, func):
    """Define a new subset labeled *name* by providing a function
    *func* that filters *self.hrxn*.
    """
    # first line of *name* is the label; optional second line is a tagline
    headers = name.lower().split('\n')
    label = headers.pop(0)
    tagl = headers[0].strip() if headers else None
    try:
        chosen = func(self)
        members = [rxn for rxn in self.sset['default'].keys() if rxn in chosen]
    except TypeError as e:
        raise ValidationError("""Function %s did not return list: %s.""" % (func.__name__, str(e)))
    if not members:
        print("""WrappedDatabase %s: Subset %s NOT formed: empty""" % (self.dbse, label))
        return

    self.sset[label] = OrderedDict((rxn, self.hrxn[rxn]) for rxn in members)
    self.oss[label] = Subset(name=label,
                             hrxn=self.sset[label].keys(),
                             tagl=tagl)
    print("""WrappedDatabase %s: Subset %s formed: %d""" % (self.dbse, label, len(self.sset[label].keys())))
def compute_errors(self, modelchem, benchmark='default', sset='default', failoninc=True, verbose=False):
    """For full database or subset *sset*, computes raw reaction
    errors between *modelchem* and *benchmark* model chemistries.
    Returns error if model chemistries are missing for any reaction in
    subset unless *failoninc* set to False, whereupon returns partial.
    Returns dictionary of reaction labels and error forms
    [signed error, relative error, two placeholder copies of the relative
    error (marked FAKE pending real balanced/capped measures), weight].
    """
    # BUG FIX: `basestring` does not exist under Python 3 and raised
    # NameError on every call with a string subset name; `str` is correct
    if isinstance(sset, str):
        # sset is normal subset name 'MX' corresponding to HRXN_MX or MX array in database module
        try:
            lsset = self.sset[sset.lower()]
        except KeyError as e:
            # raise ValidationError("""Subset named %s not available""" % (str(e)))
            lsset = OrderedDict()
    else:
        if callable(sset):
            # sset is function that will generate subset of HRXN from sset(self)
            lsslist = [rxn for rxn in self.sset['default'].keys() if rxn in sset(self)]
        else:
            # sset is array containing reactions
            lsslist = [rxn for rxn in self.sset['default'].keys() if rxn in sset]
        # assemble dict of qcdb.Reaction objects from array of reaction names
        lsset = OrderedDict()
        for rxn in lsslist:
            lsset[rxn] = self.hrxn[rxn]

    # TODO: CURE/balanced-error weighting (cure_weight, balanced_error)
    # was prototyped here but is disabled; entries below marked FAKE
    err = {}
    for rxn, oRxn in lsset.items():
        lbench = oRxn.benchmark if benchmark == 'default' else benchmark
        try:
            mcLesser = oRxn.data[modelchem].value
        except KeyError as e:
            if failoninc:
                raise ValidationError("""Reaction %s missing datum %s.""" % (str(rxn), str(e)))
            else:
                continue
        try:
            mcGreater = oRxn.data[lbench].value
        except KeyError as e:
            if lbench == 'ZEROS':
                pass
            else:
                print("""Reaction %s missing benchmark""" % (str(rxn)))
                continue

        if lbench == 'ZEROS':
            err[rxn] = [mcLesser,
                        0.0, 0.0, 0.0, 1.0]  # FAKE
        else:
            delta = mcLesser - mcGreater
            err[rxn] = [delta,
                        delta / abs(mcGreater),
                        delta / abs(mcGreater),  # FAKE
                        delta / abs(mcGreater),  # FAKE
                        1.0  # FAKE
                        ]
        if verbose:
            print("""p = %8.4f, pe = %8.3f%%, pbe = %8.3f%% pce = %8.3f%% reaction %s.""" %
                  (err[rxn][0], 100 * err[rxn][1], 100 * err[rxn][3], 100 * err[rxn][2], str(rxn)))
    return err
    def compute_statistics(self, modelchem, benchmark='default', sset='default',
                           failoninc=True, verbose=False, returnindiv=False):
        """For full database or subset *sset*, computes many error
        statistics between single *modelchem* and *benchmark* model
        chemistries. Returns error if model chemistries are missing
        for any reaction in subset unless *failoninc* set to False,
        whereupon returns partial statistics. Returns dictionary of
        statistics labels and values.

        Each err[rxn] entry from compute_errors is a 5-list:
        [0] signed error, [1] relative error, [2] capped relative error,
        [3] balanced relative error, [4] balanced weight ([2]-[4] are
        presently placeholder copies marked FAKE upstream).
        If *returnindiv* is True, returns (statistics dict, per-reaction
        error dict) instead of the statistics dict alone.
        """
        err = self.compute_errors(modelchem, benchmark=benchmark, sset=sset, failoninc=failoninc, verbose=verbose)
        if len(err) == 0:
            # no reaction had both datums; hand back the all-None skeleton
            error = initialize_errors()
            if verbose:
                print("""Warning: nothing to compute.""")
        else:
            Nrxn = float(len(err))
            error = OrderedDict()
            # linear (absolute) error
            linear = [val[0] for val in err.values()]
            error['pexe'] = max(linear)  # most positive error
            error['nexe'] = min(linear)  # most negative error
            error['maxe'] = max(linear, key=lambda x: abs(x))  # largest-magnitude error
            error['mine'] = min(linear, key=lambda x: abs(x))  # smallest-magnitude error
            error['me'] = sum(linear) / Nrxn  # mean (signed) error
            error['mae'] = sum(map(abs, linear)) / Nrxn  # mean absolute error
            error['rmse'] = math.sqrt(sum(map(lambda x: x ** 2, linear)) / Nrxn)  # root-mean-square error
            # population standard deviation: E[x^2] - E[x]^2 form
            error['stde'] = math.sqrt((sum(map(lambda x: x ** 2, linear)) - (sum(linear) ** 2) / Nrxn) / Nrxn)
            # fractional (relative) error
            relative = [val[1] for val in err.values()]
            error['pexpe'] = max(relative)
            error['nexpe'] = min(relative)
            error['maxpe'] = max(relative, key=lambda x: abs(x))
            error['minpe'] = min(relative, key=lambda x: abs(x))
            error['mpe'] = sum(relative) / Nrxn
            error['mape'] = sum(map(abs, relative)) / Nrxn
            error['rmspe'] = math.sqrt(sum(map(lambda x: x ** 2, relative)) / Nrxn)
            error['stdpe'] = math.sqrt((sum(map(lambda x: x ** 2, relative)) - (sum(relative) ** 2) / Nrxn) / Nrxn)
            # balanced (relative) error -- means are normalized by the summed
            # balance weights rather than the reaction count
            balanced = [val[3] for val in err.values()]
            balwt = sum([val[4] for val in err.values()])  # get the wt fn. highly irregular TODO
            error['pexpbe'] = max(balanced)
            error['nexpbe'] = min(balanced)
            error['maxpbe'] = max(balanced, key=lambda x: abs(x))
            error['minpbe'] = min(balanced, key=lambda x: abs(x))
            error['mpbe'] = sum(balanced) / balwt  #Nrxn
            error['mapbe'] = sum(map(abs, balanced)) / balwt  #Nrxn
            error['rmspbe'] = math.sqrt(sum(map(lambda x: x ** 2, balanced)) / balwt)  #Nrxn)
            error['stdpbe'] = None  # get math domain errors w/wt in denom math.sqrt((sum(map(lambda x: x ** 2, balanced)) - (sum(balanced) ** 2) / balwt) / balwt)  #/ Nrxn) / Nrxn)
            # capped (relative) error
            capped = [val[2] for val in err.values()]
            error['pexpce'] = max(capped)
            error['nexpce'] = min(capped)
            error['maxpce'] = max(capped, key=lambda x: abs(x))
            error['minpce'] = min(capped, key=lambda x: abs(x))
            error['mpce'] = sum(capped) / Nrxn
            error['mapce'] = sum(map(abs, capped)) / Nrxn
            error['rmspce'] = math.sqrt(sum(map(lambda x: x ** 2, capped)) / Nrxn)
            error['stdpce'] = math.sqrt((sum(map(lambda x: x ** 2, capped)) - (sum(capped) ** 2) / Nrxn) / Nrxn)
            if verbose:
                print("""%d systems in %s for %s vs. %s, subset %s.\n%s""" %
                      (len(err), self.dbse, modelchem, benchmark, sset, format_errors(error, mode=2)))
        if returnindiv:
            return error, err
        else:
            return error
    def load_qcdata(self, modname, funcname, pythonpath=None, failoninc=True):
        """Loads qcdb.ReactionDatums from module *modname* function
        *funcname*. Module search path can be prepended with *pythonpath*.

        The loader function is called with *self* and is expected to attach
        data onto this database in place. When *failoninc* is False, a
        missing module or missing function is reported and silently
        tolerated; otherwise a ValidationError is raised.
        """
        # make the data module importable: explicit path takes precedence,
        # otherwise fall back to the package's ../data directory
        if pythonpath is not None:
            sys.path.insert(1, pythonpath)
        else:
            sys.path.append(os.path.dirname(__file__) + '/../data')
        try:
            datamodule = __import__(modname)
        except ImportError:
            if not failoninc:
                print("""%s data unavailable for database %s.\n""" % (modname, self.dbse))
                return
            else:
                print("""\nPython module for database data %s failed to load\n\n""" % (modname))
                print("""\nSearch path that was tried:\n""")
                print(', '.join(map(str, sys.path)))
                raise ValidationError("""Python module loading problem for database data """ + str(modname))
        # the loader mutates self (e.g. fills reaction data dictionaries)
        try:
            getattr(datamodule, funcname)(self)
        except AttributeError:
            if not failoninc:
                print("""%s %s data unavailable for database %s.\n""" % (modname, funcname, self.dbse))
                return
            else:
                raise ValidationError("Python module missing function %s for loading data " % (str(funcname)))
        print("""WrappedDatabase %s: %s %s results loaded""" % (self.dbse, modname, funcname))
def load_qcdata_byproject(self, project, pythonpath=None):
"""Loads qcdb.ReactionDatums from standard location for *project*
:module dbse_project and function load_project. Module search path
can be prepended with *pythonpath*.
"""
mod = self.dbse + '_' + project
func = 'load_' + project
self.load_qcdata(modname=mod, funcname=func, pythonpath=pythonpath)
def load_qcdata_hrxn_byproject(self, project, path=None):
""""""
if path is None:
path = os.path.dirname(__file__) + '/../data'
pklfile = os.path.abspath(path) + os.sep + self.dbse + '_hrxn_' + project + '.pickle'
if not os.path.isfile(pklfile):
raise ValidationError(
"Reactions pickle file for loading database data from file %s does not exist" % (pklfile))
with open(pklfile, 'rb') as handle:
hrxns = pickle.load(handle)
# no error checking for speed
for rxn, data in hrxns.items():
self.hrxn[rxn].data.update(data)
def load_qcdata_hdf5_trusted(self, project, path=None):
"""Loads qcdb.ReactionDatums from HDF5 file at path/dbse_project.h5 .
If path not given, looks in qcdb/data. This file is written by
reap-DB and so has been largely validated.
"""
if path is None:
path = os.path.dirname(__file__) + '/../data'
hdf5file = os.path.abspath(path) + os.sep + self.dbse + '_' + project + '.h5'
if not os.path.isfile(hdf5file):
raise ValidationError("HDF5 file for loading database data from file %s does not exist" % (hdf5file))
try:
import pandas as pd
except ImportError:
raise ValidationError("Pandas data managment module must be available for import")
try:
next(self.hrxn.iterkeys()) + 1
except TypeError:
intrxn = False
else:
intrxn = True
with pd.get_store(hdf5file) as handle:
for mc in handle['pdie'].keys():
lmc = mc.split('-') # TODO could be done better
method = lmc[0]
bsse = '_'.join(lmc[1:-1])
basis = lmc[-1]
df = handle['pdie'][mc]
for dbrxn in df.index[df.notnull()].values:
[dbse, rxn] = dbrxn.split('-', 1)
if intrxn:
rxn = int(rxn)
self.hrxn[rxn].data[mc] = ReactionDatum.library_modelchem(dbse=dbse, rxn=rxn,
method=method, mode=bsse, basis=basis,
value=df[dbrxn])
def integer_reactions(self):
"""Returns boolean of whether reaction names need to be cast to integer"""
try:
next(self.hrxn.iterkeys()) + 1
except TypeError:
return False
else:
return True
@staticmethod
def load_pickled(dbname, path=None):
"""
"""
if path is None:
path = os.path.dirname(__file__) + '/../data'
picklefile = psiutil.findfile_ignorecase(dbname,
pre=os.path.abspath(path) + os.sep, post='_WDb.pickle')
if not picklefile:
raise ValidationError("Pickle file for loading database data from file %s does not exist" % (
os.path.abspath(path) + os.sep + dbname + '.pickle'))
# with open('/var/www/html/bfdb_devel/bfdb/scratch/ASDFlogfile.txt', 'a') as handle:
# handle.write('<!-- PICKLE %s\n' % (picklefile))
with open(picklefile, 'rb') as handle:
instance = pickle.load(handle)
return instance
def available_modelchems(self, union=True):
"""Returns all the labels of model chemistries that have been
loaded. Either all modelchems that have data for any reaction if
*union* is True or all modelchems that have data for all reactions
if *union* is False.
"""
mcs = [set(v.data) for v in self.hrxn.itervalues()]
if union:
return sorted(set.union(*mcs))
else:
return sorted(set.intersection(*mcs))
def benchmark(self):
"""Returns the model chemistry label for the database's benchmark."""
bm = None
rxns = self.hrxn.itervalues()
while bm is None:
try:
bm = next(rxns).benchmark
except StopIteration:
break
return bm
# return next(self.hrxn.itervalues()).benchmark
# TODO all rxns have same bench in db module so all have same here in obj
# but the way things stored in Reactions, this doesn't have to be so
    def load_subsets(self, modname='subsetgenerator', pythonpath=None):
        """Loads subsets from all functions in module *modname*.

        Every callable found in *modname* is registered via add_Subset,
        using the callable's docstring as the subset label. Module search
        path can be prepended with *pythonpath*; otherwise this file's
        directory is appended to sys.path.
        """
        if pythonpath is not None:
            sys.path.insert(1, pythonpath)
        else:
            sys.path.append(os.path.dirname(__file__))
        try:
            ssmod = __import__(modname)
        except ImportError:
            print("""\nPython module for database data %s failed to load\n\n""" % (modname))
            print("""\nSearch path that was tried:\n""")
            print(', '.join(map(str, sys.path)))
            raise ValidationError("Python module loading problem for database subset generator " + str(modname))

        # each callable defines one subset; its docstring supplies the name
        # NOTE(review): dir() also surfaces imported callables/classes in the
        # module -- assumes the generator module exports only subset functions
        for func in dir(ssmod):
            if callable(getattr(ssmod, func)):
                self.add_Subset(getattr(ssmod, func).__doc__, getattr(ssmod, func))

        print("""WrappedDatabase %s: Defined subsets loaded""" % (self.dbse))
def get_pec_weightinfo(self):
"""
"""
def closest(u, options):
return max(options, key=lambda v: len(os.path.commonprefix([u, v])))
dbdat = {}
oss = self.oss['default']
eqrxns = [rxn for rxn, rr in zip(oss.hrxn, oss.axis['Rrat']) if rr == 1.0]
for rxnix, rxn in enumerate(oss.hrxn):
dbdat[rxn] = {'eq': closest(rxn, eqrxns),
'Rrat': oss.axis['Rrat'][rxnix]}
return dbdat
# def table_simple1(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# rowplan = ['bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'S22', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'S22', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'S22', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'S22', 'TT', textables.val, {'sset': 'default'}],
# ]
#
# def table_simple2(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# rowplan = ['bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'MAE', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'MAE', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'MAE', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'MAE', 'TT', textables.val, {'sset': 'default'}],
# ['d', r'MA\%E', 'HB', textables.val, {'sset': 'hb', 'err': 'mape'}],
# ['d', r'MA\%E', 'MX', textables.val, {'sset': 'mx', 'err': 'mape'}],
# ['d', r'MA\%E', 'DD', textables.val, {'sset': 'dd', 'err': 'mape'}],
# ['d', r'MA\%E', 'TT', textables.val, {'sset': 'default', 'err': 'mape'}],
# ['d', r'maxE', 'TT ', textables.val, {'sset': 'default', 'err': 'maxe'}],
# ['d', r'min\%E', ' TT', textables.val, {'sset': 'default', 'err': 'minpe'}],
# ['d', r'rmsE', 'TT ', textables.val, {'sset': 'default', 'err': 'rmse'}],
# ['d', r'devE', ' TT', textables.val, {'sset': 'default', 'err': 'stde'}],
# ]
#
# def table_simple3(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# rowplan = ['err', 'bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'MAE', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'MAE', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'MAE', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'MAE', 'TT', textables.val, {'sset': 'default'}],
# ]
#
# def table_simple4(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# plotpath = 'autogen' # TODO handle better
# rowplan = ['bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'S22', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'S22', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'S22', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'S22', 'TT', textables.val, {'sset': 'default'}],
# # ['l', r"""Error Distribution\footnotemark[1]""", r"""\includegraphics[width=6.67cm,height=3.5mm]{%s%s.pdf}""" % (plotpath, 'blank'), textables.graphics, {}],
# ['l', r"""Error Distribution\footnotemark[1]""", r"""""", textables.graphics, {}],
# ]
class Database(object):
"""Collection for handling single or multiple qcdb.WrappedDatabase objects.
Particularly, unifying modelchem and subset names that when inconsistent
across component databases. Also, defining statistics across databases.
>>> asdf = qcdb.Database(['s22', 'Nbc10', 'hbc6', 'HSG'], 'DB4')
>>> qwer = qcdb.Database('s22')
"""
def __init__(self, dbnamelist, dbse=None, pythonpath=None, loadfrompickle=False, path=None):
#: internal name of database collection
#:
#: >>> print asdf.dbse
#: 'DB4'
self.dbse = None
#: ordered component Database objects
#:
#: >>> print asdf.dbdict
#: XXXX
self.dbdict = OrderedDict()
#: subset assembly pattern
#:
#: >>> print asdf.sset.keys()
#: XXXX
self.sset = OrderedDict()
#: assembly pattern for transspecies modelchems
#:
#: >>> print asdf.mcs.keys()
#: XXXX
self.mcs = {}
self.benchmark = None
# slight validation, repackaging into dbnamelist
if isinstance(dbnamelist, basestring):
dbnamelist = [dbnamelist]
elif all(isinstance(item, basestring) for item in dbnamelist):
pass
else:
raise ValidationError('Database::constructor: Inappropriate configuration of constructor arguments')
# load databases
for db in dbnamelist:
if loadfrompickle:
tmp = WrappedDatabase.load_pickled(db, path=path)
else:
tmp = WrappedDatabase(db, pythonpath=pythonpath)
self.dbdict[tmp.dbse] = tmp
# slurp up the obvious overlaps
consolidated_bench = [odb.benchmark() for odb in self.dbdict.values()]
if len(set(consolidated_bench)) == 1:
self.benchmark = consolidated_bench[0]
else:
self.benchmark = ''.join(consolidated_bench)
self.mcs[self.benchmark] = consolidated_bench
# methods[ref] = Method(name=ref)
# bases[ref] = BasisSet(name=ref)
self.mcs['default'] = consolidated_bench
# self.mcs['default'] = [odb.benchmark() for odb in self.dbdict.values()]
self._intersect_subsets()
self._intersect_modelchems()
# complex subsets
self.load_subsets()
# collection name
self.dbse = ''.join(self.dbdict.keys()) if dbse is None else dbse
# merge Reaction-s
self.hrxn = OrderedDict()
for db, odb in self.dbdict.items():
for rxn, orxn in odb.hrxn.items():
self.hrxn[orxn.dbrxn] = orxn
# merge Reagent-s
self.hrgt = OrderedDict()
for db, odb in self.dbdict.items():
for rgt, orgt in odb.hrgt.items():
self.hrgt[orgt.name] = orgt
print("""Database %s: %s""" % (self.dbse, ', '.join(self.dbdict.keys())))
def __str__(self):
text = ''
text += """ ===> %s Database <===\n\n""" % (self.dbse)
# text += """ Reagents: %s\n""" % (self.hrgt.keys())
# text += """ Reactions: %s\n""" % (self.hrxn.keys())
text += """ Subsets: %s\n""" % (self.sset.keys())
# text += """ Reference: %s\n""" % ('default: ' + ' + '.join(self.mcs['default']))
try:
text += """ Reference: %s\n""" % (self.benchmark + ': ' + ' + '.join(self.mcs[self.benchmark]))
except TypeError:
text += """ Reference: %s\n""" % ('UNDEFINED')
text += """ Model Chemistries: %s\n""" % (
', '.join(sorted([mc for mc in self.mcs.keys() if mc is not None])))
text += """\n"""
for db in self.dbdict.keys():
text += self.dbdict[db].__str__()
return text
# def benchmark(self):
# """Returns the model chemistry label for the database's benchmark."""
# return self.benchmark #TODO not sure if right way to go about this self.mcs['default']
def fancy_mcs(self, latex=False):
"""
"""
fmcs = {}
for mc in self.mcs.keys():
try:
mtd, mod, bas = mc.split('-')
except ValueError:
fmcs[mc] = mc
else:
if latex:
tmp = """%s/%s, %s""" % \
(methods[mtd].latex, bases[bas].latex, mod.replace('_', '\\_'))
fmcs[mc] = """%45s""" % (tmp)
else:
fmcs[mc] = """%20s / %-20s, %s""" % \
(methods[mtd].fullname, bases[bas].fullname, mod)
return fmcs
# def fancy_mcs_nested(self):
# """
# """
# fmcs = defaultdict(lambda: defaultdict(dict))
# for mc in self.mcs.keys():
# try:
# mtd, mod, bas = mc.split('-')
# except ValueError:
# fmcs['All']['All'][mc] = mc
# fmcs['Method']['Others'][mc] = mc
# fmcs['Options']['Others'][mc] = mc
# fmcs['Basis Treatment']['Others'][mc] = mc
# else:
# fancyrepr = """%20s / %-20s %s""" % (methods[mtd].latex, bases[bas].latex, mod)
# fmcs['All']['All'][mc] = fancyrepr
# fmcs['Method'][methods[mtd].latex][mc] = fancyrepr
# fmcs['Options'][mod][mc] = fancyrepr
# fmcs['Basis Treatment'][bases[bas].latex][mc] = fancyrepr
# return fmcs
def integer_reactions(self):
"""Returns boolean of whether reaction names need to be cast to integer"""
return {db: odb.integer_reactions() for db, odb in self.dbdict.items()}
def load_qcdata_byproject(self, project, pythonpath=None):
"""For each component database, loads qcdb.ReactionDatums from
standard location for *project* :module dbse_project and function
load_project. Module search path can be prepended with *pythonpath*.
"""
for db, odb in self.dbdict.items():
odb.load_qcdata_byproject(project, pythonpath=pythonpath)
self._intersect_modelchems()
def load_qcdata_hdf5_trusted(self, project, path=None):
"""For each component database, loads qcdb.ReactionDatums from
HDF5 file at path/dbse_project.h5 . If path not given, looks in
qcdb/data. This file is written by reap-DB and so has been largely
validated.
"""
for db, odb in self.dbdict.items():
odb.load_qcdata_hdf5_trusted(project, path=path)
self._intersect_modelchems()
def load_qcdata_hrxn_byproject(self, project, path=None):
for db, odb in self.dbdict.items():
odb.load_qcdata_hrxn_byproject(project, path=path)
self._intersect_modelchems()
def available_projects(self, path=None):
""""""
import glob
if path is None:
path = os.path.dirname(__file__) + '/../data'
projects = []
for pjfn in glob.glob(path + '/*_hrxn_*.pickle'):
pj = pjfn[:-7].split('_')[-1]
projects.append(pj)
complete_projects = []
for pj in set(projects):
if all([os.path.isfile(path + '/' + db + '_hrxn_' + pj + '.pickle') for db in self.dbdict.keys()]):
complete_projects.append(pj)
return complete_projects
def load_subsets(self, modname='subsetgenerator', pythonpath=None):
"""For each component database, loads subsets from all functions
in module *modname*. Default *modname* usues standard generators.
"""
for db, odb in self.dbdict.items():
odb.load_subsets(modname=modname, pythonpath=pythonpath)
self._intersect_subsets()
def add_Subset(self, name, func):
"""Define a new subset labeled *name* by providing a database
*func* whose keys are the keys of dbdict and whose values are a
function that filters each WrappedDatabase's *self.hrxn*.
"""
label = name.lower()
merged = []
for db, odb in self.dbdict.items():
if callable(func[db]):
ssfunc = func[db]
else:
ssfunc = lambda x: func[db]
odb.add_Subset(name=name, func=ssfunc)
if name in odb.sset:
merged.append(name)
else:
merged.append(None)
if any(merged):
self.sset[label] = merged
print("""Database %s: Subset %s formed: %s""" % (self.dbse, label, self.sset[label]))
else:
print("""Database %s: Subset %s NOT formed: empty""" % (self.dbse, label))
def add_Subset_union(self, name, sslist):
"""
Define a new subset labeled *name* (note that there's nothing to
prevent overwriting an existing subset name) from the union of
existing named subsets in *sslist*.
"""
funcdb = {}
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
overlapping_dbrxns = []
for ss in sslist:
lss = self.sset[ss][dbix]
if lss is not None:
overlapping_dbrxns.append(self.dbdict[db].sset[lss].keys())
rxnlist = set().union(*overlapping_dbrxns)
funcdb[db] = rxnlist
self.add_Subset(name, funcdb)
def add_sampled_Subset(self, sset='default', number_of_samples=1, sample_size=5, prefix='rand'):
"""Generate and register *number_of_samples* new subsets of size
*sample_size* and name built from *prefix*. Reactions chosen from *sset*.
"""
import random
intrxn = self.integer_reactions()
rxns = self.get_hrxn(sset=sset).keys()
def random_sample(ssname):
"""Generate and register a single new subset of size *sample_size* and
name *ssname*.
"""
sample = {db: [] for db in self.dbdict.keys()}
for dbrxn in random.sample(rxns, sample_size):
db, rxn = dbrxn.split('-', 1)
typed_rxn = int(rxn) if intrxn[db] else rxn
sample[db].append(typed_rxn)
self.add_Subset(ssname, sample)
for sidx in range(number_of_samples):
if number_of_samples == 1:
ssname = prefix
else:
ssname = prefix + '_' + str(sidx)
random_sample(ssname)
def promote_Subset(self, name=None):
"""Examine component databases and elevate subset *name* not necessarily
present for all component databases to a subset for the *self*. When *name*
is None, promotes all subsets found for component databases. Also promotes
entirety of each component database as a subset with name of component
database dbse in lowercase.
"""
if name is None:
sss = [set(odb.sset.keys()) for db, odb in self.dbdict.items()]
new = sorted(set.union(*sss))
else:
new = [name]
for ss in new:
if ss not in self.sset:
self.sset[ss] = [ss if ss in odb.sset else None for db, odb in self.dbdict.items()]
print("""Database %s: Subset %s promoted: %s""" % (self.dbse, ss, self.sset[ss]))
if name is None and len(self.dbdict) > 1:
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
ss = odb.dbse.lower()
if ss not in self.sset:
self.sset[ss] = ['default' if ix == dbix else None for ix in range(len(self.dbdict))]
print("""Database %s: Subset %s promoted: %s""" % (self.dbse, ss, self.sset[ss]))
def _intersect_subsets(self):
"""Examine component database subsets and collect common names as
Database subset.
"""
sss = [set(odb.sset.keys()) for db, odb in self.dbdict.items()]
new = sorted(set.intersection(*sss))
for ss in new:
self.sset[ss] = [ss] * len(self.dbdict.keys())
def _intersect_modelchems(self):
"""Examine component database qcdata and collect common names as
Database modelchem.
"""
mcs = [set(odb.available_modelchems()) for odb in self.dbdict.itervalues()]
new = sorted(set.intersection(*mcs))
for mc in new:
self.mcs[mc] = [mc] * len(self.dbdict.keys())
# def reaction_generator(self):
# """
# """
# for db, odb in self.dbdict.items():
# for rxn, orxn in odb.hrxn.items():
# yield orxn
def compute_statistics(self, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False, returnindiv=False):
"""Computes summary statistics and, if *returnindiv* True,
individual errors for single model chemistry *modelchem* versus
*benchmark* over subset *sset* over all component databases.
Particularly, imposes cross-database definitions for sset and
modelchem.
#Returns error if model chemistries are missing
#for any reaction in subset unless *failoninc* set to False,
#whereupon returns partial statistics. Returns dictionary of
#statistics labels and values.
"""
errors = OrderedDict()
indiv = OrderedDict()
actvdb = []
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
if self.sset[sset][dbix] is None:
errors[db], indiv[db] = (None, None)
else:
errors[db], indiv[db] = odb.compute_statistics(self.mcs[modelchem][dbix],
sset=self.sset[sset][dbix],
benchmark='ZEROS' if benchmark == 'ZEROS' else self.mcs[benchmark][dbix],
failoninc=failoninc, verbose=verbose, returnindiv=True)
actvdb.append(errors[db])
errors[self.dbse] = average_errors(*actvdb)
if returnindiv:
return errors, indiv
else:
return errors
    def analyze_modelchems(self, modelchem, benchmark='default', failoninc=True, verbose=False):
        """For each component database, compute and print nicely formatted
        summary error statistics for each model chemistry in array
        *modelchem* versus *benchmark* for all available subsets.

        Output is one table section per subset: a row per modelchem with
        ME/STDE/MAE/MA%E/MA%BE columns for the collection and for each
        component database.
        """
        # compute errors: errors[mc][ss] holds the per-database stats dicts
        errors = {}
        for mc in modelchem:
            errors[mc] = {}
            for ss in self.sset.keys():
                errors[mc][ss] = self.compute_statistics(mc, benchmark=benchmark, sset=ss,
                                                         failoninc=failoninc, verbose=verbose, returnindiv=False)

        # present errors
        # pre/suf are the common prefix/suffix of the modelchem labels;
        # mid holds the distinguishing middles used as row labels
        pre, suf, mid = string_contrast(modelchem)
        text = """\n  ==> %s %s[]%s Errors <==\n""" % (self.dbse, pre, suf)
        text += """%20s %44s""" % ('', '==> ' + self.dbse + ' <==')
        for db, odb in self.dbdict.items():
            text += """%44s""" % ('=> ' + odb.dbse + ' <=')
        text += '\n'

        # column headers, repeated for the collection and each component db
        collabel = """    {:5}   {:4}   {:6} {:6}    {:6}""".format(
            'ME', 'STDE', 'MAE', 'MA%E', 'MA%BE')

        text += """{:20}    """.format('') + collabel
        for db in self.dbdict.keys():
            text += collabel
        text += '\n'

        text += """{:20}    {}""".format('', '=' * 44)
        # alternate underscore/space fills to visually separate db columns
        ul = False
        for db in self.dbdict.keys():
            text += """{}""".format('_' * 44 if ul else ' ' * 44)
            ul = not ul
        text += '\n'

        for ss in self.sset.keys():
            text += """   => %s <=\n""" % (ss)
            for mc in modelchem:
                perr = errors[mc][ss]
                text += """%20s    %44s""" % (mid[modelchem.index(mc)],
                                              format_errors(perr[self.dbse]))
                for db in self.dbdict.keys():
                    # blank cell when the subset is undefined for this db
                    text += """%44s""" % ('' if perr[db] is None else format_errors(perr[db]))
                text += '\n'
        print(text)
def plot_bars(self, modelchem, benchmark='default', sset=['default', 'hb', 'mx', 'dd'],
failoninc=True, verbose=False, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Prepares 'grey bars' diagram for each model chemistry in array
*modelchem* versus *benchmark* over all component databases. A wide bar
is plotted with three smaller bars, corresponding to the 'mae'
summary statistic of the four subsets in *sset*.
*saveas* conveys directory ('/') and/or filename for saving the
resulting plot. File extension is not accessible, but *graphicsformat*
array requests among 'png', 'pdf', and 'eps' formats. *relpath*
forces paths to saved files to be relative to current directory,
rather than absolute paths for returned code and file dictionary.
Prepares bars diagram instructions and either executes them if
matplotlib available (Canopy or Anaconda) or prints them. Returns a
dictionary of all saved plot filenames.
>>> asdf.plot_bars(['MP2-CP-adz', 'MP2-CP-adtz'], sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
"""
# compute errors
errors = {}
for mc in modelchem:
if mc is not None:
errors[mc] = {}
for ss in sset:
errors[mc][ss] = self.compute_statistics(mc, benchmark=benchmark, sset=ss,
failoninc=failoninc, verbose=verbose, returnindiv=False)
# repackage
pre, suf, mid = string_contrast(modelchem)
dbdat = []
for mc in modelchem:
if mc is None:
dbdat.append(None)
else:
dbdat.append({'mc': mid[modelchem.index(mc)],
'data': [errors[mc][ss][self.dbse]['mae'] for ss in sset]})
title = self.dbse + ' ' + pre + '[]' + suf + ' ' + ','.join(sset)
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict = mpl.bars(%s,\n title='%s'\n saveas=%s\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, title, repr(saveas), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict = mpl.bars(dbdat, title=title,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
# def get_pec_weightinfo(self):
# """
#
# """
# def closest(u, options):
# return max(options, key=lambda v: len(os.path.commonprefix([u, v])))
#
# dbdat = {}
# for db, odb in self.dbdict.items():
# #dbix = self.dbdict.keys().index(db)
# oss = odb.oss['default']
# eqrxns = [rxn for rxn, rr in zip(oss.hrxn, oss.axis['Rrat']) if rr == 1.0]
# for rxnix, rxn in enumerate(oss.hrxn):
# dbrxn = '-'.join([db, rxn])
# rrat = oss.axis['Rrat'][rxnix]
# eq = closest(rxn, eqrxns)
# print rxn, rxnix, eq, rrat, dbrxn
# dbdat[dbrxn] = {'eq': eq, 'Rrat': rrat}
# return dbdat
def plot_axis(self, axis, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False, color='sapt', view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""
"""
dbdatdict = OrderedDict()
for mc in modelchem:
# compute errors
errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
failoninc=failoninc, verbose=verbose, returnindiv=True)
# repackage
dbdat = []
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
oss = odb.oss[self.sset[sset][dbix]]
# TODO may need to make axis name distributable across wrappeddbs
# TODO not handling mc present bm absent
if indiv[db] is not None:
for rxn in oss.hrxn:
rxnix = oss.hrxn.index(rxn)
bm = self.mcs[benchmark][dbix]
bmpresent = False if (bm is None or bm not in odb.hrxn[rxn].data) else True
mcpresent = False if (self.mcs[mc][dbix] not in odb.hrxn[rxn].data) else True
entry = {'db': db,
'sys': str(rxn),
'color': odb.hrxn[rxn].color,
'axis': oss.axis[axis][rxnix]}
if bmpresent:
entry['bmdata'] = odb.hrxn[rxn].data[self.mcs[benchmark][dbix]].value
else:
entry['bmdata'] = None
if mcpresent:
entry['mcdata'] = odb.hrxn[rxn].data[self.mcs[mc][dbix]].value
else:
continue
if bmpresent and mcpresent:
entry['error'] = [indiv[db][rxn][0]]
else:
entry['error'] = [None]
dbdat.append(entry)
dbdatdict[fancify_mc_tag(mc).strip()] = dbdat
pre, suf, mid = string_contrast(modelchem)
title = """%s[%s]%s vs %s axis %s for %s subset %s""" % (pre, str(len(mid)), suf, benchmark, axis, self.dbse, sset)
print(title)
#for mc, dbdat in dbdatdict.items():
# print mc
# for d in dbdat:
# print '{:20s} {:8.2f} {:8.2f} {:8.2f}'.format(d['sys'], d['axis'],
# 0.0 if d['bmdata'] is None else d['bmdata'],
# 0.0 if d['mcdata'] is None else d['mcdata'])
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict = mpl.valerr(%s,\n color='%s',\n title='%s',\n xtitle='%s',\n view=%s\n saveas=%s\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, color, title, axis, view, repr(saveas), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict = mpl.valerr(dbdatdict, color=color, title=title, xtitle=axis,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
def load_saptdata_frombfdb(self, sset='default',
pythonpath='/Users/loriab/linux/bfdb/sapt_punt', failoninc=True): # pythonpath=None
"""This is a stopgap function that loads sapt component data from
sapt_punt in bfdb repo.
"""
saptpackage = OrderedDict()
for db, odb in self.dbdict.items():
modname = 'sapt_' + odb.dbse
if pythonpath is not None:
sys.path.insert(1, pythonpath)
else:
sys.path.append(os.path.dirname(__file__) + '/../data')
try:
datamodule = __import__(modname)
except ImportError:
print("""\nPython module for database data %s failed to load\n\n""" % (modname))
print("""\nSearch path that was tried:\n""")
print(', '.join(map(str, sys.path)))
raise ValidationError("Python module loading problem for database subset generator " + str(modname))
try:
saptdata = getattr(datamodule, 'DATA')
except AttributeError:
raise ValidationError("SAPT punt module does not contain DATA" + str(modname))
saptmc = saptdata['SAPT MODELCHEM']
dbix = self.dbdict.keys().index(db)
for rxn, orxn in odb.hrxn.items():
lss = self.sset[sset][dbix]
if lss is not None:
if rxn in odb.sset[lss]:
dbrxn = orxn.dbrxn
try:
elst = saptdata['SAPT ELST ENERGY'][dbrxn]
exch = saptdata['SAPT EXCH ENERGY'][dbrxn]
ind = saptdata['SAPT IND ENERGY'][dbrxn]
disp = saptdata['SAPT DISP ENERGY'][dbrxn]
except (KeyError, AttributeError):
print("""Warning: DATA['SAPT * ENERGY'] missing for reaction %s""" % (dbrxn))
if failoninc:
break
else:
if not all([elst, ind, disp]): # exch sometimes physically zero
print("""Warning: DATA['SAPT * ENERGY'] missing piece for reaction %s: %s""" % (dbrxn, [elst, exch, ind, disp]))
if failoninc:
break
saptpackage[dbrxn] = {'mc': saptmc,
'elst': elst,
'exch': exch,
'ind': ind,
'disp': disp}
return saptpackage
def plot_ternary(self, sset='default', labeled=True,
pythonpath='/Users/loriab/linux/bfdb/sapt_punt', failoninc=True, # pythonpath=None
view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""This is a stopgap function that loads sapt component data from
sapt_punt in bfdb repo, then formats it to plot a ternary diagram.
"""
saptdata = self.load_saptdata_frombfdb(sset=sset, pythonpath=pythonpath,
failoninc=failoninc)
dbdat = []
mcs = []
for dat in saptdata.values():
dbdat.append([dat['elst'], dat['ind'], dat['disp']])
if dat['mc'] not in mcs:
mcs.append(dat['mc'])
title = ' '.join([self.dbse, sset, ' '.join(mcs)])
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
pass
# if not running from Canopy, print line to execute from Canopy
else:
# if running from Canopy, call mpl directly
filedict = mpl.ternary(dbdat, title=title, labeled=labeled,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
def plot_flat(self, modelchem, benchmark='default', sset='default',
              failoninc=True, verbose=False, color='sapt', xlimit=4.0, xlines=[0.0, 0.3, 1.0],
              view=True,
              saveas=None, relpath=False, graphicsformat=['pdf']):
    """Computes individual errors and summary statistics for single
    model chemistry *modelchem* versus *benchmark* over
    subset *sset* over all component databases. Thread *color* can be
    'rgb' for old coloring, a color name or 'sapt' for spectrum coloring.

    *saveas* conveys directory ('/') and/or filename for saving the
    resulting plot. File extension is not accessible, but *graphicsformat*
    array requests among 'png', 'pdf', and 'eps' formats. *relpath*
    forces paths to saved files to be relative to current directory,
    rather than absolute paths for returned code and file dictionary.

    Prepares flat diagram instructions and either executes them if
    matplotlib available (Canopy or Anaconda) or prints them. Returns a
    dictionary of all saved plot filenames, or None when matplotlib is
    unavailable.

    asdf.plot_flat('CCSD-CP-atqzadz', failoninc=False)
    """
    # compute errors
    mc = modelchem
    errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
                                            failoninc=failoninc, verbose=verbose, returnindiv=True)

    # repackage per-reaction errors for the plotting helper
    dbdat = []
    for db, odb in self.dbdict.items():
        if indiv[db] is not None:
            for rxn in indiv[db].keys():
                dbdat.append({'db': db,
                              'sys': str(rxn),
                              'color': odb.hrxn[rxn].color,
                              'data': [indiv[db][rxn][0]]})
    pre, suf, mid = string_contrast(mc)
    title = self.dbse + '-' + sset + ' ' + pre + '[]' + suf
    mae = errors[self.dbse]['mae']
    mape = None
    # mape = 100 * errors[self.dbse]['mape']

    # generate matplotlib instructions and call or print
    filedict = None  # BUGFIX: was unbound (NameError) when matplotlib missing
    try:
        from . import mpl
        import matplotlib.pyplot as plt
    except ImportError:
        # if not running from Canopy, print line to execute from Canopy
        print("""filedict = mpl.flat(%s,\n color='%s',\n title='%s',\n mae=%s,\n mape=%s,\n xlimit=%s,\n xlines=%s,\n view=%s\n saveas=%s\n relpath=%s\n graphicsformat=%s)\n\n""" %
              (dbdat, color, mc, mae, mape, xlimit, repr(xlines), view, repr(saveas), repr(relpath), repr(graphicsformat)))
    else:
        # if running from Canopy, call mpl directly
        filedict = mpl.flat(dbdat, color=color, title=mc, mae=mae, mape=mape,
                            xlimit=xlimit, xlines=xlines, view=view,
                            saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
    return filedict
def write_xyz_files(self, path=None):
    """Write an xyz file for every reagent in the Database into *path*,
    or into a dbse_xyzfiles directory created in cwd when *path* is
    None. Also writes a PyMOL script to that directory that generates a
    transparent-background ray-traced png for every reagent.
    """
    # resolve and, if necessary, create the destination directory
    if path is None:
        xyzdir = os.getcwd() + os.sep + self.dbse + '_xyzfiles' + os.sep
    else:
        xyzdir = os.path.abspath(path) + os.sep
    if not os.path.exists(xyzdir):
        os.mkdir(xyzdir)

    # one xyz file per reagent
    for tag, orgt in self.hrgt.items():
        mol = Molecule(orgt.mol)
        mol.update_geometry()
        mol.save_xyz(xyzdir + tag + '.xyz')

    # PyMOL batch script covering every reagent
    scriptname = 'pymol_xyz2png_script.pml'
    with open(xyzdir + scriptname, 'w') as handle:
        handle.write("""
# Launch PyMOL and run from its command line:
# PyMOL> cd {}
# PyMOL> @{}
""".format(xyzdir, 'pymol_xyz2png_script.pml'))
        for tag in self.hrgt:
            handle.write("""
load {xyzfile}
hide lines
show sticks
color grey, name c
cmd.set('''opaque_background''','''0''',quiet=0)
reset
orient
cmd.zoom(buffer=0.3, complete=1)
ray
png {pngfile}
reinitialize
""".format(
                xyzfile=xyzdir + tag + '.xyz',
                pngfile=xyzdir + tag + '.png'))
def plot_all_flats(self, modelchem=None, sset='default', xlimit=4.0,
                   failoninc=True,
                   saveas=None, relpath=False, graphicsformat=['pdf']):
    """Generate pieces for inclusion into tables. Supply list of
    modelchemistries to plot from *modelchem*, otherwise defaults to
    all those available. Can modify subset *sset* and plotting
    range *xlimit*.

    >>> asdf.plot_all_flats(sset='tt-5min', xlimit=4.0)
    """
    # default to every registered model chemistry
    selection = self.mcs.keys() if modelchem is None else modelchem
    filedict = OrderedDict()
    for mc in sorted(selection):
        filedict[mc] = self.plot_flat(mc, sset=sset, xlimit=xlimit, view=False,
                                      failoninc=failoninc,
                                      saveas=saveas, relpath=relpath,
                                      graphicsformat=graphicsformat)
    return filedict
def get_hrxn(self, sset='default'):
    """Return an OrderedDict of dbrxn -> Reaction for every reaction of
    subset *sset* across all component databases.
    """
    rhrxn = OrderedDict()
    for db, odb in self.dbdict.items():
        # BUGFIX: Py3 dict views have no .index(); materialize as list
        dbix = list(self.dbdict.keys()).index(db)
        lss = self.sset[sset][dbix]
        if lss is not None:
            for rxn in odb.hrxn:
                if rxn in odb.sset[lss]:
                    orxn = odb.hrxn[rxn]
                    rhrxn[orxn.dbrxn] = orxn  # this is a change and conflict with vergil version
    return rhrxn
def get_hrgt(self, sset='default', actv='default'):
    """Return an OrderedDict of reagent name -> Reagent for every
    reagent participating (through reaction map *actv*) in the
    reactions of subset *sset*.
    """
    reagents = OrderedDict()
    for orxn in self.get_hrxn(sset=sset).values():
        for rgt in orxn.rxnm[actv].keys():
            reagents[rgt.name] = rgt
        # TODO prob need to avoid duplicates or pass
    return reagents
def get_reactions(self, modelchem, sset='default', benchmark='default',
                  failoninc=True):
    """Collects the reactions present in *sset* from each WrappedDatabase,
    checks that *modelchem* and *benchmark* ReactionDatum are present
    (fails if *failoninc* True), then returns in an array a tuple for
    each reaction containing the modelchem key needed to access
    *modelchem*, the modelchem key needed to access *benchmark*, and
    the Reaction object. Either key is None when its datum is absent
    (benchmark absence never raises).
    """
    dbdat = []
    rhrxn = self.get_hrxn(sset=sset)
    # BUGFIX: .itervalues() is Py2-only; .values() works on both
    for orxn in rhrxn.values():
        # BUGFIX: Py3 dict views have no .index(); materialize as list
        dbix = list(self.dbdict.keys()).index(orxn.dbrxn.split('-')[0])
        lmc = self.mcs[modelchem][dbix]
        lbm = self.mcs[benchmark][dbix]
        try:
            orxn.data[lbm]
        except KeyError:
            # not sure if should treat bm differently
            lbm = None
        try:
            orxn.data[lmc]
        except KeyError as e:
            if failoninc:
                raise e
            else:
                lmc = None
        dbdat.append((lmc, lbm, orxn))
    # this is diff in that returning empties not just pass over- may break bfdb
    return dbdat
def get_missing_reactions(self, modelchem, sset='default'):
    """Returns a dictionary (keys self.dbse and all component
    WrappedDatabase.dbse) of two elements, the first being the number
    of reactions *sset* should contain and the second being a list of
    the reaction names (dbrxn) not available for *modelchem*. Absence
    of benchmark not considered.
    """
    counts = OrderedDict()
    counts[self.dbse] = [0, []]
    # BUGFIX: .items()[0][0] is Py2-only (views aren't subscriptable);
    #   next(iter(...)) yields the first key on both versions
    soledb = len(self.dbdict) == 1 and next(iter(self.dbdict)) == self.dbse
    if not soledb:
        for db in self.dbdict.keys():
            counts[db] = [0, []]
    for (lmc, lbm, orxn) in self.get_reactions(modelchem, benchmark='default',
                                               sset=sset, failoninc=False):
        db, rxn = orxn.dbrxn.split('-', 1)
        mcdatum = orxn.data[lmc].value if lmc else None
        counts[self.dbse][0] += 1
        if not soledb:
            counts[db][0] += 1
        if mcdatum is None:
            counts[self.dbse][1].append(orxn.dbrxn)
            if not soledb:
                counts[db][1].append(orxn.dbrxn)
    return counts
def plot_disthist(self, modelchem, benchmark='default', sset='default',
                  failoninc=True, verbose=False, xtitle='', view=True,
                  saveas=None, relpath=False, graphicsformat=['pdf']):
    """Computes individual errors and summary statistics for single
    model chemistry *modelchem* versus *benchmark* over
    subset *sset* over all component databases. Computes histogram
    of errors and gaussian distribution.

    *saveas* conveys directory ('/') and/or filename for saving the
    resulting plot. File extension is not accessible, but *graphicsformat*
    array requests among 'png', 'pdf', and 'eps' formats. *relpath*
    forces paths to saved files to be relative to current directory,
    rather than absolute paths for returned code and file dictionary.

    Prepares disthist diagram instructions and either executes them if
    matplotlib available (Canopy or Anaconda) or prints them. Returns a
    dictionary of all saved plot filenames, or None when matplotlib is
    unavailable.

    >>>
    """
    # compute errors
    mc = modelchem
    errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
                                            failoninc=failoninc, verbose=verbose, returnindiv=True)

    # repackage raw per-reaction errors for the histogram
    dbdat = []
    for db in self.dbdict.keys():
        if indiv[db] is not None:
            for rxn in indiv[db].keys():
                dbdat.append(indiv[db][rxn][0])
    title = """%s vs %s for %s subset %s""" % (mc, benchmark, self.dbse, sset)
    me = errors[self.dbse]['me']
    stde = errors[self.dbse]['stde']

    # generate matplotlib instructions and call or print
    filedict = None  # BUGFIX: was unbound (NameError) when matplotlib missing
    try:
        from . import mpl
        import matplotlib.pyplot as plt
    except ImportError:
        # if not running from Canopy, print line to execute from Canopy
        print("""filedict = mpl.disthist(%s,\n title='%s',\n xtitle='%s'\n me=%s,\n stde=%s,\n saveas=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
              (dbdat, title, xtitle, me, stde, repr(saveas), repr(relpath), repr(graphicsformat)))
    else:
        # if running from Canopy, call mpl directly
        filedict = mpl.disthist(dbdat, title=title, xtitle=xtitle, me=me, stde=stde,
                                view=view,
                                saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
    return filedict
def plot_modelchems(self, modelchem, benchmark='default', mbenchmark=None,
                    sset='default', msset=None, failoninc=True, verbose=False, color='sapt',
                    xlimit=4.0, labeled=True, view=True,
                    mousetext=None, mouselink=None, mouseimag=None, mousetitle=None, mousediv=None,
                    saveas=None, relpath=False, graphicsformat=['pdf']):
    """Computes individual errors and summary statistics over all component
    databases for each model chemistry in array *modelchem* versus *benchmark*
    over subset *sset*. *mbenchmark* and *msset* are array options (same
    length as *modelchem*) that override *benchmark* and *sset*, respectively,
    for non-uniform specification. Thread *color* can be 'rgb' for old
    coloring, a color name or 'sapt' for spectrum coloring.

    *saveas* conveys directory ('/') and/or filename for saving the
    resulting plot. File extension is not accessible, but *graphicsformat*
    array requests among 'png', 'pdf', and 'eps' formats. *relpath*
    forces paths to saved files to be relative to current directory,
    rather than absolute paths for returned code and file dictionary.

    Prepares thread diagram instructions and either executes them if
    matplotlib available (Canopy or Anaconda) or prints them. Returns a
    dictionary of all saved plot filenames. If any of *mousetext*, *mouselink*,
    or *mouseimag* is specified, htmlcode will be returned with an image map of
    slats to any of text, link, or image, respectively. Both returns are
    None when matplotlib is unavailable.
    """
    # distribute benchmark
    if mbenchmark is None:
        lbenchmark = [benchmark] * len(modelchem)  # normal bm modelchem name
    else:
        # BUGFIX: basestring is Py2-only; str suffices here
        if isinstance(mbenchmark, str) or len(mbenchmark) != len(modelchem):
            # BUGFIX: message had no %s placeholder, so the % raised TypeError
            raise ValidationError(
                """mbenchmark must be array of length distributable among modelchem: %s""" % (str(mbenchmark)))
        else:
            lbenchmark = mbenchmark  # array of bm for each modelchem
    # distribute sset
    if msset is None:
        lsset = [sset] * len(modelchem)  # normal ss name like 'MX'
    else:
        if isinstance(msset, str) or len(msset) != len(modelchem):
            raise ValidationError("""msset must be array of length distributable among modelchem: %s""" % (str(msset)))
        else:
            lsset = msset  # array of ss for each modelchem
    # compute errors
    index = []
    errors = {}
    indiv = {}
    for mc, bm, ss in zip(modelchem, lbenchmark, lsset):
        ix = '%s_%s_%s' % (ss, mc, bm)
        index.append(ix)
        errors[ix], indiv[ix] = self.compute_statistics(mc, benchmark=bm, sset=ss,
                                                        failoninc=failoninc, verbose=verbose, returnindiv=True)

    # repackage
    dbdat = []
    for db, odb in self.dbdict.items():
        # BUGFIX: Py3 dict views have no .index(); materialize as list
        dbix = list(self.dbdict.keys()).index(db)
        for rxn in odb.hrxn:
            data = []
            for ix in index:
                if indiv[ix][db] is not None:
                    if rxn in odb.sset[self.sset[lsset[index.index(ix)]][dbix]]:
                        try:
                            data.append(indiv[ix][db][rxn][0])
                        except KeyError as e:
                            if failoninc:
                                raise e
                            else:
                                data.append(None)
                    else:
                        data.append(None)
                else:
                    data.append(None)
            if not data or all(item is None for item in data):
                pass  # filter out empty reactions
            else:
                dbdat.append({'db': db,
                              'sys': str(rxn),
                              'show': str(rxn),
                              'color': odb.hrxn[rxn].color,
                              'data': data})
    mae = [errors[ix][self.dbse]['mae'] for ix in index]
    mape = [100 * errors[ix][self.dbse]['mape'] for ix in index]
    # form unique filename
    ixpre, ixsuf, ixmid = string_contrast(index)
    title = self.dbse + ' ' + ixpre + '[]' + ixsuf

    # generate matplotlib instructions and call or print
    # BUGFIX: both returns were unbound (NameError) when matplotlib missing
    filedict = None
    htmlcode = None
    try:
        from . import mpl
        import matplotlib.pyplot as plt
    except ImportError:
        # if not running from Canopy, print line to execute from Canopy
        print("""filedict, htmlcode = mpl.threads(%s,\n color='%s',\n title='%s',\n labels=%s,\n mae=%s,\n mape=%s\n xlimit=%s\n labeled=%s\n saveas=%s\n mousetext=%s\n mouselink=%s\n mouseimag=%s\n mousetitle=%s,\n mousediv=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
              (dbdat, color, title, ixmid, mae, mape, str(xlimit),
               repr(labeled), repr(saveas), repr(mousetext), repr(mouselink), repr(mouseimag),
               repr(mousetitle), repr(mousediv), repr(relpath), repr(graphicsformat)))
    else:
        # if running from Canopy, call mpl directly
        filedict, htmlcode = mpl.threads(dbdat, color=color, title=title, labels=ixmid, mae=mae, mape=mape,
                                         xlimit=xlimit, labeled=labeled, view=view,
                                         mousetext=mousetext, mouselink=mouselink,
                                         mouseimag=mouseimag, mousetitle=mousetitle, mousediv=mousediv,
                                         saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
    return filedict, htmlcode
def plot_liliowa(self, modelchem, benchmark='default',
                 failoninc=True, xlimit=2.0, view=True,
                 saveas=None, relpath=False, graphicsformat=['pdf']):
    """Compute subset MAEs for *modelchem* versus *benchmark* and hand
    them to mpl.liliowa. Returns dict of saved plot filenames, or None
    when matplotlib is unavailable.

    Note that not possible to access sset of component databases. That is, for Database SSIBBI, SSI-only arylaryl is accessible b/c not defined in BBI, but SSI-only neutral is not accessible.
    """
    # compute errors for every subset
    mc = modelchem
    errors = {}
    for ss in self.sset.keys():
        errors[ss] = self.compute_statistics(mc, benchmark=benchmark, sset=ss,
                                             failoninc=failoninc, verbose=False, returnindiv=False)

    # repackage MAEs in the fixed subset order expected by mpl.liliowa
    # (None entries become 0.0 placeholders)
    dbdat = []
    ssarray = ['pospos', 'posneg', 'pospolar', 'posaliph', 'posaryl',
               None, 'negneg', 'negpolar', 'negaliph', 'negaryl',
               None, None, 'polarpolar', 'polaraliph', 'polararyl',
               None, None, None, 'aliphaliph', 'alipharyl',
               None, None, None, None, 'arylaryl']
    for ss in ssarray:
        dbdat.append(0.0 if ss is None else errors[ss][self.dbse]['mae'])

    # generate matplotlib instructions and call or print
    filedict = None  # BUGFIX: was unbound (NameError) when matplotlib missing
    try:
        from . import mpl
        import matplotlib.pyplot as plt
    except ImportError:
        print('Matplotlib not avail')
    else:
        filedict = mpl.liliowa(dbdat, xlimit=xlimit, view=view,
                               saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
    return filedict
def plot_iowa(self, modelchem, benchmark='default', sset='default',
              failoninc=True, verbose=False,
              title='', xtitle='', xlimit=2.0,
              view=True,
              saveas=None, relpath=False, graphicsformat=['pdf']):
    """Computes individual errors for single *modelchem* versus
    *benchmark* over subset *sset*. Coloring green-to-purple with
    maximum intensity at *xlimit*. Prepares Iowa plot instructions and
    either executes them if matplotlib available (Canopy) or prints them.
    Returns dict of saved plot filenames, or None when matplotlib is
    unavailable.
    """
    # NOTE(review): the *title* parameter is overwritten here and again
    #   below, so the argument is never honored -- confirm intent
    title = self.dbse + ' ' + modelchem
    # compute errors
    mc = modelchem
    errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
                                            failoninc=failoninc, verbose=verbose, returnindiv=True)

    # repackage errors and per-reaction labels
    dbdat = []
    dblbl = []
    for db in self.dbdict.keys():
        if indiv[db] is not None:
            for rxn in indiv[db].keys():
                dbdat.append(indiv[db][rxn][0])
                dblbl.append(str(rxn))
    title = """%s vs %s for %s subset %s""" % (mc, benchmark, self.dbse, sset)
    me = errors[self.dbse]['me']

    # generate matplotlib instructions and call or print
    filedict = None  # BUGFIX: was unbound (NameError) when matplotlib missing
    try:
        from . import mpl
        import matplotlib.pyplot as plt
    except ImportError:
        # if not running from Canopy, print line to execute from Canopy
        print("""mpl.iowa(%s,\n %s,\n title='%s',\n xtitle='%s'\n xlimit=%s,\n saveas=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
              (dbdat, dblbl, title, xtitle, xlimit, repr(saveas), repr(relpath), repr(graphicsformat)))
    else:
        # if running from Canopy, call mpl directly
        filedict = mpl.iowa(dbdat, dblbl, title=title, xtitle=xtitle, xlimit=xlimit,
                            view=view,
                            saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
    return filedict
def export_pandas(self, modelchem=[], benchmark='default', sset='default', modelchemlabels=None,
                  failoninc=True):
    """Build and return a pandas DataFrame with one row per reaction in
    *sset*: system metadata, benchmark value, SAPT component energies
    (except for conformer databases), geometry, and one column per model
    chemistry in *modelchem* (relabeled via *modelchemlabels* if given).

    *modelchem* is array of model chemistries, if modelchem is empty, get only benchmark
    is benchmark needed?
    """
    import pandas as pd
    import numpy as np

    # conformer databases have no SAPT decomposition to load
    if self.dbse not in ['ACONF', 'SCONF', 'PCONF', 'CYCONF']:
        saptdata = self.load_saptdata_frombfdb(sset=sset, pythonpath='/Users/loriab/linux/bfdb/sapt_punt',
                                               failoninc=failoninc)

    listodicts = []
    rhrxn = self.get_hrxn(sset=sset)
    for dbrxn, orxn in rhrxn.items():
        wdb = dbrxn.split('-')[0]
        # BUGFIX: Py3 dict views have no .index(); materialize as list
        dbix = list(self.dbdict.keys()).index(wdb)
        wbm = self.mcs[benchmark][dbix]
        wss = self.sset[sset][dbix]
        woss = self.dbdict[wdb].oss[wss]
        try:
            Rrat = woss.axis['Rrat'][woss.hrxn.index(orxn.name)]
        except KeyError:
            Rrat = 1.0  # TODO generic soln?

        dictorxn = {}
        dictorxn['DB'] = wdb
        dictorxn['System'] = orxn.tagl
        dictorxn['Name'] = orxn.name
        dictorxn['R'] = Rrat
        dictorxn['System #'] = orxn.indx
        # BUGFIX: np.NaN was removed in NumPy 2.0; np.nan works everywhere.
        # this NaN exception is new and experimental
        dictorxn['Benchmark'] = np.nan if orxn.benchmark is None else orxn.data[wbm].value
        dictorxn['QcdbSys'] = orxn.dbrxn

        if self.dbse not in ['ACONF', 'SCONF', 'PCONF', 'CYCONF']:
            dictorxn['SAPT ELST ENERGY'] = saptdata[dbrxn]['elst']
            dictorxn['SAPT EXCH ENERGY'] = saptdata[dbrxn]['exch']
            dictorxn['SAPT IND ENERGY'] = saptdata[dbrxn]['ind']
            dictorxn['SAPT DISP ENERGY'] = saptdata[dbrxn]['disp']
            dictorxn['SAPT TOTAL ENERGY'] = dictorxn['SAPT ELST ENERGY'] + dictorxn['SAPT EXCH ENERGY'] + \
                                            dictorxn['SAPT IND ENERGY'] + dictorxn['SAPT DISP ENERGY']

        # BUGFIX: Py3 dict views aren't indexable; materialize as list
        orgts = list(orxn.rxnm['default'].keys())
        omolD = Molecule(orgts[0].mol)  # TODO this is only going to work with Reaction ~= Reagent databases
        npmolD = omolD.format_molecule_for_numpy()
        omolA = Molecule(orgts[1].mol)  # TODO this is only going to work with Reaction ~= Reagent databases
        omolA.update_geometry()
        dictorxn['MonA'] = omolA.natom()
        # this whole member fn not well defined for db of varying stoichiometry
        if self.dbse in ['ACONF', 'SCONF', 'PCONF', 'CYCONF']:
            npmolD = omolD.format_molecule_for_numpy()
            npmolA = omolA.format_molecule_for_numpy()
            dictorxn['Geometry'] = np.vstack([npmolD, npmolA])
        else:
            dictorxn['Geometry'] = omolD.format_molecule_for_numpy()
        # print '\nD', npmolD.shape[0], npmolA.shape[0], dictorxn['MonA'], npmolD, npmolA, dictorxn['Geometry']

        for mc in modelchem:
            try:
                wmc = self.mcs[mc][dbix]
            except KeyError:
                # modelchem not in Database at all
                print(mc, 'not found')
                continue
            key = mc if modelchemlabels is None else modelchemlabels[modelchem.index(mc)]
            try:
                dictorxn[key] = orxn.data[wmc].value
            except KeyError as e:
                # reaction not in modelchem
                if failoninc:
                    raise ValidationError("""Reaction %s missing datum %s.""" % (key, str(e)))
                else:
                    print(mc, str(e), 'not found')
                    continue
        listodicts.append(dictorxn)

    df = pd.DataFrame(listodicts)
    pd.set_option('display.width', 500)
    print(df.head(5))
    print(df.tail(5))
    return df
def table_reactions(self, modelchem, benchmark='default', sset='default',
                    failoninc=True,
                    columnplan=['indx', 'tagl', 'bm', 'mc', 'e', 'pe'],
                    title=r"""Reaction energies [kcal/mol] for {sset} $\subset$ {dbse} with {mc}""",
                    indextitle=r"""Detailed results for {sset} $\subset$ {dbse} with {mc}""",
                    plotpath='analysis/mols/',
                    standalone=True, theme='rxns', filename=None):
    r"""Prepare single LaTeX table to *filename* or return lines if None showing
    the per-reaction results for reactions in *sset* for single or array
    or 'all' *modelchem*, where the last uses self.mcs(), model chemistries
    versus *benchmark*. Use *failoninc* to toggle between command failing
    or blank lines in table. Use *standalone* to toggle between full
    compilable document and suitable for inclusion in another LaTeX document.
    Use *columnplan* to customize column (from among columnreservoir, below)
    layout. Use *title* and *indextitle* to customize table caption and
    table-of-contents caption, respectively; variables in curly braces will
    be substituted. Use *theme* to customize the \ref{tbl:} code.
    """
    # define eligible columns for inclusion
    columnreservoir = {
        'dbrxn': ['l', r"""\textbf{Reaction}""", """{0:25s}"""],
        'indx': ['r', '', """{0:14s}"""],
        'tagl': ['l', r"""\textbf{Reaction}""", """{0:50s}"""],
        'bm': ['d', r"""\multicolumn{1}{c}{\textbf{Benchmark}}""", """{0:8.2f}"""],
        'mc': ['d', r"""\multicolumn{1}{c}{\textbf{ModelChem}}""", """{0:8.2f}"""],
        'e': ['d', r"""\multicolumn{1}{c}{\textbf{Error}}""", """{0:8.2f}"""],
        'pe': ['d', r"""\multicolumn{1}{c}{\textbf{\% Err.}}""", """{0:8.1f}"""],
        'imag': ['l', '', r"""\includegraphics[width=1.0cm,height=3.5mm]{%s%%ss.png}""" % (plotpath)],  # untested
    }
    for col in columnplan:
        if col not in columnreservoir.keys():
            raise ValidationError('Column {0} not recognized. Register with columnreservoir.'.format(col))

    # BUGFIX: basestring is Py2-only; str suffices for the 'all' sentinel
    if isinstance(modelchem, str):
        if modelchem.lower() == 'all':
            mcs = sorted(self.mcs.keys())
        else:
            mcs = [modelchem]
    else:
        mcs = modelchem

    # commence to generate LaTeX code
    tablelines = []
    indexlines = []

    if standalone:
        tablelines += textables.begin_latex_document()

    # iterate to produce one LaTeX table per modelchem
    for mc in mcs:
        # prepare summary statistics
        perr = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
                                       failoninc=failoninc, verbose=False,
                                       returnindiv=False)
        serrors = OrderedDict()
        for db in self.dbdict.keys():
            serrors[db] = None if perr[db] is None else format_errors(perr[db], mode=3)
        serrors[self.dbse] = format_errors(perr[self.dbse], mode=3)

        # prepare individual reactions and errors
        terrors = OrderedDict()
        isComplete = True
        for (lmc, lbm, orxn) in self.get_reactions(mc, benchmark=benchmark,
                                                   sset=sset, failoninc=failoninc):
            tmp = {}
            dbrxn = orxn.dbrxn
            tmp['dbrxn'] = dbrxn.replace('_', '\\_')
            tmp['indx'] = r"""\textit{""" + str(orxn.indx) + """}"""
            tmp['tagl'] = dbrxn.split('-')[0] + ' ' + \
                          (orxn.latex if orxn.latex else orxn.tagl.replace('_', '\\_'))
            tmp['imag'] = None  # name of primary rgt

            bmdatum = orxn.data[lbm].value if lbm else None
            mcdatum = orxn.data[lmc].value if lmc else None
            tmp['bm'] = bmdatum
            tmp['mc'] = mcdatum
            if lmc and lbm:
                tmp['e'] = mcdatum - bmdatum
                tmp['pe'] = 100 * (mcdatum - bmdatum) / abs(bmdatum)
                # TODO redefining errors not good practice
            else:
                isComplete = False
                tmp['e'] = None
                tmp['pe'] = None

            terrors[dbrxn] = {}
            for c in columnreservoir.keys():
                terrors[dbrxn][c] = '' if tmp[c] is None else \
                    columnreservoir[c][2].format(tmp[c])

        fancymodelchem = self.fancy_mcs(latex=True)[mc]
        thistitle = title.format(dbse=self.dbse, mc=fancymodelchem,
                                 sset='All' if sset == 'default' else sset.upper())
        lref = [r"""tbl:qcdb"""]
        if theme:
            lref.append(theme)
        lref.append(self.dbse)
        if sset != 'default':
            lref.append(sset)
        lref.append(mc)
        ref = '-'.join(lref)

        # table intro
        tablelines.append(r"""\begingroup""")
        tablelines.append(r"""\squeezetable""")
        tablelines.append(r"""\LTcapwidth=\textwidth""")
        tablelines.append(r"""\begin{longtable}{%s}""" % (''.join([columnreservoir[col][0] for col in columnplan])))
        tablelines.append(r"""\caption{%s""" % (thistitle))
        tablelines.append(r"""\label{%s}} \\ """ % (ref))
        tablelines.append(r"""\hline\hline""")

        columntitles = [columnreservoir[col][1] for col in columnplan]
        # initial header
        tablelines.append(' & '.join(columntitles) + r""" \\ """)
        tablelines.append(r"""\hline""")
        tablelines.append(r"""\endfirsthead""")
        # to be continued header
        tablelines.append(r"""\multicolumn{%d}{@{}l}{\textit{\ldots continued} %s} \\ """ %
                          (len(columnplan), fancymodelchem))
        tablelines.append(r"""\hline\hline""")
        tablelines.append(' & '.join(columntitles) + r""" \\ """)
        tablelines.append(r"""\hline""")
        tablelines.append(r"""\endhead""")
        # to be continued footer
        tablelines.append(r"""\hline\hline""")
        tablelines.append(r"""\multicolumn{%d}{r@{}}{\textit{continued \ldots}} \\ """ %
                          (len(columnplan)))
        tablelines.append(r"""\endfoot""")
        # final footer
        tablelines.append(r"""\hline\hline""")
        tablelines.append(r"""\endlastfoot""")

        # table body
        for dbrxn, stuff in terrors.items():
            tablelines.append(' & '.join([stuff[col] for col in columnplan]) + r""" \\ """)

        # table body summary
        if any(col in ['e', 'pe'] for col in columnplan):
            field_to_put_labels = [col for col in ['tagl', 'dbrxn', 'indx'] if col in columnplan]
            if field_to_put_labels:
                for block, blkerrors in serrors.items():
                    if blkerrors:  # skip e.g., NBC block in HB of DB4
                        tablelines.append(r"""\hline""")
                        summlines = [[] for i in range(8)]
                        for col in columnplan:
                            if col == field_to_put_labels[0]:
                                summlines[0].append(
                                    r"""\textbf{Summary Statistics: %s%s}%s""" %
                                    ('' if sset == 'default' else sset + r""" $\subset$ """,
                                     block,
                                     '' if isComplete else r""", \textit{partial}"""))
                                summlines[1].append(r"""\textit{Minimal Signed Error} """)
                                summlines[2].append(r"""\textit{Minimal Absolute Error} """)
                                summlines[3].append(r"""\textit{Maximal Signed Error} """)
                                summlines[4].append(r"""\textit{Maximal Absolute Error} """)
                                summlines[5].append(r"""\textit{Mean Signed Error} """)
                                summlines[6].append(r"""\textit{Mean Absolute Error} """)
                                summlines[7].append(r"""\textit{Root-Mean-Square Error} """)
                            elif col in ['e', 'pe']:
                                summlines[0].append('')
                                summlines[1].append(blkerrors['nex' + col])
                                summlines[2].append(blkerrors['min' + col])
                                summlines[3].append(blkerrors['pex' + col])
                                summlines[4].append(blkerrors['max' + col])
                                summlines[5].append(blkerrors['m' + col])
                                summlines[6].append(blkerrors['ma' + col])
                                summlines[7].append(blkerrors['rms' + col])
                            else:
                                for ln in range(len(summlines)):
                                    summlines[ln].append('')
                        for ln in range(len(summlines)):
                            tablelines.append(' & '.join(summlines[ln]) + r""" \\ """)

        # table conclusion
        tablelines.append(r"""\end{longtable}""")
        tablelines.append(r"""\endgroup""")
        tablelines.append(r"""\clearpage""")
        tablelines.append('\n\n')

        # form table index
        thisindextitle = indextitle.format(dbse=self.dbse, mc=fancymodelchem.strip(),
                                           sset='All' if sset == 'default' else sset.upper())
        indexlines.append(r"""\scriptsize \ref{%s} & \scriptsize %s \\ """ %
                          (ref, thisindextitle))

    if standalone:
        tablelines += textables.end_latex_document()

    # form table and index return structures
    if filename is None:
        return tablelines, indexlines
    else:
        if filename.endswith('.tex'):
            filename = filename[:-4]
        with open(filename + '.tex', 'w') as handle:
            handle.write('\n'.join(tablelines))
        with open(filename + '_index.tex', 'w') as handle:
            handle.write('\n'.join(indexlines) + '\n')
        # BUGFIX: {filename} placeholders restored so the message reports actual paths
        print("""\n  LaTeX index written to {filename}_index.tex\n"""
              """  LaTeX table written to {filename}.tex\n"""
              """  >>> pdflatex {filename}\n"""
              """  >>> open /Applications/Preview.app {filename}.pdf\n""".format(filename=filename))
        filedict = {'data': os.path.abspath(filename) + '.tex',
                    'index': os.path.abspath(filename + '_index.tex')}
        return filedict
def table_wrapper(self, mtd, bas, tableplan, benchmark='default',
                  opt=['CP'], err=['mae'], sset=['default'], dbse=None,
                  opttarget=None,
                  failoninc=True,
                  xlimit=4.0, xlines=[0.0, 0.3, 1.0],
                  ialimit=2.0,
                  plotpath='autogen',
                  subjoin=True,
                  title=None, indextitle=None,
                  suppressblanks=False,
                  standalone=True, theme=None, filename=None):
    """Prepares dictionary of errors for all combinations of *mtd*, *opt*,
    *bas* with respect to model chemistry *benchmark*, mindful of *failoninc*.
    The general plan for the table, as well as defaults for landscape,
    footnotes, *title*, *indextitle, and *theme* are got from function
    *tableplan*. Once error dictionary is ready, it and all other arguments
    are passed along to textables.table_generic. Two arrays, one of table
    lines and one of index lines are returned unless *filename* is given,
    in which case they're written to file and a filedict returned.
    """
    # get plan for table from *tableplan* and some default values
    kwargs = {'plotpath': plotpath,
              'subjoin': subjoin,
              'xlines': xlines,
              'xlimit': xlimit,
              'ialimit': ialimit}
    rowplan, columnplan, landscape, footnotes, \
        suggestedtitle, suggestedtheme = tableplan(**kwargs)

    # make figure files write themselves
    autothread = {}
    autoliliowa = {}
    if plotpath == 'autogen':
        for col in columnplan:
            if col[3].__name__ == 'flat':
                if col[4] and autothread:
                    print('TODO: merge not handled')
                elif col[4] or autothread:
                    autothread.update(col[4])
                else:
                    autothread = {'dummy': True}
            elif col[3].__name__ == 'liliowa':
                autoliliowa = {'dummy': True}

    # negotiate some defaults
    dbse = [self.dbse] if dbse is None else dbse
    theme = suggestedtheme if theme is None else theme
    title = suggestedtitle if title is None else title
    indextitle = title if indextitle is None else indextitle
    opttarget = {'default': ['']} if opttarget is None else opttarget

    def unify_options(orequired, opossible):
        """Perform a merge of options tags in *orequired* and *opossible* so
        that the result is free of duplication and has the mode at the end.
        """
        opt_combos = []
        for oreq in orequired:
            for opos in opossible:
                pieces = sorted(set(oreq.split('_') + opos.split('_')))
                if '' in pieces:
                    pieces.remove('')
                for mode in ['CP', 'unCP', 'SA']:
                    if mode in pieces:
                        pieces.remove(mode)
                        pieces.append(mode)
                pieces = '_'.join(pieces)
                opt_combos.append(pieces)
        return opt_combos

    # gather list of model chemistries for table
    mcs = ['-'.join(prod) for prod in itertools.product(mtd, opt, bas)]
    mc_translator = {}
    for m, o, b in itertools.product(mtd, opt, bas):
        nominal_mc = '-'.join([m, o, b])
        for oo in unify_options([o], opttarget['default']):
            trial_mc = '-'.join([m, oo, b])
            try:
                perr = self.compute_statistics(trial_mc, benchmark=benchmark, sset='default',  # prob. too restrictive by choosing subset
                                               failoninc=False, verbose=False, returnindiv=False)
            except KeyError:
                continue
            else:
                mc_translator[nominal_mc] = trial_mc
                break
        else:
            mc_translator[nominal_mc] = None

    # compute errors
    serrors = {}
    for mc in mcs:
        serrors[mc] = {}
        for ss in self.sset.keys():
            serrors[mc][ss] = {}
            if mc_translator[mc] in self.mcs:
                # Note: not handling when one component Wdb has one translated pattern and another another
                perr = self.compute_statistics(mc_translator[mc], benchmark=benchmark, sset=ss,
                                               failoninc=failoninc, verbose=False, returnindiv=False)
                serrors[mc][ss][self.dbse] = format_errors(perr[self.dbse], mode=3)
                if not failoninc:
                    mcsscounts = self.get_missing_reactions(mc_translator[mc], sset=ss)
                    serrors[mc][ss][self.dbse]['tgtcnt'] = mcsscounts[self.dbse][0]
                    serrors[mc][ss][self.dbse]['misscnt'] = len(mcsscounts[self.dbse][1])
                if autothread:
                    if ('sset' in autothread and ss in autothread['sset']) or ('sset' not in autothread):
                        mcssplots = self.plot_flat(mc_translator[mc], benchmark=benchmark, sset=ss,
                                                   failoninc=failoninc, color='sapt', xlimit=xlimit, xlines=xlines, view=False,
                                                   saveas='flat_' + '-'.join([self.dbse, ss, mc]), relpath=True, graphicsformat=['pdf'])
                        serrors[mc][ss][self.dbse]['plotflat'] = mcssplots['pdf']
                if autoliliowa and ss == 'default':
                    mcssplots = self.plot_liliowa(mc_translator[mc], benchmark=benchmark,
                                                  failoninc=failoninc, xlimit=ialimit, view=False,
                                                  saveas='liliowa_' + '-'.join([self.dbse, ss, mc]), relpath=True, graphicsformat=['pdf'])
                    serrors[mc][ss][self.dbse]['plotliliowa'] = mcssplots['pdf']
                for db in self.dbdict.keys():
                    if perr[db] is None:
                        serrors[mc][ss][db] = None
                    else:
                        serrors[mc][ss][db] = format_errors(perr[db], mode=3)
                        if not failoninc:
                            serrors[mc][ss][db]['tgtcnt'] = mcsscounts[db][0]
                            serrors[mc][ss][db]['misscnt'] = len(mcsscounts[db][1])
            else:
                serrors[mc][ss][self.dbse] = format_errors(initialize_errors(), mode=3)
                for db in self.dbdict.keys():
                    serrors[mc][ss][db] = format_errors(initialize_errors(), mode=3)

    for key in serrors.keys():
        print("""{:>35}{:>35}{}""".format(key, mc_translator[key], serrors[key]['default'][self.dbse]['mae']))

    # find indices that would be neglected in a single sweep over table_generic
    # BUGFIX: Py3 dict views can't be summed; materialize key lists
    keysinplan = set(sum([list(col[-1].keys()) for col in columnplan], list(rowplan)))
    obvious = {'dbse': dbse, 'sset': sset, 'mtd': mtd, 'opt': opt, 'bas': bas, 'err': err}
    # BUGFIX: deleting while iterating a dict raises RuntimeError on Py3;
    #   iterate over a snapshot
    for key, vari in list(obvious.items()):
        if len(vari) == 1 or key in keysinplan:
            del obvious[key]
    iteroers = [(prod) for prod in itertools.product(*obvious.values())]

    # commence to generate LaTeX code
    tablelines = []
    indexlines = []

    if standalone:
        tablelines += textables.begin_latex_document()

    for io in iteroers:
        actvargs = dict(zip(obvious.keys(), [[k] for k in io]))
        nudbse = actvargs['dbse'] if 'dbse' in actvargs else dbse
        nusset = actvargs['sset'] if 'sset' in actvargs else sset
        numtd = actvargs['mtd'] if 'mtd' in actvargs else mtd
        nuopt = actvargs['opt'] if 'opt' in actvargs else opt
        nubas = actvargs['bas'] if 'bas' in actvargs else bas
        nuerr = actvargs['err'] if 'err' in actvargs else err

        table, index = textables.table_generic(
            mtd=numtd, bas=nubas, opt=nuopt, err=nuerr, sset=nusset, dbse=nudbse,
            rowplan=rowplan, columnplan=columnplan, serrors=serrors,
            plotpath='' if plotpath == 'autogen' else plotpath,
            subjoin=subjoin,
            title=title, indextitle=indextitle,
            suppressblanks=suppressblanks,
            landscape=landscape, footnotes=footnotes,
            standalone=False, theme=theme)

        tablelines += table
        tablelines.append('\n\n')
        indexlines += index

    if standalone:
        tablelines += textables.end_latex_document()

    # form table and index return structures
    if filename is None:
        return tablelines, indexlines
    else:
        if filename.endswith('.tex'):
            filename = filename[:-4]
        with open(filename + '.tex', 'w') as handle:
            handle.write('\n'.join(tablelines))
        with open(filename + '_index.tex', 'w') as handle:
            handle.write('\n'.join(indexlines))
        # BUGFIX: {filename} placeholders restored so the message reports actual paths
        print("""\n  LaTeX index written to {filename}_index.tex\n"""
              """  LaTeX table written to {filename}.tex\n"""
              """  >>> pdflatex {filename}\n"""
              """  >>> open /Applications/Preview.app {filename}.pdf\n""".format(filename=filename))
        filedict = {'data': os.path.abspath(filename) + '.tex',
                    'index': os.path.abspath(filename + '_index.tex')}
        return filedict
def table_scrunch(self, plotpath, subjoin):
    """Build a table plan for a compact summary table: one row per method,
    with a description column and val columns for each basis (aDZ, aTZ)
    crossed with each counterpoise treatment (unCP, CP).

    Returns the standard 6-tuple consumed by the table machinery:
    (rowplan, columnplan, landscape, footnotes, title, theme).
    """
    rowplan = ['mtd']
    # Each column spec: [alignment, heading, subheading, formatter, selector]
    columnplan = [
        ['l', r'Method', '', textables.label, {}],
        ['c', r'Description', '', textables.empty, {}]]
    for basiskey, basislabel in (('adz', r'aug-cc-pVDZ'), ('atz', r'aug-cc-pVTZ')):
        for cptreatment in ('unCP', 'CP'):
            columnplan.append(
                ['d', basislabel, cptreatment, textables.val,
                 {'bas': basiskey, 'opt': cptreatment}])
    footnotes = []
    landscape = False
    theme = 'summavg'
    # .format() collapses the escaped {{err}} into a literal {err} placeholder
    # to be filled in downstream.
    title = r"""Classification and Performance of model chemistries. Interaction energy [kcal/mol] {{err}} statistics.""".format()
    return rowplan, columnplan, landscape, footnotes, title, theme
def table_merge_abbr(self, plotpath, subjoin):
    """Specialization of table_generic into table with minimal statistics
    (three S22 and three overall) plus embedded slat diagram as suitable
    for main paper. A single table is formed in sections by *bas* with
    lines *mtd* within each section.
    """
    rowplan = ['bas', 'mtd']
    # Each column spec: [alignment, heading, subheading, formatter, selector]
    columnplan = [
        ['l', r"""Method \& Basis Set""", '', textables.label, {}]]
    # three subset-statistic columns for S22 alone, then for the full DB4
    for dbsekey, dbseheading in (('S22', r'S22'), ('DB4', r'Overall')):
        for ssetkey, ssetheading in (('hb', 'HB'), ('mxdd', 'MX/DD'), ('tt', 'TT')):
            columnplan.append(
                ['d', dbseheading, ssetheading, textables.val,
                 {'sset': ssetkey, 'dbse': dbsekey}])
    # embedded slat (error-distribution) graphic; 'blank' placeholder pdf
    columnplan.append(
        ['l', r"""Error Distribution\footnotemark[1]""",
         r"""\includegraphics[width=6.67cm,height=3.5mm]{%s%s.pdf}""" % (plotpath, 'blank'),
         textables.graphics, {}])
    columnplan.append(['d', r'Time', '', textables.empty, {}])
    # TODO Time column not right at all
    footnotes = [fnreservoir['blankslat']]
    landscape = False
    theme = 'smmerge'
    # When *subjoin* is falsy, the basis placeholder {bas} is appended to the
    # caption; {{err}}/{{opt}} survive .format() as literal placeholders.
    title = r"""Interaction energy [kcal/mol] {{err}} subset statistics with computed with {{opt}}{0}.""".format(
        '' if subjoin else r""" and {bas}""")
    return rowplan, columnplan, landscape, footnotes, title, theme
def table_merge_suppmat(self, plotpath, subjoin):
    """Specialization of table_generic into table with as many statistics
    as will fit (mostly fullcurve and a few 5min) plus embedded slat
    diagram as suitable for supplementary material. Multiple tables are
    formed, one for each in *bas* with lines *mtd* within each table.
    """
    rowplan = ['bas', 'mtd']
    # Each column spec: [alignment, heading, subheading, formatter, selector].
    # NOTE(review): a few subheadings carry trailing spaces ('TT ',
    # 'TT\footnotemark[2] ') — presumably to keep otherwise-identical
    # column labels distinct; confirm before normalizing.
    columnplan = [
        ['l', r"""Method \& Basis Set""", '', textables.label, {}],
        ['d', 'S22', 'HB', textables.val, {'sset': 'hb', 'dbse': 'S22'}],
        ['d', 'S22', 'MX', textables.val, {'sset': 'mx', 'dbse': 'S22'}],
        ['d', 'S22', 'DD', textables.val, {'sset': 'dd', 'dbse': 'S22'}],
        ['d', 'S22', 'TT', textables.val, {'sset': 'tt', 'dbse': 'S22'}],
        ['d', 'NBC10', 'MX', textables.val, {'sset': 'mx', 'dbse': 'NBC1'}],
        ['d', 'NBC10', 'DD', textables.val, {'sset': 'dd', 'dbse': 'NBC1'}],
        ['d', 'NBC10', 'TT', textables.val, {'sset': 'tt', 'dbse': 'NBC1'}],
        ['d', 'HBC6', 'HB/TT', textables.val, {'sset': 'tt', 'dbse': 'HBC1'}],
        ['d', 'HSG', 'HB', textables.val, {'sset': 'hb', 'dbse': 'HSG'}],
        ['d', 'HSG', 'MX', textables.val, {'sset': 'mx', 'dbse': 'HSG'}],
        ['d', 'HSG', 'DD', textables.val, {'sset': 'dd', 'dbse': 'HSG'}],
        ['d', 'HSG', 'TT', textables.val, {'sset': 'tt', 'dbse': 'HSG'}],
        ['d', 'Avg', 'TT ', textables.val, {'sset': 'tt', 'dbse': 'DB4'}],
        # embedded slat (error-distribution) graphic; 'blank' placeholder pdf
        ['l', r"""Error Distribution\footnotemark[1]""",
         r"""\includegraphics[width=6.67cm,height=3.5mm]{%s%s.pdf}""" % (plotpath, 'blank'),
         textables.graphics, {}],
        # near-equilibrium ("5min") subset statistics, flagged by footnote 2
        ['d', 'NBC10', r"""TT\footnotemark[2]""", textables.val, {'sset': 'tt-5min', 'dbse': 'NBC1'}],
        ['d', 'HBC6', r"""TT\footnotemark[2] """, textables.val, {'sset': 'tt-5min', 'dbse': 'HBC1'}],
        ['d', 'Avg', r"""TT\footnotemark[2]""", textables.val, {'sset': 'tt-5min', 'dbse': 'DB4'}]]
    footnotes = [fnreservoir['blankslat'], fnreservoir['5min']]
    landscape = True
    theme = 'lgmerge'
    # When *subjoin* is falsy, the basis placeholder {bas} is appended;
    # {{err}}/{{opt}} survive .format() as literal placeholders.
    title = r"""Interaction energy [kcal/mol] {{err}} subset statistics with computed with {{opt}}{0}.""".format(
        '' if subjoin else r""" and {bas}""")
    return rowplan, columnplan, landscape, footnotes, title, theme
class DB4(Database):
    """Super-database ``DB4`` merging the S22, NBC10, HBC6, and HSG
    interaction-energy benchmark databases.

    Defines cross-database subset labels (:meth:`define_supersubsets`) and
    model-chemistry synonyms (:meth:`define_supermodelchems`), plus canned
    figure and table generators used for the PT2 and DHDFT papers.
    """

    def __init__(self, pythonpath=None, loadfrompickle=False, path=None):
        """Initialize FourDatabases object from SuperDatabase"""
        Database.__init__(self, ['s22', 'nbc10', 'hbc6', 'hsg'], dbse='DB4',
                          pythonpath=pythonpath, loadfrompickle=loadfrompickle, path=path)

        # # load up data and definitions
        # self.load_qcdata_byproject('dft')
        # self.load_qcdata_byproject('pt2')
        # #self.load_qcdata_byproject('dhdft')
        # self.load_subsets()
        self.define_supersubsets()
        self.define_supermodelchems()

    def define_supersubsets(self):
        """Map each DB4-level subset label onto the constituent per-database
        subset names, ordered as (s22, nbc10, hbc6, hsg). A ``None`` entry
        excludes that database from the subset.
        """
        self.sset['tt'] = ['default', 'default', 'default', 'default']
        self.sset['hb'] = ['hb', None, 'default', 'hb']
        self.sset['mx'] = ['mx', 'mx', None, 'mx']
        self.sset['dd'] = ['dd', 'dd', None, 'dd']
        self.sset['mxdd'] = ['mxdd', 'default', None, 'mxdd']
        self.sset['pp'] = ['mxddpp', 'mxddpp', None, None]
        self.sset['np'] = ['mxddnp', 'mxddnp', None, 'mxdd']
        # equilibrium / near-equilibrium ("5min") variants of the above
        self.sset['tt-5min'] = ['default', '5min', '5min', 'default']
        self.sset['hb-5min'] = ['hb', None, '5min', 'hb']
        self.sset['mx-5min'] = ['mx', 'mx-5min', None, 'mx']
        self.sset['dd-5min'] = ['dd', 'dd-5min', None, 'dd']
        self.sset['mxdd-5min'] = ['mxdd', '5min', None, 'mxdd']
        self.sset['pp-5min'] = ['mxddpp', 'mxddpp-5min', None, None]
        self.sset['np-5min'] = ['mxddnp', 'mxddnp-5min', None, 'mxdd']

    # def benchmark(self):
    #     """Returns the model chemistry label for the database's benchmark."""
    #     return 'C2001BENCH'

    def define_supermodelchems(self):
        """Set the active benchmark label and register model-chemistry
        synonyms, mapping each DB4-level label onto the per-database labels,
        ordered as (s22, nbc10, hbc6, hsg). The NBC10 and HSG columns use
        heavy-aug ('h...') basis variants where the others use plain aug.
        """
        self.benchmark = 'C2011BENCH'
        self.mcs['C2010BENCH'] = ['S22A', 'NBC100', 'HBC60', 'HSG0']
        self.mcs['C2011BENCH'] = ['S22B', 'NBC10A', 'HBC6A', 'HSGA']

        self.mcs['CCSD-CP-adz'] = ['CCSD-CP-adz', 'CCSD-CP-hadz', 'CCSD-CP-adz', 'CCSD-CP-hadz']
        self.mcs['CCSD-CP-atz'] = ['CCSD-CP-atz', 'CCSD-CP-hatz', 'CCSD-CP-atz', 'CCSD-CP-hatz']
        self.mcs['CCSD-CP-adtz'] = ['CCSD-CP-adtz', 'CCSD-CP-hadtz', 'CCSD-CP-adtz', 'CCSD-CP-hadtz']
        self.mcs['CCSD-CP-adtzadz'] = ['CCSD-CP-adtzadz', 'CCSD-CP-adtzhadz', 'CCSD-CP-adtzadz', 'CCSD-CP-adtzhadz']
        self.mcs['CCSD-CP-atzadz'] = ['CCSD-CP-atzadz', 'CCSD-CP-atzhadz', 'CCSD-CP-atzadz', 'CCSD-CP-atzhadz']
        self.mcs['CCSD-CP-atqzadz'] = ['CCSD-CP-atqzadz', 'CCSD-CP-atqzhadz', 'CCSD-CP-atqzadz', 'CCSD-CP-atqzhadz']
        self.mcs['CCSD-CP-atzadtz'] = ['CCSD-CP-atzadtz', 'CCSD-CP-atzhadtz', 'CCSD-CP-atzadtz', 'CCSD-CP-atzhadtz']
        self.mcs['CCSD-CP-atqzadtz'] = ['CCSD-CP-atqzadtz', 'CCSD-CP-atqzhadtz', 'CCSD-CP-atqzadtz',
                                        'CCSD-CP-atqzhadtz']
        self.mcs['CCSD-CP-atqzatz'] = ['CCSD-CP-atqzatz', 'CCSD-CP-atqzhatz', 'CCSD-CP-atqzatz', 'CCSD-CP-atqzhatz']

        self.mcs['SCSCCSD-CP-adz'] = ['SCSCCSD-CP-adz', 'SCSCCSD-CP-hadz', 'SCSCCSD-CP-adz', 'SCSCCSD-CP-hadz']
        self.mcs['SCSCCSD-CP-atz'] = ['SCSCCSD-CP-atz', 'SCSCCSD-CP-hatz', 'SCSCCSD-CP-atz', 'SCSCCSD-CP-hatz']
        self.mcs['SCSCCSD-CP-adtz'] = ['SCSCCSD-CP-adtz', 'SCSCCSD-CP-hadtz', 'SCSCCSD-CP-adtz', 'SCSCCSD-CP-hadtz']
        self.mcs['SCSCCSD-CP-adtzadz'] = ['SCSCCSD-CP-adtzadz', 'SCSCCSD-CP-adtzhadz', 'SCSCCSD-CP-adtzadz',
                                          'SCSCCSD-CP-adtzhadz']
        self.mcs['SCSCCSD-CP-atzadz'] = ['SCSCCSD-CP-atzadz', 'SCSCCSD-CP-atzhadz', 'SCSCCSD-CP-atzadz',
                                         'SCSCCSD-CP-atzhadz']
        self.mcs['SCSCCSD-CP-atqzadz'] = ['SCSCCSD-CP-atqzadz', 'SCSCCSD-CP-atqzhadz', 'SCSCCSD-CP-atqzadz',
                                          'SCSCCSD-CP-atqzhadz']
        self.mcs['SCSCCSD-CP-atzadtz'] = ['SCSCCSD-CP-atzadtz', 'SCSCCSD-CP-atzhadtz', 'SCSCCSD-CP-atzadtz',
                                          'SCSCCSD-CP-atzhadtz']
        self.mcs['SCSCCSD-CP-atqzadtz'] = ['SCSCCSD-CP-atqzadtz', 'SCSCCSD-CP-atqzhadtz', 'SCSCCSD-CP-atqzadtz',
                                           'SCSCCSD-CP-atqzhadtz']
        self.mcs['SCSCCSD-CP-atqzatz'] = ['SCSCCSD-CP-atqzatz', 'SCSCCSD-CP-atqzhatz', 'SCSCCSD-CP-atqzatz',
                                          'SCSCCSD-CP-atqzhatz']

        self.mcs['SCSMICCSD-CP-adz'] = ['SCSMICCSD-CP-adz', 'SCSMICCSD-CP-hadz', 'SCSMICCSD-CP-adz',
                                        'SCSMICCSD-CP-hadz']
        self.mcs['SCSMICCSD-CP-atz'] = ['SCSMICCSD-CP-atz', 'SCSMICCSD-CP-hatz', 'SCSMICCSD-CP-atz',
                                        'SCSMICCSD-CP-hatz']
        self.mcs['SCSMICCSD-CP-adtz'] = ['SCSMICCSD-CP-adtz', 'SCSMICCSD-CP-hadtz', 'SCSMICCSD-CP-adtz',
                                         'SCSMICCSD-CP-hadtz']
        self.mcs['SCSMICCSD-CP-adtzadz'] = ['SCSMICCSD-CP-adtzadz', 'SCSMICCSD-CP-adtzhadz', 'SCSMICCSD-CP-adtzadz',
                                            'SCSMICCSD-CP-adtzhadz']
        self.mcs['SCSMICCSD-CP-atzadz'] = ['SCSMICCSD-CP-atzadz', 'SCSMICCSD-CP-atzhadz', 'SCSMICCSD-CP-atzadz',
                                           'SCSMICCSD-CP-atzhadz']
        self.mcs['SCSMICCSD-CP-atqzadz'] = ['SCSMICCSD-CP-atqzadz', 'SCSMICCSD-CP-atqzhadz', 'SCSMICCSD-CP-atqzadz',
                                            'SCSMICCSD-CP-atqzhadz']
        self.mcs['SCSMICCSD-CP-atzadtz'] = ['SCSMICCSD-CP-atzadtz', 'SCSMICCSD-CP-atzhadtz', 'SCSMICCSD-CP-atzadtz',
                                            'SCSMICCSD-CP-atzhadtz']
        self.mcs['SCSMICCSD-CP-atqzadtz'] = ['SCSMICCSD-CP-atqzadtz', 'SCSMICCSD-CP-atqzhadtz', 'SCSMICCSD-CP-atqzadtz',
                                             'SCSMICCSD-CP-atqzhadtz']
        self.mcs['SCSMICCSD-CP-atqzatz'] = ['SCSMICCSD-CP-atqzatz', 'SCSMICCSD-CP-atqzhatz', 'SCSMICCSD-CP-atqzatz',
                                            'SCSMICCSD-CP-atqzhatz']

        self.mcs['CCSDT-CP-adz'] = ['CCSDT-CP-adz', 'CCSDT-CP-hadz', 'CCSDT-CP-adz', 'CCSDT-CP-hadz']
        self.mcs['CCSDT-CP-atz'] = ['CCSDT-CP-atz', 'CCSDT-CP-hatz', 'CCSDT-CP-atz', 'CCSDT-CP-hatz']
        self.mcs['CCSDT-CP-adtz'] = ['CCSDT-CP-adtz', 'CCSDT-CP-hadtz', 'CCSDT-CP-adtz', 'CCSDT-CP-hadtz']
        self.mcs['CCSDT-CP-adtzadz'] = ['CCSDT-CP-adtzadz', 'CCSDT-CP-adtzhadz', 'CCSDT-CP-adtzadz',
                                        'CCSDT-CP-adtzhadz']
        self.mcs['CCSDT-CP-atzadz'] = ['CCSDT-CP-atzadz', 'CCSDT-CP-atzhadz', 'CCSDT-CP-atzadz', 'CCSDT-CP-atzhadz']
        self.mcs['CCSDT-CP-atqzadz'] = ['CCSDT-CP-atqzadz', 'CCSDT-CP-atqzhadz', 'CCSDT-CP-atqzadz',
                                        'CCSDT-CP-atqzhadz']
        self.mcs['CCSDT-CP-atzadtz'] = ['CCSDT-CP-atzadtz', 'CCSDT-CP-atzhadtz', 'CCSDT-CP-atzadtz',
                                        'CCSDT-CP-atzhadtz']
        self.mcs['CCSDT-CP-atqzadtz'] = ['CCSDT-CP-atqzadtz', 'CCSDT-CP-atqzhadtz', 'CCSDT-CP-atqzadtz',
                                         'CCSDT-CP-atqzhadtz']
        self.mcs['CCSDT-CP-atqzatz'] = ['CCSDT-CP-atqzatz', 'CCSDT-CP-atqzhatz', 'CCSDT-CP-atqzatz',
                                        'CCSDT-CP-atqzhatz']

    # def make_pt2_flats(self):
    # def plot_all_flats(self):
    #     """Generate pieces for inclusion into tables for PT2 paper.
    #     Note that DB4 flats use near-equilibrium subset.
    #
    #     """
    #     Database.plot_all_flats(self, modelchem=None, sset='tt-5min', xlimit=4.0,
    #                             graphicsformat=['pdf'])

    def make_pt2_Figure_3(self):
        """Plot all the graphics needed for the calendar grey bars plot
        in Fig. 3 of PT2.
        Note that in the modern implementation of class DB4, would need to
        pass ``sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min']`` to get
        published figure.
        """
        # Fig. bars (a)
        self.plot_bars(['MP2-CP-dz', 'MP2-CP-jadz', 'MP2-CP-hadz', 'MP2-CP-adz',
                        'MP2-CP-tz', 'MP2-CP-matz', 'MP2-CP-jatz', 'MP2-CP-hatz', 'MP2-CP-atz',
                        'MP2-CP-dtz', 'MP2-CP-jadtz', 'MP2-CP-hadtz', 'MP2-CP-adtz',
                        'MP2-CP-qz', 'MP2-CP-aaqz', 'MP2-CP-maqz', 'MP2-CP-jaqz', 'MP2-CP-haqz', 'MP2-CP-aqz',
                        'MP2-CP-tqz', 'MP2-CP-matqz', 'MP2-CP-jatqz', 'MP2-CP-hatqz', 'MP2-CP-atqz',
                        'MP2-CP-a5z', 'MP2-CP-aq5z'])
        self.plot_bars(['SCSMP2-CP-dz', 'SCSMP2-CP-jadz', 'SCSMP2-CP-hadz', 'SCSMP2-CP-adz',
                        'SCSMP2-CP-tz', 'SCSMP2-CP-matz', 'SCSMP2-CP-jatz', 'SCSMP2-CP-hatz', 'SCSMP2-CP-atz',
                        'SCSMP2-CP-dtz', 'SCSMP2-CP-jadtz', 'SCSMP2-CP-hadtz', 'SCSMP2-CP-adtz',
                        'SCSMP2-CP-qz', 'SCSMP2-CP-aaqz', 'SCSMP2-CP-maqz', 'SCSMP2-CP-jaqz', 'SCSMP2-CP-haqz',
                        'SCSMP2-CP-aqz',
                        'SCSMP2-CP-tqz', 'SCSMP2-CP-matqz', 'SCSMP2-CP-jatqz', 'SCSMP2-CP-hatqz', 'SCSMP2-CP-atqz',
                        'SCSMP2-CP-a5z', 'SCSMP2-CP-aq5z'])
        self.plot_bars(['SCSNMP2-CP-dz', 'SCSNMP2-CP-jadz', 'SCSNMP2-CP-hadz', 'SCSNMP2-CP-adz',
                        'SCSNMP2-CP-tz', 'SCSNMP2-CP-matz', 'SCSNMP2-CP-jatz', 'SCSNMP2-CP-hatz', 'SCSNMP2-CP-atz',
                        'SCSNMP2-CP-dtz', 'SCSNMP2-CP-jadtz', 'SCSNMP2-CP-hadtz', 'SCSNMP2-CP-adtz',
                        'SCSNMP2-CP-qz', 'SCSNMP2-CP-aaqz', 'SCSNMP2-CP-maqz', 'SCSNMP2-CP-jaqz', 'SCSNMP2-CP-haqz',
                        'SCSNMP2-CP-aqz',
                        'SCSNMP2-CP-tqz', 'SCSNMP2-CP-matqz', 'SCSNMP2-CP-jatqz', 'SCSNMP2-CP-hatqz', 'SCSNMP2-CP-atqz',
                        'SCSNMP2-CP-a5z', 'SCSNMP2-CP-aq5z'])
        # None entries leave blank slots so the bar groups stay aligned
        # across methods with missing basis sets.
        self.plot_bars([None, None, None, None,
                        'SCSMIMP2-CP-tz', 'SCSMIMP2-CP-matz', 'SCSMIMP2-CP-jatz', 'SCSMIMP2-CP-hatz', 'SCSMIMP2-CP-atz',
                        'SCSMIMP2-CP-dtz', 'SCSMIMP2-CP-jadtz', 'SCSMIMP2-CP-hadtz', 'SCSMIMP2-CP-adtz',
                        'SCSMIMP2-CP-qz', 'SCSMIMP2-CP-aaqz', 'SCSMIMP2-CP-maqz', 'SCSMIMP2-CP-jaqz',
                        'SCSMIMP2-CP-haqz', 'SCSMIMP2-CP-aqz',
                        'SCSMIMP2-CP-tqz', 'SCSMIMP2-CP-matqz', 'SCSMIMP2-CP-jatqz', 'SCSMIMP2-CP-hatqz',
                        'SCSMIMP2-CP-atqz',
                        None, None])
        self.plot_bars(['DWMP2-CP-dz', 'DWMP2-CP-jadz', 'DWMP2-CP-hadz', 'DWMP2-CP-adz',
                        'DWMP2-CP-tz', 'DWMP2-CP-matz', 'DWMP2-CP-jatz', 'DWMP2-CP-hatz', 'DWMP2-CP-atz',
                        'DWMP2-CP-dtz', 'DWMP2-CP-jadtz', 'DWMP2-CP-hadtz', 'DWMP2-CP-adtz',
                        'DWMP2-CP-qz', 'DWMP2-CP-aaqz', 'DWMP2-CP-maqz', 'DWMP2-CP-jaqz', 'DWMP2-CP-haqz',
                        'DWMP2-CP-aqz',
                        'DWMP2-CP-tqz', 'DWMP2-CP-matqz', 'DWMP2-CP-jatqz', 'DWMP2-CP-hatqz', 'DWMP2-CP-atqz',
                        'DWMP2-CP-a5z', 'DWMP2-CP-aq5z'])
        self.plot_bars(['MP2C-CP-dz', 'MP2C-CP-jadz', 'MP2C-CP-hadz', 'MP2C-CP-adz',
                        'MP2C-CP-tz', 'MP2C-CP-matz', 'MP2C-CP-jatz', 'MP2C-CP-hatz', 'MP2C-CP-atz',
                        'MP2C-CP-dtz', 'MP2C-CP-jadtz', 'MP2C-CP-hadtz', 'MP2C-CP-adtz',
                        None, None, None, None, None, 'MP2C-CP-aqz',
                        None, None, None, None, 'MP2C-CP-atqz',
                        None, None])
        self.plot_bars(['MP2C-CP-atqzdz', 'MP2C-CP-atqzjadz', 'MP2C-CP-atqzhadz', 'MP2C-CP-atqzadz',
                        'MP2C-CP-atqztz', 'MP2C-CP-atqzmatz', 'MP2C-CP-atqzjatz', 'MP2C-CP-atqzhatz', 'MP2C-CP-atqzatz',
                        'MP2C-CP-atqzdtz', 'MP2C-CP-atqzjadtz', 'MP2C-CP-atqzhadtz', 'MP2C-CP-atqzadtz'])

        # Fig. bars (c)
        self.plot_bars(['MP2F12-CP-dz', 'MP2F12-CP-jadz', 'MP2F12-CP-hadz', 'MP2F12-CP-adz',
                        'MP2F12-CP-tz', 'MP2F12-CP-matz', 'MP2F12-CP-jatz', 'MP2F12-CP-hatz', 'MP2F12-CP-atz',
                        'MP2F12-CP-dtz', 'MP2F12-CP-jadtz', 'MP2F12-CP-hadtz', 'MP2F12-CP-adtz',
                        'MP2F12-CP-aqz', 'MP2F12-CP-atqz'])
        self.plot_bars(['SCSMP2F12-CP-dz', 'SCSMP2F12-CP-jadz', 'SCSMP2F12-CP-hadz', 'SCSMP2F12-CP-adz',
                        'SCSMP2F12-CP-tz', 'SCSMP2F12-CP-matz', 'SCSMP2F12-CP-jatz', 'SCSMP2F12-CP-hatz',
                        'SCSMP2F12-CP-atz',
                        'SCSMP2F12-CP-dtz', 'SCSMP2F12-CP-jadtz', 'SCSMP2F12-CP-hadtz', 'SCSMP2F12-CP-adtz',
                        'SCSMP2F12-CP-aqz', 'SCSMP2F12-CP-atqz'])
        # NOTE(review): 'SCSNMP2F12-CP-adtz' appears twice below; by the
        # pattern of every sibling list the third entry of that row should
        # presumably be 'SCSNMP2F12-CP-hadtz' — confirm against the data.
        self.plot_bars(['SCSNMP2F12-CP-dz', 'SCSNMP2F12-CP-jadz', 'SCSNMP2F12-CP-hadz', 'SCSNMP2F12-CP-adz',
                        'SCSNMP2F12-CP-tz', 'SCSNMP2F12-CP-matz', 'SCSNMP2F12-CP-jatz', 'SCSNMP2F12-CP-hatz',
                        'SCSNMP2F12-CP-atz',
                        'SCSNMP2F12-CP-dtz', 'SCSNMP2F12-CP-jadtz', 'SCSNMP2F12-CP-adtz', 'SCSNMP2F12-CP-adtz',
                        'SCSNMP2F12-CP-aqz', 'SCSNMP2F12-CP-atqz'])
        self.plot_bars([None, None, None, None,
                        'SCSMIMP2F12-CP-tz', 'SCSMIMP2F12-CP-matz', 'SCSMIMP2F12-CP-jatz', 'SCSMIMP2F12-CP-hatz',
                        'SCSMIMP2F12-CP-atz',
                        'SCSMIMP2F12-CP-dtz', 'SCSMIMP2F12-CP-jadtz', 'SCSMIMP2F12-CP-hadtz', 'SCSMIMP2F12-CP-adtz',
                        'SCSMIMP2F12-CP-aqz', 'SCSMIMP2F12-CP-atqz'])
        self.plot_bars(['DWMP2F12-CP-dz', 'DWMP2F12-CP-jadz', 'DWMP2F12-CP-hadz', 'DWMP2F12-CP-adz',
                        'DWMP2F12-CP-tz', 'DWMP2F12-CP-matz', 'DWMP2F12-CP-jatz', 'DWMP2F12-CP-hatz', 'DWMP2F12-CP-atz',
                        'DWMP2F12-CP-dtz', 'DWMP2F12-CP-jadtz', 'DWMP2F12-CP-hadtz', 'DWMP2F12-CP-adtz',
                        'DWMP2F12-CP-aqz', 'DWMP2F12-CP-atqz'])
        self.plot_bars(['MP2CF12-CP-dz', 'MP2CF12-CP-jadz', 'MP2CF12-CP-hadz', 'MP2CF12-CP-adz',
                        'MP2CF12-CP-tz', 'MP2CF12-CP-matz', 'MP2CF12-CP-jatz', 'MP2CF12-CP-hatz', 'MP2CF12-CP-atz',
                        'MP2CF12-CP-dtz', 'MP2CF12-CP-jadtz', 'MP2CF12-CP-hadtz', 'MP2CF12-CP-adtz',
                        'MP2CF12-CP-aqz', 'MP2CF12-CP-atqz'])
        self.plot_bars(['MP2CF12-CP-atqzdz', 'MP2CF12-CP-atqzjadz', 'MP2CF12-CP-atqzhadz', 'MP2CF12-CP-atqzadz',
                        'MP2CF12-CP-atqztz', 'MP2CF12-CP-atqzmatz', 'MP2CF12-CP-atqzjatz', 'MP2CF12-CP-atqzhatz',
                        'MP2CF12-CP-atqzatz',
                        'MP2CF12-CP-atqzdtz', 'MP2CF12-CP-atqzjadtz', 'MP2CF12-CP-atqzhadtz', 'MP2CF12-CP-atqzadtz'])

    def make_pt2_Figure_2(self):
        """Plot all the graphics needed for the diffuse augmented grey
        bars plot in Fig. 2 of PT2.
        Note that in the modern implementation of class DB4, would need to
        pass ``sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min']`` to get
        published figure.
        """
        # Fig. bars (a)
        self.plot_bars(['MP2-CP-adz', 'MP2-CP-atz', 'MP2-CP-adtz',
                        'MP2-CP-aqz', 'MP2-CP-atqz', 'MP2-CP-a5z', 'MP2-CP-aq5z'])
        self.plot_bars(['SCSMP2-CP-adz', 'SCSMP2-CP-atz',
                        'SCSMP2-CP-adtz', 'SCSMP2-CP-aqz', 'SCSMP2-CP-atqz',
                        'SCSMP2-CP-a5z', 'SCSMP2-CP-aq5z'])
        self.plot_bars(['SCSNMP2-CP-adz', 'SCSNMP2-CP-atz',
                        'SCSNMP2-CP-adtz', 'SCSNMP2-CP-aqz', 'SCSNMP2-CP-atqz',
                        'SCSNMP2-CP-a5z', 'SCSNMP2-CP-aq5z'])
        # NOTE(review): the first two entries of the next two calls repeat
        # the same model chemistry ('...-atz', '...-tz'); possibly
        # intentional placeholders for missing adz/dz data — confirm.
        self.plot_bars(['SCSMIMP2-CP-atz', 'SCSMIMP2-CP-atz',
                        'SCSMIMP2-CP-adtz', 'SCSMIMP2-CP-aqz', 'SCSMIMP2-CP-atqz'])
        self.plot_bars(['SCSMIMP2-CP-tz', 'SCSMIMP2-CP-tz',
                        'SCSMIMP2-CP-dtz', 'SCSMIMP2-CP-qz', 'SCSMIMP2-CP-tqz'])
        self.plot_bars(['DWMP2-CP-adz', 'DWMP2-CP-atz', 'DWMP2-CP-adtz',
                        'DWMP2-CP-aqz', 'DWMP2-CP-atqz', 'DWMP2-CP-a5z', 'DWMP2-CP-aq5z'])
        self.plot_bars(['MP2C-CP-adz', 'MP2C-CP-adtzadz',
                        'MP2C-CP-atqzadz', 'MP2C-CP-aq5zadz', 'MP2C-CP-atz',
                        'MP2C-CP-atqzatz', 'MP2C-CP-aq5zatz', 'MP2C-CP-adtz',
                        'MP2C-CP-atqzadtz', 'MP2C-CP-aqz', 'MP2C-CP-atqz'])

        # Fig. bars (b)
        self.plot_bars(['MP3-CP-adz', 'MP3-CP-adtzadz', 'MP3-CP-atqzadz',
                        'MP3-CP-atz', 'MP3-CP-atqzatz', 'MP3-CP-adtz', 'MP3-CP-atqzadtz'])
        self.plot_bars(['MP25-CP-adz', 'MP25-CP-adtzadz', 'MP25-CP-atqzadz',
                        'MP25-CP-atz', 'MP25-CP-atqzatz', 'MP25-CP-adtz', 'MP25-CP-atqzadtz'])
        self.plot_bars(['CCSD-CP-adz', 'CCSD-CP-adtzadz', 'CCSD-CP-atqzadz',
                        'CCSD-CP-atz', 'CCSD-CP-atqzatz', 'CCSD-CP-adtz', 'CCSD-CP-atqzadtz'])
        self.plot_bars(['SCSCCSD-CP-adz', 'SCSCCSD-CP-adtzadz',
                        'SCSCCSD-CP-atqzadz', 'SCSCCSD-CP-atz', 'SCSCCSD-CP-atqzatz',
                        'SCSCCSD-CP-adtz', 'SCSCCSD-CP-atqzadtz'])
        self.plot_bars(['SCSMICCSD-CP-adz', 'SCSMICCSD-CP-adtzadz',
                        'SCSMICCSD-CP-atqzadz', 'SCSMICCSD-CP-atz', 'SCSMICCSD-CP-atqzatz',
                        'SCSMICCSD-CP-adtz', 'SCSMICCSD-CP-atqzadtz'])
        self.plot_bars(['CCSDT-CP-adz', 'CCSDT-CP-adtzadz',
                        'CCSDT-CP-atqzadz', 'CCSDT-CP-atz', 'CCSDT-CP-atqzatz',
                        'CCSDT-CP-adtz', 'CCSDT-CP-atqzadtz'])

        # Fig. bars (c)
        self.plot_bars(['MP2F12-CP-adz', 'MP2F12-CP-atz', 'MP2F12-CP-adtz',
                        'MP2F12-CP-aqz', 'MP2F12-CP-atqz'])
        self.plot_bars(['SCSMP2F12-CP-adz', 'SCSMP2F12-CP-atz',
                        'SCSMP2F12-CP-adtz', 'SCSMP2F12-CP-aqz', 'SCSMP2F12-CP-atqz'])
        self.plot_bars(['SCSNMP2F12-CP-adz', 'SCSNMP2F12-CP-atz',
                        'SCSNMP2F12-CP-adtz', 'SCSNMP2F12-CP-aqz',
                        'SCSNMP2F12-CP-atqz'])
        # NOTE(review): same leading duplicates as above — confirm.
        self.plot_bars(['SCSMIMP2F12-CP-atz', 'SCSMIMP2F12-CP-atz',
                        'SCSMIMP2F12-CP-adtz', 'SCSMIMP2F12-CP-aqz',
                        'SCSMIMP2F12-CP-atqz'])
        self.plot_bars(['SCSMIMP2F12-CP-tz', 'SCSMIMP2F12-CP-tz', 'SCSMIMP2F12-CP-dtz'])
        self.plot_bars(['DWMP2F12-CP-adz', 'DWMP2F12-CP-atz',
                        'DWMP2F12-CP-adtz', 'DWMP2F12-CP-aqz', 'DWMP2F12-CP-atqz'])
        self.plot_bars(['MP2CF12-CP-adz', 'MP2CF12-CP-adtzadz',
                        'MP2CF12-CP-atqzadz', 'MP2CF12-CP-atz', 'MP2CF12-CP-atqzatz',
                        'MP2CF12-CP-adtz', 'MP2CF12-CP-atqzadtz', 'MP2CF12-CP-aqz',
                        'MP2CF12-CP-atqz'])

        # Fig. bars (d)
        self.plot_bars(['CCSDAF12-CP-adz', 'CCSDAF12-CP-adtzadz', 'CCSDAF12-CP-atqzadz'])
        self.plot_bars(['CCSDBF12-CP-adz', 'CCSDBF12-CP-adtzadz', 'CCSDBF12-CP-atqzadz'])
        self.plot_bars(['SCSCCSDAF12-CP-adz', 'SCSCCSDAF12-CP-adtzadz', 'SCSCCSDAF12-CP-atqzadz'])
        self.plot_bars(['SCSCCSDBF12-CP-adz', 'SCSCCSDBF12-CP-adtzadz', 'SCSCCSDBF12-CP-atqzadz'])
        self.plot_bars(['SCMICCSDAF12-CP-adz', 'SCMICCSDAF12-CP-adtzadz', 'SCMICCSDAF12-CP-atqzadz'])
        self.plot_bars(['SCMICCSDBF12-CP-adz', 'SCMICCSDBF12-CP-adtzadz', 'SCMICCSDBF12-CP-atqzadz'])
        self.plot_bars(['CCSDTAF12-CP-adz', 'CCSDTAF12-CP-adtzadz', 'CCSDTAF12-CP-atqzadz'])
        self.plot_bars(['CCSDTBF12-CP-adz', 'CCSDTBF12-CP-adtzadz', 'CCSDTBF12-CP-atqzadz'])
        self.plot_bars(['DWCCSDTF12-CP-adz', 'DWCCSDTF12-CP-adtzadz', 'DWCCSDTF12-CP-atqzadz'])

    def plot_dhdft_flats(self):
        """Generate pieces for grey bars figure for DH-DFT paper."""
        # CP and unCP variants of the DFT/DH-DFT method roster, for each of
        # the aDZ and aTZ bases, restricted to the near-equilibrium subset.
        self.plot_all_flats(
            ['B97D3-CP-adz', 'PBED3-CP-adz', 'M11L-CP-adz', 'DLDFD-CP-adz', 'B3LYPD3-CP-adz', 'PBE0D3-CP-adz',
             'WB97XD-CP-adz', 'M052X-CP-adz', 'M062X-CP-adz', 'M08HX-CP-adz', 'M08SO-CP-adz', 'M11-CP-adz',
             'VV10-CP-adz',
             'LCVV10-CP-adz', 'WB97XV-CP-adz', 'PBE02-CP-adz', 'WB97X2-CP-adz', 'B2PLYPD3-CP-adz',
             'DSDPBEP86D2OPT-CP-adz', 'MP2-CP-adz'], sset='tt-5min')
        self.plot_all_flats(['B97D3-unCP-adz', 'PBED3-unCP-adz', 'M11L-unCP-adz', 'DLDFD-unCP-adz', 'B3LYPD3-unCP-adz',
                             'PBE0D3-unCP-adz',
                             'WB97XD-unCP-adz', 'M052X-unCP-adz', 'M062X-unCP-adz', 'M08HX-unCP-adz', 'M08SO-unCP-adz',
                             'M11-unCP-adz', 'VV10-unCP-adz',
                             'LCVV10-unCP-adz', 'WB97XV-unCP-adz', 'PBE02-unCP-adz', 'WB97X2-unCP-adz',
                             'B2PLYPD3-unCP-adz', 'DSDPBEP86D2OPT-unCP-adz', 'MP2-unCP-adz'], sset='tt-5min')
        self.plot_all_flats(
            ['B97D3-CP-atz', 'PBED3-CP-atz', 'M11L-CP-atz', 'DLDFD-CP-atz', 'B3LYPD3-CP-atz', 'PBE0D3-CP-atz',
             'WB97XD-CP-atz', 'M052X-CP-atz', 'M062X-CP-atz', 'M08HX-CP-atz', 'M08SO-CP-atz', 'M11-CP-atz',
             'VV10-CP-atz',
             'LCVV10-CP-atz', 'WB97XV-CP-atz', 'PBE02-CP-atz', 'WB97X2-CP-atz', 'B2PLYPD3-CP-atz',
             'DSDPBEP86D2OPT-CP-atz', 'MP2-CP-atz'], sset='tt-5min')
        self.plot_all_flats(['B97D3-unCP-atz', 'PBED3-unCP-atz', 'M11L-unCP-atz', 'DLDFD-unCP-atz', 'B3LYPD3-unCP-atz',
                             'PBE0D3-unCP-atz',
                             'WB97XD-unCP-atz', 'M052X-unCP-atz', 'M062X-unCP-atz', 'M08HX-unCP-atz', 'M08SO-unCP-atz',
                             'M11-unCP-atz', 'VV10-unCP-atz',
                             'LCVV10-unCP-atz', 'WB97XV-unCP-atz', 'PBE02-unCP-atz', 'WB97X2-unCP-atz',
                             'B2PLYPD3-unCP-atz', 'DSDPBEP86D2OPT-unCP-atz', 'MP2-unCP-atz'], sset='tt-5min')

    def make_dhdft_Figure_1(self):
        """Plot all the graphics needed for the grey bars plot
        in Fig. 1 of DHDFT.
        """
        # Fig. bars (a): Minnesota functionals; None entries insert gaps
        # between method groups.
        self.plot_bars([
            'M052X-unCP-adz', 'M052X-CP-adz', 'M052X-unCP-atz', 'M052X-CP-atz', None,
            'M062X-unCP-adz', 'M062X-CP-adz', 'M062X-unCP-atz', 'M062X-CP-atz', None,
            'M08SO-unCP-adz', 'M08SO-CP-adz', 'M08SO-unCP-atz', 'M08SO-CP-atz', None,
            'M08HX-unCP-adz', 'M08HX-CP-adz', 'M08HX-unCP-atz', 'M08HX-CP-atz', None,
            'M11-unCP-adz', 'M11-CP-adz', 'M11-unCP-atz', 'M11-CP-atz', None,
            'M11L-unCP-adz', 'M11L-CP-adz', 'M11L-unCP-atz', 'M11L-CP-atz'],
            sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
        # Fig. bars (b): dispersion-corrected GGA/hybrid functionals
        self.plot_bars([
            'PBED3-unCP-adz', 'PBED3-CP-adz', 'PBED3-unCP-atz', 'PBED3-CP-atz', None,
            'B97D3-unCP-adz', 'B97D3-CP-adz', 'B97D3-unCP-atz', 'B97D3-CP-atz', None,
            'PBE0D3-unCP-adz', 'PBE0D3-CP-adz', 'PBE0D3-unCP-atz', 'PBE0D3-CP-atz', None,
            'B3LYPD3-unCP-adz', 'B3LYPD3-CP-adz', 'B3LYPD3-unCP-atz', 'B3LYPD3-CP-atz', None,
            'DLDFD-unCP-adz', 'DLDFD-CP-adz', 'DLDFD-unCP-atz', 'DLDFD-CP-atz', None,
            'WB97XD-unCP-adz', 'WB97XD-CP-adz', 'WB97XD-unCP-atz', 'WB97XD-CP-atz'],
            sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
        # Fig. bars (c): nonlocal (VV10-family) functionals
        self.plot_bars([
            'VV10-unCP-adz', 'VV10-CP-adz', 'VV10-unCP-atz', 'VV10-CP-atz', None, None,
            'LCVV10-unCP-adz', 'LCVV10-CP-adz', 'LCVV10-unCP-atz', 'LCVV10-CP-atz', None, None,
            'WB97XV-unCP-adz', 'WB97XV-CP-adz', 'WB97XV-unCP-atz', 'WB97XV-CP-atz'],
            sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
        # Fig. bars (d): double-hybrid functionals
        self.plot_bars([
            'PBE02-unCP-adz', 'PBE02-CP-adz', 'PBE02-unCP-atz', 'PBE02-CP-atz', None,
            'WB97X2-unCP-adz', 'WB97X2-CP-adz', 'WB97X2-unCP-atz', 'WB97X2-CP-atz', None,
            'B2PLYPD3-unCP-adz', 'B2PLYPD3-CP-adz', 'B2PLYPD3-unCP-atz', 'B2PLYPD3-CP-atz', None,
            'DSDPBEP86D2OPT-unCP-adz', 'DSDPBEP86D2OPT-CP-adz', 'DSDPBEP86D2OPT-unCP-atz', 'DSDPBEP86D2OPT-CP-atz'],
            sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
        # Fig. bars (e): MP2 reference panel
        self.plot_bars([
            'MP2-unCP-adz', 'MP2-CP-adz', 'MP2-unCP-atz', 'MP2-CP-atz'],
            sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])

    def make_dhdft_Figure_2(self):
        """Plot all the graphics needed for the SAPT/DFT/WFN
        comparison plot in Fig. 2 of DHDFT.
        Note that benchmark set as reminder, not necessity, since default.
        """
        self.plot_bars([
            'SAPT0S-CP-jadz', 'SAPTDFT-CP-atz', 'SAPT2P-CP-adz', 'SAPT3M-CP-atz',
            'SAPT2PCM-CP-atz', None, 'B97D3-unCP-atz', 'B3LYPD3-CP-adz',
            'M052X-unCP-adz', 'WB97XD-CP-atz', 'WB97XV-CP-adz', 'WB97X2-CP-atz',
            'DSDPBEP86D2OPT-CP-atz', 'B2PLYPD3-CP-atz', None, 'MP2-CP-atz',
            'SCSMP2-CP-atz', 'SCSMIMP2-CP-qz', 'MP2C-CP-atqzadz',
            'MP2CF12-CP-adz', 'SCMICCSDAF12-CP-adz', 'CCSDT-CP-atz',
            'CCSDT-CP-atqzatz', 'DWCCSDTF12-CP-adz'],
            sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'],
            benchmark='C2011BENCH')

    def plot_dhdft_modelchems(self):
        """Model-chemistry comparison plots for the DH-DFT roster: CP and
        unCP variants for each of the aDZ and aTZ bases, near-equilibrium
        subset only.
        """
        self.plot_modelchems(
            ['B97D3-CP-adz', 'PBED3-CP-adz', 'M11L-CP-adz', 'DLDFD-CP-adz', 'B3LYPD3-CP-adz', 'PBE0D3-CP-adz',
             'WB97XD-CP-adz', 'M052X-CP-adz', 'M062X-CP-adz', 'M08HX-CP-adz', 'M08SO-CP-adz', 'M11-CP-adz',
             'VV10-CP-adz',
             'LCVV10-CP-adz', 'WB97XV-CP-adz', 'PBE02-CP-adz', 'WB97X2-CP-adz', 'B2PLYPD3-CP-adz',
             'DSDPBEP86D2OPT-CP-adz', 'MP2-CP-adz'], sset='tt-5min')
        self.plot_modelchems(['B97D3-unCP-adz', 'PBED3-unCP-adz', 'M11L-unCP-adz', 'DLDFD-unCP-adz', 'B3LYPD3-unCP-adz',
                              'PBE0D3-unCP-adz',
                              'WB97XD-unCP-adz', 'M052X-unCP-adz', 'M062X-unCP-adz', 'M08HX-unCP-adz', 'M08SO-unCP-adz',
                              'M11-unCP-adz', 'VV10-unCP-adz',
                              'LCVV10-unCP-adz', 'WB97XV-unCP-adz', 'PBE02-unCP-adz', 'WB97X2-unCP-adz',
                              'B2PLYPD3-unCP-adz', 'DSDPBEP86D2OPT-unCP-adz', 'MP2-unCP-adz'], sset='tt-5min')
        self.plot_modelchems(
            ['B97D3-CP-atz', 'PBED3-CP-atz', 'M11L-CP-atz', 'DLDFD-CP-atz', 'B3LYPD3-CP-atz', 'PBE0D3-CP-atz',
             'WB97XD-CP-atz', 'M052X-CP-atz', 'M062X-CP-atz', 'M08HX-CP-atz', 'M08SO-CP-atz', 'M11-CP-atz',
             'VV10-CP-atz',
             'LCVV10-CP-atz', 'WB97XV-CP-atz', 'PBE02-CP-atz', 'WB97X2-CP-atz', 'B2PLYPD3-CP-atz',
             'DSDPBEP86D2OPT-CP-atz', 'MP2-CP-atz'], sset='tt-5min')
        self.plot_modelchems(['B97D3-unCP-atz', 'PBED3-unCP-atz', 'M11L-unCP-atz', 'DLDFD-unCP-atz', 'B3LYPD3-unCP-atz',
                              'PBE0D3-unCP-atz',
                              'WB97XD-unCP-atz', 'M052X-unCP-atz', 'M062X-unCP-atz', 'M08HX-unCP-atz', 'M08SO-unCP-atz',
                              'M11-unCP-atz', 'VV10-unCP-atz',
                              'LCVV10-unCP-atz', 'WB97XV-unCP-atz', 'PBE02-unCP-atz', 'WB97X2-unCP-atz',
                              'B2PLYPD3-unCP-atz', 'DSDPBEP86D2OPT-unCP-atz', 'MP2-unCP-atz'], sset='tt-5min')

    def plot_minn_modelchems(self):
        """Model-chemistry comparison plots for the Minnesota functionals,
        unCP then CP, in aDZ and aTZ bases (full default subset).
        """
        self.plot_modelchems(
            ['DLDFD-unCP-adz', 'M052X-unCP-adz', 'M062X-unCP-adz', 'M08HX-unCP-adz', 'M08SO-unCP-adz', 'M11-unCP-adz',
             'M11L-unCP-adz',
             'DLDFD-CP-adz', 'M052X-CP-adz', 'M062X-CP-adz', 'M08HX-CP-adz', 'M08SO-CP-adz', 'M11-CP-adz',
             'M11L-CP-adz'])
        # NOTE(review): 'DlDFD-unCP-atz' (lowercase 'l') below differs from
        # every other occurrence of 'DLDFD' — presumably a typo; confirm
        # against the registered model-chemistry labels.
        self.plot_modelchems(
            ['DlDFD-unCP-atz', 'M052X-unCP-atz', 'M062X-unCP-atz', 'M08HX-unCP-atz', 'M08SO-unCP-atz', 'M11-unCP-atz',
             'M11L-unCP-atz',
             'DLDFD-CP-atz', 'M052X-CP-atz', 'M062X-CP-atz', 'M08HX-CP-atz', 'M08SO-CP-atz', 'M11-CP-atz',
             'M11L-CP-atz'])

    def make_dhdft_Table_I(self):
        """Generate the in-manuscript summary slat table for DHDFT.
        """
        self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
                                'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
                                'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
                                'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3',
                                'MP2', 'SCSNMP2', 'SCSMIMP2', 'MP2CF12', 'SCMICCSDAF12',
                                'SAPTDFT', 'SAPT0S', 'SAPT2P', 'SAPT3M', 'SAPT2PCM'],
                           bas=['adz', 'atz'],
                           tableplan=self.table_scrunch,
                           opt=['CP', 'unCP'], err=['mae'],
                           subjoin=None,
                           plotpath=None,
                           standalone=False, filename='tblssets_ex1')

    def make_dhdft_Table_II(self):
        """Generate the in-manuscript CP slat table for DHDFT.
        """
        self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
                                'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
                                'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
                                'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3', 'MP2'],
                           bas=['adz', 'atz'],
                           tableplan=self.table_merge_abbr,
                           opt=['CP'], err=['mae'],
                           subjoin=True,
                           plotpath='analysis/flats/mplflat_',  # proj still has 'mpl' prefix
                           standalone=False, filename='tblssets_ex2')

    def make_dhdft_Table_III(self):
        """Generate the in-manuscript unCP slat table for DHDFT.
        """
        self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
                                'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
                                'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
                                'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3', 'MP2'],
                           bas=['adz', 'atz'],
                           tableplan=self.table_merge_abbr,
                           opt=['unCP'], err=['mae'],
                           subjoin=True,
                           plotpath='analysis/flats/mplflat_',  # proj still has 'mpl' prefix
                           standalone=False, filename='tblssets_ex3')

    def make_dhdft_Tables_SII(self):
        """Generate the subset details suppmat Part II tables and their indices for DHDFT.
        """
        self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
                                'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
                                'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
                                'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3'],  # 'MP2']
                           bas=['adz', 'atz'],
                           tableplan=self.table_merge_suppmat,
                           opt=['CP', 'unCP'], err=['mae', 'mape'],
                           subjoin=False,
                           plotpath='analysis/flats/mplflat_',  # proj still has 'mpl' prefix
                           standalone=False, filename='tblssets')

    def make_dhdft_Tables_SIII(self):
        """Generate the per-reaction suppmat Part III tables and their indices for DHDFT.
        """
        self.table_reactions(
            ['B97D3-unCP-adz', 'B97D3-CP-adz', 'B97D3-unCP-atz', 'B97D3-CP-atz',
             'PBED3-unCP-adz', 'PBED3-CP-adz', 'PBED3-unCP-atz', 'PBED3-CP-atz',
             'M11L-unCP-adz', 'M11L-CP-adz', 'M11L-unCP-atz', 'M11L-CP-atz',
             'DLDFD-unCP-adz', 'DLDFD-CP-adz', 'DLDFD-unCP-atz', 'DLDFD-CP-atz',
             'B3LYPD3-unCP-adz', 'B3LYPD3-CP-adz', 'B3LYPD3-unCP-atz', 'B3LYPD3-CP-atz',
             'PBE0D3-unCP-adz', 'PBE0D3-CP-adz', 'PBE0D3-unCP-atz', 'PBE0D3-CP-atz',
             'WB97XD-unCP-adz', 'WB97XD-CP-adz', 'WB97XD-unCP-atz', 'WB97XD-CP-atz',
             'M052X-unCP-adz', 'M052X-CP-adz', 'M052X-unCP-atz', 'M052X-CP-atz',
             'M062X-unCP-adz', 'M062X-CP-adz', 'M062X-unCP-atz', 'M062X-CP-atz',
             'M08HX-unCP-adz', 'M08HX-CP-adz', 'M08HX-unCP-atz', 'M08HX-CP-atz',
             'M08SO-unCP-adz', 'M08SO-CP-adz', 'M08SO-unCP-atz', 'M08SO-CP-atz',
             'M11-unCP-adz', 'M11-CP-adz', 'M11-unCP-atz', 'M11-CP-atz',
             'VV10-unCP-adz', 'VV10-CP-adz', 'VV10-unCP-atz', 'VV10-CP-atz',
             'LCVV10-unCP-adz', 'LCVV10-CP-adz', 'LCVV10-unCP-atz', 'LCVV10-CP-atz',
             'WB97XV-unCP-adz', 'WB97XV-CP-adz', 'WB97XV-unCP-atz', 'WB97XV-CP-atz',
             'PBE02-unCP-adz', 'PBE02-CP-adz', 'PBE02-unCP-atz', 'PBE02-CP-atz',
             'WB97X2-unCP-adz', 'WB97X2-CP-adz', 'WB97X2-unCP-atz', 'WB97X2-CP-atz',
             'DSDPBEP86D2OPT-unCP-adz', 'DSDPBEP86D2OPT-CP-adz', 'DSDPBEP86D2OPT-unCP-atz', 'DSDPBEP86D2OPT-CP-atz',
             'B2PLYPD3-unCP-adz', 'B2PLYPD3-CP-adz', 'B2PLYPD3-unCP-atz', 'B2PLYPD3-CP-atz'],
            # 'MP2-unCP-adz', 'MP2-CP-adz', 'MP2-unCP-atz', 'MP2-CP-atz'],
            standalone=False, filename='tblrxn_all')
class ThreeDatabases(Database):
    """Super-database ``DB3`` merging the S22, A24, and HSG
    interaction-energy benchmark databases.

    Defines cross-database subset labels and model-chemistry synonyms so
    statistics can be computed uniformly over the three databases.
    """

    def __init__(self, pythonpath=None):
        """Initialize ThreeDatabases object from Database

        Parameters
        ----------
        pythonpath : str, optional
            Extra directory in which to search for the constituent
            database modules.
        """
        # Fix: *pythonpath* was previously hard-coded to None here,
        # silently discarding the caller-supplied argument (cf. DB4,
        # which forwards it).
        Database.__init__(self, ['s22', 'a24', 'hsg'], dbse='DB3', pythonpath=pythonpath)

        # load up data and definitions
        self.load_qcdata_byproject('pt2')
        self.load_qcdata_byproject('dilabio')
        self.load_qcdata_byproject('f12dilabio')
        self.load_subsets()
        self.define_supersubsets()
        self.define_supermodelchems()

    def define_supersubsets(self):
        """Map each DB3-level subset label onto the constituent per-database
        subset names, ordered as (s22, a24, hsg); ``None`` excludes that
        database from the subset.
        """
        self.sset['tt'] = ['default', 'default', 'default']
        self.sset['hb'] = ['hb', 'hb', 'hb']
        self.sset['mx'] = ['mx', 'mx', 'mx']
        self.sset['dd'] = ['dd', 'dd', 'dd']
        self.sset['mxdd'] = ['mxdd', 'mxdd', 'mxdd']
        self.sset['pp'] = ['mxddpp', 'mxddpp', 'mxddpp']
        self.sset['np'] = ['mxddnp', 'mxddnp', 'mxddnp']
        self.sset['tt-5min'] = ['default', 'default', 'default']
        self.sset['hb-5min'] = ['hb', 'hb', 'hb']
        self.sset['mx-5min'] = ['mx', 'mx', 'mx']
        self.sset['dd-5min'] = ['dd', 'dd', 'dd']
        self.sset['mxdd-5min'] = ['mxdd', 'mxdd', 'mxdd']
        self.sset['pp-5min'] = ['mxddpp', 'mxddpp', 'mxddpp']
        self.sset['np-5min'] = ['mxddnp', 'mxddnp', 'mxddnp']
        self.sset['weak'] = ['weak', 'weak', 'weak']
        self.sset['weak_hb'] = ['weak_hb', None, 'weak_hb']
        self.sset['weak_mx'] = ['weak_mx', 'weak_mx', 'weak_mx']
        self.sset['weak_dd'] = ['weak_dd', 'weak_dd', 'weak_dd']

    def define_supermodelchems(self):
        """Register model-chemistry synonyms, mapping each DB3-level label
        onto the per-database labels, ordered as (s22, a24, hsg). The A24
        column uses heavy-aug ('h...') basis variants.
        """
        # Consistency fix: these synonyms were previously written to
        # ``self.mc``, while the parallel DB4.define_supermodelchems (and
        # everything that reads synonyms) uses ``self.mcs``; with ``self.mc``
        # the entries were never registered. TODO confirm no external reader
        # relied on a ``mc`` attribute.
        self.mcs['CCSD-CP-adz'] = ['CCSD-CP-adz', 'CCSD-CP-hadz', 'CCSD-CP-adz']
        self.mcs['CCSD-CP-atz'] = ['CCSD-CP-atz', 'CCSD-CP-hatz', 'CCSD-CP-atz']
        self.mcs['CCSD-CP-adtz'] = ['CCSD-CP-adtz', 'CCSD-CP-hadtz', 'CCSD-CP-adtz']
        self.mcs['CCSD-CP-adtzadz'] = ['CCSD-CP-adtzadz', 'CCSD-CP-adtzhadz', 'CCSD-CP-adtzadz']
        self.mcs['CCSD-CP-atzadz'] = ['CCSD-CP-atzadz', 'CCSD-CP-atzhadz', 'CCSD-CP-atzadz']
        self.mcs['CCSD-CP-atqzadz'] = ['CCSD-CP-atqzadz', 'CCSD-CP-atqzhadz', 'CCSD-CP-atqzadz']
        self.mcs['CCSD-CP-atzadtz'] = ['CCSD-CP-atzadtz', 'CCSD-CP-atzhadtz', 'CCSD-CP-atzadtz']
        self.mcs['CCSD-CP-atqzadtz'] = ['CCSD-CP-atqzadtz', 'CCSD-CP-atqzhadtz', 'CCSD-CP-atqzadtz']
        self.mcs['CCSD-CP-atqzatz'] = ['CCSD-CP-atqzatz', 'CCSD-CP-atqzhatz', 'CCSD-CP-atqzatz']

        self.mcs['CCSDT-CP-adz'] = ['CCSDT-CP-adz', 'CCSDT-CP-hadz', 'CCSDT-CP-adz']
        self.mcs['CCSDT-CP-atz'] = ['CCSDT-CP-atz', 'CCSDT-CP-hatz', 'CCSDT-CP-atz']
        self.mcs['CCSDT-CP-adtz'] = ['CCSDT-CP-adtz', 'CCSDT-CP-hadtz', 'CCSDT-CP-adtz']
        self.mcs['CCSDT-CP-adtzadz'] = ['CCSDT-CP-adtzadz', 'CCSDT-CP-adtzhadz', 'CCSDT-CP-adtzadz']
        self.mcs['CCSDT-CP-atzadz'] = ['CCSDT-CP-atzadz', 'CCSDT-CP-atzhadz', 'CCSDT-CP-atzadz']
        self.mcs['CCSDT-CP-atqzadz'] = ['CCSDT-CP-atqzadz', 'CCSDT-CP-atqzhadz', 'CCSDT-CP-atqzadz']
        self.mcs['CCSDT-CP-atzadtz'] = ['CCSDT-CP-atzadtz', 'CCSDT-CP-atzhadtz', 'CCSDT-CP-atzadtz']
        self.mcs['CCSDT-CP-atqzadtz'] = ['CCSDT-CP-atqzadtz', 'CCSDT-CP-atqzhadtz', 'CCSDT-CP-atqzadtz']
        self.mcs['CCSDT-CP-atqzatz'] = ['CCSDT-CP-atqzatz', 'CCSDT-CP-atqzhatz', 'CCSDT-CP-atqzatz']
# print certain statistic for all 4 db and summary and indiv sys if min or max
# Canned caption fragments for the generated figures/tables; entries with
# {0}/{1}/{2} placeholders are filled in via str.format at use time.
fnreservoir = {
    'blankslat': r"""Errors with respect to Benchmark. Guide lines are at 0, 0.3, and 1.0 kcal/mol overbound ($-$) and underbound ($+$).""",
    '5min': r"""Only equilibrium and near-equilibrium systems included. (All S22 and HSG, 50/194 NBC10, 28/118 HBC6.)""",
    'liliowa': r"""{0}MAE (dark by {1} kcal/mol) for subsets in residue classes cation, anion, polar, aliphatic, \& aromatic (L to R).""",
    'flat': r"""{0}Errors with respect to benchmark within $\pm${1} kcal/mol. Guide lines are at {2} overbound ($-$) and underbound ($+$).""",
}
| psi4/psi4 | psi4/driver/qcdb/dbwrap.py | Python | lgpl-3.0 | 174,502 | [
"Gaussian",
"Psi4",
"PyMOL"
] | 78ee65e48b03ae4950f9e283584827ffe9cc468d6fb1b1256324bfd74f8dedc9 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for moldenwriter module."""
import os
import unittest
import cclib
from cclib.io.filewriter import MissingAttributeError
from cclib.io.moldenwriter import MoldenReformatter
from cclib.io.moldenwriter import round_molden
__filedir__ = os.path.dirname(__file__)
__filepath__ = os.path.realpath(__filedir__)
__datadir__ = os.path.join(__filepath__, "..", "..")
__testdir__ = __filedir__
class MOLDENTest(unittest.TestCase):
    """Tests for cclib's Molden-format writer against parsed GAMESS output."""

    def test_missing_attribute_error(self):
        """Check if MissingAttributeError is raised as expected."""
        fpath = os.path.join(__datadir__,
                             "data/GAMESS/basicGAMESS-US2014/dvb_un_sp.out")
        required_attrs = ['atomcoords', 'atomnos', 'natom']
        for attr in required_attrs:
            # Re-parse for every attribute so deletions do not accumulate.
            data = cclib.io.ccopen(fpath).parse()
            delattr(data, attr)
            # Molden files cannot be written if required attrs are missing.
            with self.assertRaises(MissingAttributeError):
                cclib.io.moldenwriter.MOLDEN(data)

    def test_atoms_section_size(self):
        """Check if size of Atoms section is equal to expected."""
        fpath = os.path.join(__datadir__,
                             "data/GAMESS/basicGAMESS-US2014/dvb_un_sp.out")
        data = cclib.io.ccopen(fpath).parse()
        writer = cclib.io.moldenwriter.MOLDEN(data)
        # Check size of Atoms section: one output line per atom.
        self.assertEqual(len(writer._coords_from_ccdata(-1)), data.natom)

    def test_gto_section_size(self):
        """Check if size of GTO section is equal to expected."""
        fpath = os.path.join(__datadir__,
                             "data/GAMESS/basicGAMESS-US2014/dvb_un_sp.out")
        data = cclib.io.ccopen(fpath).parse()
        writer = cclib.io.moldenwriter.MOLDEN(data)
        # Check size of GTO section: one header line per atom plus, for each
        # shell, one descriptor line and one line per primitive.
        size_gto_ccdata = 0
        for atom in data.gbasis:
            size_gto_ccdata += 1
            for prims in atom:
                size_gto_ccdata += len(prims[1]) + 1
        # Filter blank lines.
        size_gto_writer = len(list(filter(None, writer._gto_from_ccdata())))
        self.assertEqual(size_gto_writer, size_gto_ccdata)

    def test_mo_section_size(self):
        """Check if size of MO section is equal to expected."""
        fpath = os.path.join(__datadir__,
                             "data/GAMESS/basicGAMESS-US2014/dvb_un_sp.out")
        data = cclib.io.ccopen(fpath).parse()
        writer = cclib.io.moldenwriter.MOLDEN(data)
        # Check size of MO section: per MO, one coefficient line per basis
        # function plus 3 header lines (4 when symmetry labels are present).
        size_mo_ccdata = 0
        extra = 4 if hasattr(data, 'mosyms') else 3
        for i in range(data.mult):
            size_mo_ccdata += len(data.moenergies[i]) * \
                (len(data.mocoeffs[i][0]) + extra)
        # Filter blank lines.
        size_mo_writer = len(list(filter(None, writer._mo_from_ccdata())))
        self.assertEqual(size_mo_writer, size_mo_ccdata)

    def test_round_molden(self):
        """Check if Molden Style number rounding works as expected."""
        # If the 6th digit after dot is greater than 5, but is not 7,
        # round the number upto 6th place.
        # Else truncate at 6th digit after dot.
        self.assertEqual(round_molden(1), 1)
        self.assertEqual(round_molden(-1), -1)
        self.assertEqual(round_molden(0.999995789), 0.999995)
        self.assertEqual(round_molden(-0.999995789), -0.999995)
        self.assertEqual(round_molden(0.999996789), 0.999997)
        self.assertEqual(round_molden(-0.999997789), -0.999997)
        self.assertEqual(round_molden(0.999997789), 0.999997)
        self.assertEqual(round_molden(-0.999998789), -0.999999)
        self.assertEqual(round_molden(-0.999999999), -1.0)

    def test_molden_cclib_diff(self):
        """Check if file written by cclib matched file written by Molden."""
        filenames = ['dvb_un_sp', 'C_bigbasis', 'water_mp2']
        for fn in filenames:
            fpath = os.path.join(__datadir__,
                                 "data/GAMESS/basicGAMESS-US2014/"+fn+".out")
            data = cclib.io.ccopen(fpath).parse()
            cclib_out = cclib.io.moldenwriter.MOLDEN(data).generate_repr()
            # Reformat cclib's output to remove extra spaces.
            cclib_out_formatted = MoldenReformatter(cclib_out).reformat()
            fpath = os.path.join(__testdir__, "data/molden5.7_"+fn+".molden")
            # NOTE(review): file handle is never closed explicitly here;
            # it relies on garbage collection.
            molden_out = open(fpath).read()
            # Reformat Molden's output to remove extra spaces,
            # and fix number formatting.
            molden_out_formatted = MoldenReformatter(molden_out).reformat()
            # Assert if reformatted files from both writers are same.
            self.assertMultiLineEqual(molden_out_formatted,
                                      cclib_out_formatted)
# Run the unit tests when this module is executed as a script.
if __name__ == "__main__":
    unittest.main()
| gaursagar/cclib | test/io/testmoldenwriter.py | Python | bsd-3-clause | 5,052 | [
"GAMESS",
"cclib"
] | 739d71037b62b972b60efcdea151c0a73d76d3275ae06fc4ba0b4f594b10730f |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
dec,
assert_,
assert_equal,
)
import MDAnalysis as mda
from MDAnalysisTests import parser_not_found
from MDAnalysisTests.datafiles import PSF, DCD
class TestResidue(object):
    # Legacy residue-access tests predating issue 363.

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUp(self):
        # Load the reference PSF/DCD system and keep residue 101 (index 100).
        universe = mda.Universe(PSF, DCD)
        self.universe = universe
        self.res = universe.residues[100]

    def test_type(self):
        residue = self.res
        assert_(isinstance(residue, mda.core.groups.Residue))
        assert_equal(residue.resname, "ILE")
        assert_equal(residue.resid, 101)

    def test_index(self):
        third_atom = self.res.atoms[2]
        assert_(isinstance(third_atom, mda.core.groups.Atom))
        assert_equal(third_atom.name, "CA")
        assert_equal(third_atom.index, 1522)
        assert_equal(third_atom.resid, 101)

    def test_atom_order(self):
        indices = self.res.atoms.indices
        assert_equal(indices, sorted(indices))
| alejob/mdanalysis | testsuite/MDAnalysisTests/core/test_residue.py | Python | gpl-2.0 | 2,055 | [
"MDAnalysis"
] | f5f267090a93988ab14125a773a51d77f96e5de24d9d9ebad6f0a5816e17c242 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Splits *iterator* into two lists, preserving relative order:
    elements for which *predicate* is true, then those for which
    it is false.  Returns (trueList, falseList).
    """
    matched: List[Any] = []
    unmatched: List[Any] = []
    for item in iterator:
        if predicate(item):
            matched.append(item)
        else:
            unmatched.append(item)
    return matched, unmatched
class visionCallTransformer(cst.CSTTransformer):
    """libcst transformer that rewrites vision client method calls.

    Positional/keyword request arguments are folded into a single
    ``request={...}`` dict argument; the control parameters (retry,
    timeout, metadata) are kept as separate keyword arguments.
    """
    # Keyword parameters common to every client method; these must not be
    # folded into the request dict.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Maps each client method name to its ordered request-field parameters.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'add_product_to_product_set': ('name', 'product', ),
        'async_batch_annotate_files': ('requests', 'parent', ),
        'async_batch_annotate_images': ('requests', 'output_config', 'parent', ),
        'batch_annotate_files': ('requests', 'parent', ),
        'batch_annotate_images': ('requests', 'parent', ),
        'create_product': ('parent', 'product', 'product_id', ),
        'create_product_set': ('parent', 'product_set', 'product_set_id', ),
        'create_reference_image': ('parent', 'reference_image', 'reference_image_id', ),
        'delete_product': ('name', ),
        'delete_product_set': ('name', ),
        'delete_reference_image': ('name', ),
        'get_product': ('name', ),
        'get_product_set': ('name', ),
        'get_reference_image': ('name', ),
        'import_product_sets': ('parent', 'input_config', ),
        'list_products': ('parent', 'page_size', 'page_token', ),
        'list_product_sets': ('parent', 'page_size', 'page_token', ),
        'list_products_in_product_set': ('name', 'page_size', 'page_token', ),
        'list_reference_images': ('parent', 'page_size', 'page_token', ),
        'purge_products': ('parent', 'product_set_purge_config', 'delete_orphan_products', 'force', ),
        'remove_product_from_product_set': ('name', 'product', ),
        'update_product': ('product', 'update_mask', ),
        'update_product_set': ('product_set', 'update_mask', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a single call node; returns it unchanged when not applicable."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate the control keywords (retry/timeout/metadata) from the
        # request-field keywords.
        kwargs, ctrl_kwargs = partition(
            lambda a: not a.keyword.value in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args beyond the request fields were control params
        # passed positionally; rebind them by keyword.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        # Build the single request={...} argument from the remaining args.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=visionCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory

    :param in_dir: root directory to scan for ``.py`` files.
    :param out_dir: directory that receives the rewritten tree; the
        relative layout of *in_dir* is mirrored beneath it.
    :param transformer: the libcst transformer applied to each module.
    """
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )

    for fpath in pyfile_gen:
        # BUG FIX: read/write Python sources explicitly as UTF-8.  open()
        # without an encoding uses the locale's preferred encoding, which
        # can mis-decode or fail on non-UTF-8 systems, while Python source
        # files default to UTF-8 (PEP 3120).  Context managers also ensure
        # the handles are closed promptly.
        with open(fpath, 'r', encoding='utf-8') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w', encoding='utf-8') as f:
            f.write(updated.code)
if __name__ == '__main__':
    # Command-line driver: validate the two directory arguments, then run
    # the fixer over the input tree.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the vision client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Both directories must exist, and the output must be empty so no
    # pre-existing files are clobbered.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
| googleapis/python-vision | scripts/fixup_vision_v1_keywords.py | Python | apache-2.0 | 7,242 | [
"VisIt"
] | ea5b7b3200e3857331195514466f8626e3ab42a37d8b5136a6b1c9a4431803ac |
import numpy.random as random
import numpy as np
from ase import Atoms
from ase.calculators.neighborlist import NeighborList
from ase.lattice import bulk
# Build a 10-atom test system in a deliberately skewed (triclinic) cell,
# with random scaled positions spread over [-1, 2) so some atoms sit
# outside the unit cell.
atoms = Atoms(numbers=range(10),
              cell=[(0.2, 1.2, 1.4),
                    (1.4, 0.1, 1.6),
                    (1.3, 2.0, -0.1)])
atoms.set_scaled_positions(3 * random.random((10, 3)) - 1)
def count(nl, atoms):
    """Tally neighbor relations from a neighbor list.

    Returns (d, c): *d* is the summed distance over every neighbor
    pair (offsets applied through the cell), and *c* counts, per atom,
    how many pair relations it participates in (as source or target).
    """
    tallies = np.zeros(len(atoms), int)
    positions = atoms.get_positions()
    lattice = atoms.get_cell()
    total = 0.0
    for center in range(len(atoms)):
        neighbors, offsets = nl.get_neighbors(center)
        for other in neighbors:
            tallies[other] += 1
        tallies[center] += len(neighbors)
        # Minimum-image displacement of each neighbor from the center atom.
        disp = positions[neighbors] + np.dot(offsets, lattice) - positions[center]
        total += ((disp**2).sum(1)**0.5).sum()
    return total, tallies
# Exercise NeighborList for every periodic-boundary combination, sorted and
# unsorted.  NOTE(review): the loop variable shadows the builtin ``sorted``
# for the rest of the script.
for sorted in [False, True]:
    for p1 in range(2):
        for p2 in range(2):
            for p3 in range(2):
                print(p1, p2, p3)
                atoms.set_pbc((p1, p2, p3))
                nl = NeighborList(atoms.numbers * 0.2 + 0.5,
                                  skin=0.0, sorted=sorted)
                nl.update(atoms)
                d, c = count(nl, atoms)
                # Repeating the cell along the periodic directions must scale
                # the total pair distance by the repeat factor and leave the
                # per-atom neighbor counts unchanged.
                atoms2 = atoms.repeat((p1 + 1, p2 + 1, p3 + 1))
                nl2 = NeighborList(atoms2.numbers * 0.2 + 0.5,
                                   skin=0.0, sorted=sorted)
                nl2.update(atoms2)
                d2, c2 = count(nl2, atoms2)
                c2.shape = (-1, 10)
                dd = d * (p1 + 1) * (p2 + 1) * (p3 + 1) - d2
                print(dd)
                print(c2 - c)
                assert abs(dd) < 1e-10
                assert not (c2 - c).any()

# H2 dimer: the skin (0.1) means update() only rebuilds (returns True) once
# an atom has moved more than half the skin since the last build.
h2 = Atoms('H2', positions=[(0, 0, 0), (0, 0, 1)])
nl = NeighborList([0.5, 0.5], skin=0.1, sorted=True, self_interaction=False)
assert nl.update(h2)
assert not nl.update(h2)
assert (nl.get_neighbors(0)[0] == [1]).all()

h2[1].z += 0.09
assert not nl.update(h2)
assert (nl.get_neighbors(0)[0] == [1]).all()

# A second 0.09 displacement exceeds the skin: rebuild happens, and at this
# separation the atoms are no longer neighbors.
h2[1].z += 0.09
assert nl.update(h2)
assert (nl.get_neighbors(0)[0] == []).all()
assert nl.nupdates == 2

# fcc with a = sqrt(2) gives nearest-neighbor distance 1, so cutoff 0.5 per
# atom yields the full 12-fold coordination with bothways=True.
x = bulk('X', 'fcc', a=2**0.5)
print(x)
nl = NeighborList([0.5], skin=0.01, bothways=True, self_interaction=False)
nl.update(x)
assert len(nl.get_neighbors(0)[0]) == 12

nl = NeighborList([0.5] * 27, skin=0.01, bothways=True, self_interaction=False)
nl.update(x * (3, 3, 3))
for a in range(27):
    assert len(nl.get_neighbors(a)[0]) == 12
# The central atom of the 3x3x3 block needs no periodic offsets.
assert not np.any(nl.get_neighbors(13)[1])
| suttond/MODOI | ase/test/neighbor.py | Python | lgpl-3.0 | 2,402 | [
"ASE"
] | fa3dde27298f06f5027ac97d763d390854412f7e6092dd7f47a113c2fda20848 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2008 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""Slaves for payment creation
This slaves will be used when payments are being created.
"""
from copy import deepcopy
import datetime
from decimal import Decimal
from dateutil.relativedelta import relativedelta
import gtk
from kiwi import ValueUnset
from kiwi.component import get_utility
from kiwi.currency import format_price, currency
from kiwi.datatypes import ValidationError, converter
from kiwi.python import Settable
from kiwi.utils import gsignal
from kiwi.ui.delegates import GladeSlaveDelegate
from kiwi.ui.objectlist import Column
from stoqlib.api import api
from stoqlib.domain.events import CreatePaymentEvent
from stoqlib.domain.payment.card import CreditProvider, CreditCardData
from stoqlib.domain.payment.card import CardPaymentDevice
from stoqlib.domain.payment.group import PaymentGroup
from stoqlib.domain.payment.method import PaymentMethod
from stoqlib.domain.payment.payment import Payment, PaymentChangeHistory
from stoqlib.domain.payment.renegotiation import PaymentRenegotiation
from stoqlib.domain.purchase import PurchaseOrder
from stoqlib.domain.returnedsale import ReturnedSale
from stoqlib.domain.sale import Sale
from stoqlib.domain.stockdecrease import StockDecrease
from stoqlib.enums import CreatePaymentStatus
from stoqlib.exceptions import SellError, PaymentMethodError
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.baseeditor import BaseEditorSlave, BaseEditor
from stoqlib.gui.interfaces import IDomainSlaveMapper
from stoqlib.lib.dateutils import (INTERVALTYPE_MONTH,
get_interval_type_items,
create_date_interval,
localdatetime,
localtoday,
localnow)
from stoqlib.lib.defaults import DECIMAL_PRECISION
from stoqlib.lib.message import info, warning
from stoqlib.lib.payment import generate_payments_values
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.pluginmanager import get_plugin_manager
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
#
# Temporary Objects
#

# Fallback values applied when the user has not chosen an installment plan:
# a single installment, repeating every 1 interval of one month.
DEFAULT_INSTALLMENTS_NUMBER = 1
DEFAULT_INTERVALS = 1
DEFAULT_INTERVAL_TYPE = INTERVALTYPE_MONTH
class _BaseTemporaryMethodData(object):
    """In-memory holder for the installment options of a payment method.

    Bank-related fields start out unset and are only filled in by
    method slaves that use them (e.g. check payments).
    """

    def __init__(self, first_duedate=None,
                 installments_number=None):
        if not first_duedate:
            first_duedate = localtoday().date()
        self.first_duedate = first_duedate
        if not installments_number:
            installments_number = DEFAULT_INSTALLMENTS_NUMBER
        self.installments_number = installments_number
        self.intervals = DEFAULT_INTERVALS
        self.interval_type = DEFAULT_INTERVAL_TYPE
        # Bank/authorization details default to "not provided".
        for attr in ('auth_number', 'bank_id', 'bank_branch',
                     'bank_account', 'bank_first_check_number'):
            setattr(self, attr, None)
class _TemporaryCreditProviderGroupData(_BaseTemporaryMethodData):
    """Installment options plus the card provider/device being used."""

    def __init__(self, provider=None, device=None):
        _BaseTemporaryMethodData.__init__(self)
        self.provider = provider
        self.device = device
class _TemporaryPaymentData(object):
def __init__(self, description, value, due_date,
payment_number=None, bank_account=None):
self.description = description
self.value = value
self.due_date = due_date
self.payment_number = payment_number
self.bank_account = bank_account
def __repr__(self):
return '<_TemporaryPaymentData>'
class _TemporaryBankData(object):
def __init__(self, bank_number=None, bank_branch=None, bank_account=None):
self.bank_number = bank_number
self.bank_branch = bank_branch
self.bank_account = bank_account
#
# Editors
#
class _BasePaymentDataEditor(BaseEditor):
    """A base editor to set payment information.

    Edits a _TemporaryPaymentData row: due date, value and payment
    number, with kiwi validation hooks on the first two.
    """
    gladefile = 'BasePaymentDataEditor'
    model_type = _TemporaryPaymentData
    payment_widgets = ('due_date', 'value', 'payment_number')
    slave_holder = 'bank_data_slave'

    def __init__(self, model):
        # No store is needed: the model is a purely in-memory object.
        BaseEditor.__init__(self, None, model)

    #
    # Private Methods
    #

    def _setup_widgets(self):
        self.payment_number.grab_focus()

    #
    # BaseEditorSlave hooks
    #

    def get_title(self, model):
        return _(u"Edit '%s'") % model.description

    def setup_proxies(self):
        self._setup_widgets()
        self.add_proxy(self.model, self.payment_widgets)

    #
    # Kiwi callbacks
    #

    def on_due_date__validate(self, widget, value):
        # When outdated operations are allowed, any date is acceptable.
        if sysparam.get_bool('ALLOW_OUTDATED_OPERATIONS'):
            return
        if value < localtoday().date():
            return ValidationError(_(u"Expected installment due date "
                                     "must be set to a future date"))

    def on_value__validate(self, widget, value):
        if value < currency(0):
            return ValidationError(_(u"The value must be "
                                     "a positive number"))
class CheckDataEditor(_BasePaymentDataEditor):
    """An editor to set payment information of check payment method.
    """

    #
    # BaseEditorSlave hooks
    #

    def setup_slaves(self):
        # Swap whatever slave currently occupies the holder for a fresh
        # bank-data form bound to this payment's account.
        holder = self.slave_holder
        if self.get_slave(holder):
            self.detach_slave(holder)
        self.attach_slave(holder, BankDataSlave(self.model.bank_account))
class BankDataSlave(BaseEditorSlave):
    """A small slave holding only the bank identification fields.

    Used by payment-method slaves whose model references a BankAccount
    (currently the check method); proxies bank number, branch and
    account widgets onto a _TemporaryBankData model.
    """

    gladefile = 'BankDataSlave'
    model_type = _TemporaryBankData
    proxy_widgets = ('bank_number', 'bank_branch', 'bank_account')

    def __init__(self, model):
        self.model = model
        BaseEditorSlave.__init__(self, None, model)

    #
    # BaseEditorSlave hooks
    #

    def setup_proxies(self):
        self.add_proxy(self.model, self.proxy_widgets)
class PaymentListSlave(GladeSlaveDelegate):
    """A slave to manage payments with one/multiple installment(s)"""
    domain = 'stoq'
    gladefile = 'PaymentListSlave'
    # Emitted after a payment row has been edited through the editor dialog.
    gsignal('payment-edited')

    def __init__(self, payment_type, group, branch, method, total_value,
                 editor_class, parent):
        self.parent = parent
        self.branch = branch
        self.payment_type = payment_type
        self.group = group
        self.total_value = total_value
        self.editor_class = editor_class
        self.method = method
        GladeSlaveDelegate.__init__(self, gladefile=self.gladefile)
        self._setup_widgets()

    #
    # Private Methods
    #

    def _can_edit_payments(self):
        # Money payments have no editable details (no number or bank data).
        return self.method.method_name != 'money'

    def _has_bank_account(self):
        # Only the check method carries bank account information.
        return self.method.method_name == u'check'

    def _is_check_number_mandatory(self):
        return (api.sysparam.get_bool('MANDATORY_CHECK_NUMBER') and
                self._has_bank_account())

    def _get_columns(self):
        """Build the objectlist columns, varying with the payment method."""
        columns = [Column('description', title=_('Description'),
                          expand=True, data_type=str)]
        if self._has_bank_account():
            columns.extend([Column('bank_account.bank_number',
                                   title=_('Bank ID'),
                                   data_type=str, justify=gtk.JUSTIFY_RIGHT),
                            Column('bank_account.bank_branch',
                                   title=_('Bank branch'),
                                   data_type=str, justify=gtk.JUSTIFY_RIGHT),
                            Column('bank_account.bank_account',
                                   title=_('Bank account'),
                                   data_type=str, justify=gtk.JUSTIFY_RIGHT)])
        # Money methods doesn't have a payment_number related with it.
        if self.method.method_name != u'money':
            title = _('Number')
            # Add (*) to indicate on column Number that this information is
            # mandatory
            if self._is_check_number_mandatory():
                title += ' (*)'
            columns.append(Column('payment_number',
                                  title=title,
                                  data_type=str, justify=gtk.JUSTIFY_RIGHT))
        columns.extend([Column('due_date', title=_('Due date'),
                               data_type=datetime.date),
                        Column('value', title=_('Value'), data_type=currency,
                               justify=gtk.JUSTIFY_RIGHT)])
        return columns

    def _setup_widgets(self):
        self.payment_list.set_columns(self._get_columns())
        self.total_label.set_text(format_price(self.total_value, True,
                                               DECIMAL_PRECISION))

    def _update_difference_label(self):
        """Show whether the listed payments over/under-shoot the total."""
        difference = self.get_total_difference()
        if not difference:
            # NOTE(review): this label lacks the trailing colon the other
            # two branches have — possibly intentional, verify in the UI.
            label_name = _('Difference')
        elif difference > 0:
            label_name = _('Overpaid:')
        elif difference < 0:
            label_name = _('Outstanding:')
            difference *= -1
        difference = format_price(difference, True, DECIMAL_PRECISION)
        self.difference_label.set_text(difference)
        self.difference_status_label.set_text(label_name)

    def _run_edit_payment_dialog(self):
        if not self._can_edit_payments():
            return

        payment = self.payment_list.get_selected()
        # Keep a snapshot so the edit can be rolled back on cancel.
        old = deepcopy(payment)
        retval = run_dialog(self.editor_class, self.parent, payment)
        if not retval:
            # Remove the changes if dialog was canceled.
            pos = self.payment_list.get_selected_row_number()
            self.payment_list.remove(payment)
            self.payment_list.insert(pos, old)

        self.emit('payment-edited')

    #
    # Public API
    #

    def update_view(self):
        self.payment_list.refresh()
        self._update_difference_label()

    def add_payment(self, description, value, due_date, payment_number=None,
                    bank_account=None, refresh=True):
        """Add a payment to the list"""
        # FIXME: Workaround to allow the check_number to be automatically added.
        # payment_number is unicode in domain, on wizard we treat it as an int
        # so we can automatically increment it and before we actually create
        # the payment it is converted to unicode. Shouldn't it be int
        # on domain?
        if payment_number is not None:
            payment_number = unicode(payment_number)
        payment = _TemporaryPaymentData(description,
                                        value,
                                        due_date.date(),
                                        payment_number,
                                        bank_account)
        self.payment_list.append(payment)
        if refresh:
            self.update_view()

    def add_payments(self, installments_number, start_date,
                     interval, interval_type, bank_id=None, bank_branch=None,
                     bank_account=None, bank_first_number=None):
        """Repopulate the list with *installments_number* generated rows."""
        values = generate_payments_values(self.total_value,
                                          installments_number)
        due_dates = create_date_interval(interval_type=interval_type,
                                         interval=interval,
                                         count=installments_number,
                                         start_date=start_date)
        self.clear_payments()
        # Check numbers increment sequentially from the first one supplied.
        bank_check_number = bank_first_number
        for i in range(installments_number):
            bank_data = None
            if self._has_bank_account():
                bank_data = _TemporaryBankData(bank_id, bank_branch,
                                               bank_account)
            description = self.method.describe_payment(self.group, i + 1,
                                                       installments_number)
            self.add_payment(description, currency(values[i]), due_dates[i],
                             bank_check_number, bank_data, False)
            if bank_check_number is not None:
                bank_check_number += 1

        self.update_view()

    def create_payments(self):
        """Commit the payments on the list to the database"""
        if not self.is_payment_list_valid():
            return []

        payments = []
        for p in self.payment_list:
            due_date = localdatetime(p.due_date.year,
                                     p.due_date.month,
                                     p.due_date.day)
            try:
                payment = self.method.create_payment(
                    payment_type=self.payment_type, payment_group=self.group,
                    branch=self.branch, value=p.value, due_date=due_date,
                    description=p.description, payment_number=p.payment_number)
            except PaymentMethodError as err:
                warning(str(err))
                # NOTE(review): this path returns None while the guard above
                # returns [] — callers should handle both.
                return

            if p.bank_account:
                # Add the bank_account into the payment, if any.
                bank_account = payment.check_data.bank_account
                bank_account.bank_number = p.bank_account.bank_number
                bank_account.bank_branch = p.bank_account.bank_branch
                bank_account.bank_account = p.bank_account.bank_account
            payments.append(payment)

        return payments

    def clear_payments(self):
        self.payment_list.clear()
        self.update_view()

    def get_total_difference(self):
        """Return (sum of listed payments) - (expected total)."""
        total_payments = Decimal(0)
        for payment in self.payment_list:
            total_payments += payment.value
        return (total_payments - self.total_value)

    def are_due_dates_valid(self):
        if sysparam.get_bool('ALLOW_OUTDATED_OPERATIONS'):
            return True

        # Due dates must be today or later, and non-decreasing down the list.
        previous_date = localtoday().date() + datetime.timedelta(days=-1)
        for payment in self.payment_list:
            if payment.due_date <= previous_date:
                warning(_(u"Payment dates can't repeat or be lower than "
                          "previous dates."))
                return False
            previous_date = payment.due_date
        return True

    def are_payment_values_valid(self):
        return not self.get_total_difference()

    def is_check_number_valid(self):
        """Verify if check number is set"""
        if not self._is_check_number_mandatory():
            return True
        for p in self.payment_list:
            if p.payment_number is None or p.payment_number == u'':
                return False
        return True

    def is_payment_list_valid(self):
        if not self.are_due_dates_valid():
            return False
        if not self.are_payment_values_valid():
            return False
        if not self.is_check_number_valid():
            return False
        return True

    #
    # Kiwi Callbacks
    #

    def on_payment_list__row_activated(self, *args):
        self._run_edit_payment_dialog()
        self.update_view()
#
# Payment Method Slaves
#
class BasePaymentMethodSlave(BaseEditorSlave):
"""A base payment method slave for Bill and Check methods."""
gladefile = 'BasePaymentMethodSlave'
model_type = _BaseTemporaryMethodData
data_editor_class = _BasePaymentDataEditor
slave_holder = 'slave_holder'
proxy_widgets = ('interval_type_combo',
'intervals',
'first_duedate',
'installments_number',
'bank_id',
'bank_branch',
'bank_account',
'bank_first_check_number')
def __init__(self, wizard, parent, store, order_obj, payment_method,
outstanding_value=currency(0), first_duedate=None,
installments_number=None):
self.wizard = wizard
self.parent = parent
# Note that 'order' may be a Sale or a PurchaseOrder object
self.order = order_obj
self.method = payment_method
self.payment_type = self._get_payment_type()
self.total_value = outstanding_value or self._get_total_amount()
self.payment_group = self.order.group
self.payment_list = None
# This is very useful when calculating the total amount outstanding
# or overpaid of the payments
self.interest_total = currency(0)
self._first_duedate = first_duedate
self._installments_number = installments_number
BaseEditorSlave.__init__(self, store)
self.register_validate_function(self._refresh_next)
# Most of slaves don't have bank information
self._set_bank_widgets_visible(False)
self.bank_first_check_number.set_sensitive(True)
#
# Private Methods
#
def _set_bank_widgets_visible(self, visible=True):
self.bank_info_box.set_visible(visible)
def _clear_if_unset(self, widget, attr):
# FIXME: When the widget goes from not empty and valid to empty, its
# .read() method returns ValueUnset (int datatype behaviour), which
# makes the proxy not update the model, leaving the old value behind
try:
is_unset = widget.read() == ValueUnset
except ValidationError:
is_unset = True
if is_unset:
setattr(self.model, attr, None)
self.setup_payments()
def _refresh_next(self, validation_ok=True):
if not self.payment_list:
validation_ok = False
if validation_ok:
validation_ok = self.payment_list.is_payment_list_valid()
self.wizard.refresh_next(validation_ok)
def _setup_payment_list(self):
    """(Re)create the PaymentListSlave and attach it to our holder."""
    slave = PaymentListSlave(self.payment_type,
                             self.payment_group,
                             self.order.branch,
                             self.method,
                             self.total_value,
                             self.data_editor_class,
                             self.wizard)
    self.payment_list = slave
    holder = BasePaymentMethodSlave.slave_holder
    # Replace any previously attached list before adding the new one.
    if self.get_slave(holder):
        self.detach_slave(holder)
    self.attach_slave(holder, slave)
    self.setup_payments()
    slave.connect('payment-edited',
                  self._on_payment_list__edit_payment)
def _setup_widgets(self):
    """Configure ranges, interval combo and check-specific requirements."""
    max_installments = self.method.max_installments
    self.installments_number.set_range(1, max_installments)
    # Interval widgets only make sense with more than one installment.
    has_installments = (self._installments_number and
                        self._installments_number > 1 or False)
    self.intervals.set_range(1, 99)
    self.intervals.set_sensitive(has_installments)
    interval_types = get_interval_type_items(plural=True)
    self.interval_type_combo.prefill(interval_types)
    self.interval_type_combo.select_item_by_data(INTERVALTYPE_MONTH)
    self.interval_type_combo.set_sensitive(has_installments)
    # Checks may require the first check number, driven by a parameter.
    if self.method.method_name == 'check':
        check_mandatory = sysparam.get_bool('MANDATORY_CHECK_NUMBER')
        self.bank_first_check_number.set_property('mandatory', check_mandatory)
    # PaymentListSlave setup
    self._setup_payment_list()
def _get_total_amount(self):
    """Return the order total amount.

    :raises TypeError: if the order type is not supported
    """
    if isinstance(self.order, Sale):
        return self.order.get_total_sale_amount()
    elif isinstance(self.order, ReturnedSale):
        return self.model.sale_total
    elif isinstance(self.order, PurchaseOrder):
        return self.order.purchase_total
    elif isinstance(self.order, PaymentRenegotiation):
        return self.order.total
    else:
        # Mirror _get_payment_type(): fail with a descriptive message
        # instead of a bare TypeError.
        raise TypeError("Could not guess the total amount for %r" %
                        (self.order, ))
def _get_payment_type(self):
    """Map the order type to Payment.TYPE_IN / Payment.TYPE_OUT."""
    incoming_types = (Sale, PaymentRenegotiation, ReturnedSale,
                      StockDecrease)
    if isinstance(self.order, incoming_types):
        return Payment.TYPE_IN
    if isinstance(self.order, PurchaseOrder):
        return Payment.TYPE_OUT
    raise TypeError("Could not guess payment type for %r" %
                    (self.order, ))
def _create_payments(self):
    """Insert the payment_list's payments in the database."""
    return self.payment_list.create_payments()
#
# Public API
#
def setup_payments(self):
    """Setup the payments in PaymentList.

    Note: The payments are not inserted into the db until self.finish()
    is called. The wizard is responsible for that.
    """
    # Without a first due date we cannot generate any previews.
    if not self.model.first_duedate:
        return
    if self.payment_list:
        self.payment_list.add_payments(self.model.installments_number,
                                       self.model.first_duedate,
                                       self.model.intervals,
                                       self.model.interval_type,
                                       self.model.bank_id,
                                       self.model.bank_branch,
                                       self.model.bank_account,
                                       self.model.bank_first_check_number)
    self.update_view()
def update_view(self):
    """Refresh the wizard's next-button sensitivity."""
    self._refresh_next()
def get_interest_total(self):
    """Return the accumulated interest of the payments."""
    return self.interest_total
#
# PaymentMethodStep hooks
#
def finish(self):
    """Called by the wizard when going to a next step.

    :returns: False (blocking the wizard) when the payment list is
        missing or invalid; True after the payments were created.
    """
    plist = self.payment_list
    if not plist or not plist.is_payment_list_valid():
        return False
    self._create_payments()
    return True
#
# BaseEditor Slave hooks
#
def setup_proxies(self):
    """Build widgets first, then proxy them into the temporary model."""
    self._setup_widgets()
    self.proxy = self.add_proxy(self.model,
                                BasePaymentMethodSlave.proxy_widgets)
def create_model(self, store):
    """Create the temporary model that holds the user's choices."""
    return _BaseTemporaryMethodData(self._first_duedate,
                                    self._installments_number)
#
# Kiwi callbacks
#
def _on_payment_list__edit_payment(self, *args):
    """Callback for the 'payment-edited' signal on PaymentListSlave."""
    self.update_view()
def after_installments_number__changed(self, *args):
    """Enable interval widgets only for multiple installments; refresh."""
    multiple = self.model.installments_number > 1
    for widget in (self.interval_type_combo, self.intervals):
        widget.set_sensitive(multiple)
    self.setup_payments()
def after_intervals__changed(self, *args):
    """Rebuild the payment previews when the interval length changes."""
    self.setup_payments()
def after_interval_type_combo__changed(self, *args):
    """Rebuild the payment previews when the interval type changes."""
    self.setup_payments()
def after_first_duedate__changed(self, *args):
    """Rebuild the payment previews when the first due date changes."""
    self.setup_payments()
def after_bank_id__changed(self, widget):
    """Clear a stale bank id when emptied and refresh the previews."""
    self._clear_if_unset(widget, 'bank_id')
    self.setup_payments()
def after_bank_branch__changed(self, widget):
    """Clear a stale bank branch when emptied and refresh the previews."""
    self._clear_if_unset(widget, 'bank_branch')
    self.setup_payments()
def after_bank_account__changed(self, widget):
    """Clear a stale bank account when emptied and refresh the previews."""
    self._clear_if_unset(widget, 'bank_account')
    self.setup_payments()
def after_bank_first_check_number__changed(self, widget):
    """Clear a stale check number when emptied and refresh the previews."""
    self._clear_if_unset(widget, 'bank_first_check_number')
    self.setup_payments()
def on_installments_number__validate(self, widget, value):
    """Validate the number of installments against the method's limit.

    :returns: a ValidationError when *value* is zero or exceeds the
        method's maximum; None otherwise
    """
    if not value:
        return ValidationError(_("The number of installments "
                                 "cannot be 0"))
    max_installments = self.method.max_installments
    if value > max_installments:
        # The original message said "less then %d", which was both a typo
        # and wrong: the maximum itself is a valid value.
        return ValidationError(_("The number of installments "
                                 "must be less than or equal to %d") %
                               max_installments)
def on_first_duedate__validate(self, widget, value):
    """Reject past due dates unless outdated operations are allowed."""
    if sysparam.get_bool('ALLOW_OUTDATED_OPERATIONS'):
        return
    if value < datetime.date.today():
        # Previews generated for an invalid date would mislead the user.
        self.payment_list.clear_payments()
        return ValidationError(_("Expected first installment date must be "
                                 "set to a future date"))
class BillMethodSlave(BasePaymentMethodSlave):
    """Bill method slave (uses the base behaviour unchanged)."""
class CheckMethodSlave(BasePaymentMethodSlave):
    """Check method slave.

    Same behaviour as the base slave, but shows the bank information
    widgets and uses an editor that handles check data.
    """

    data_editor_class = CheckDataEditor

    def __init__(self, wizard, parent, store, total_amount,
                 payment_method, outstanding_value=currency(0),
                 first_duedate=None, installments_number=None):
        # NOTE(review): despite its name, ``total_amount`` is forwarded as
        # the base class' ``order_obj`` (the order object) — confirm with
        # callers before renaming.
        BasePaymentMethodSlave.__init__(self, wizard, parent, store,
                                        total_amount, payment_method,
                                        outstanding_value=outstanding_value,
                                        installments_number=installments_number,
                                        first_duedate=first_duedate)
        # Checks are the only method that needs the bank details visible.
        self._set_bank_widgets_visible(True)
class DepositMethodSlave(BasePaymentMethodSlave):
    """Deposit method slave (uses the base behaviour unchanged)."""
class StoreCreditMethodSlave(BasePaymentMethodSlave):
    """Store credit method slave (uses the base behaviour unchanged)."""
class MoneyMethodSlave(BasePaymentMethodSlave):
    """Money method slave (uses the base behaviour unchanged)."""
class CreditMethodSlave(BasePaymentMethodSlave):
    """Credit method slave (uses the base behaviour unchanged)."""
class CardMethodSlave(BaseEditorSlave):
    """A base payment method slave for card and finance methods.

    Available slaves are: CardMethodSlave
    """

    gladefile = 'CreditProviderMethodSlave'
    model_type = _TemporaryCreditProviderGroupData
    # Widgets proxied straight into the temporary model.
    proxy_widgets = ('card_device', 'credit_provider', 'installments_number',
                     'auth_number')
def __init__(self, wizard, parent, store, order, payment_method,
             outstanding_value=currency(0)):
    """
    :param order: the order the payments belong to
    :param outstanding_value: value still missing; falls back to the
        order total when zero/None
    """
    self.order = order
    self.wizard = wizard
    self.method = payment_method
    self._payment_group = self.order.group
    self.total_value = (outstanding_value or
                        self._get_total_amount())
    # Default card type until a radio button is toggled.
    self._selected_type = CreditCardData.TYPE_CREDIT
    BaseEditorSlave.__init__(self, store)
    self.register_validate_function(self._refresh_next)
    self.parent = parent
    self._order = order

    # this will change after the payment type is changed
    self.installments_number.set_range(1, 1)
    self._refresh_next(False)
#
# PaymentMethodStep hooks
#
def finish(self):
    """Create the payments; called by the wizard when moving on."""
    self._setup_payments()
    return True
def update_view(self):
    """Revalidate and refresh the wizard's next button."""
    self._refresh_next()
#
# BaseEditor Slave hooks
#
def setup_proxies(self):
    """Fill the widgets, hook up the proxy and apply auth requirements."""
    self._setup_widgets()
    self.proxy = self.add_proxy(self.model, self.proxy_widgets)
    # Workaround for a kiwi bug. report me
    self.credit_provider.select_item_by_position(1)
    self.credit_provider.select_item_by_position(0)
    is_mandatory = sysparam.get_bool('MANDATORY_CARD_AUTH_NUMBER')
    self.auth_number.set_property('mandatory', is_mandatory)
def create_model(self, store):
    """Create the temporary model after sanity-checking the database.

    :raises ValueError: when no card device or no credit provider is
        registered
    """
    if store.find(CardPaymentDevice).is_empty():
        raise ValueError('You must have card devices registered '
                         'before start doing sales')
    providers = CreditProvider.get_card_providers(
        self.method.store)
    if providers.count() == 0:
        raise ValueError('You must have credit providers information '
                         'stored in the database before start doing '
                         'sales')
    return _TemporaryCreditProviderGroupData(provider=None)
# Private
def _get_total_amount(self):
    """Return the order total amount.

    :raises TypeError: if the order type is not supported
    """
    if isinstance(self.order, Sale):
        return self.order.get_total_sale_amount()
    elif isinstance(self.order, ReturnedSale):
        return self.model.sale_total
    elif isinstance(self.order, PaymentRenegotiation):
        return self.order.total
    else:
        # Fail with a descriptive message instead of a bare TypeError,
        # matching BasePaymentMethodSlave._get_payment_type().
        raise TypeError("Could not guess the total amount for %r" %
                        (self.order, ))
def _setup_widgets(self):
    """Fill device/provider combos and build one radio per card type."""
    devices = CardPaymentDevice.get_devices(self.method.store)
    self.card_device.prefill(api.for_combo(devices))
    providers = CreditProvider.get_card_providers(
        self.method.store)
    self.credit_provider.prefill(api.for_combo(providers))
    self._radio_group = None

    for ptype, name in CreditCardData.types.items():
        self._add_card_type(name, ptype)
def _add_card_type(self, name, payment_type):
    """Append a radio button for *payment_type* to the types box."""
    radio = gtk.RadioButton(self._radio_group, name)
    # Stash the card type on the widget for the toggled handler.
    radio.set_data('type', payment_type)
    radio.connect('toggled', self._on_card_type_radio_toggled)
    self.types_box.pack_start(radio)
    radio.show()
    # The first radio created anchors the group.
    if self._radio_group is None:
        self._radio_group = radio
def _on_card_type_radio_toggled(self, radio):
    """Track the active card type and adjust the installments range."""
    if not radio.get_active():
        return

    self._selected_type = radio.get_data('type')
    self._setup_max_installments()
def _validate_auth_number(self):
    """Check that the authorization number is filled in when mandatory."""
    mandatory = sysparam.get_bool('MANDATORY_CARD_AUTH_NUMBER')
    return not (mandatory and self.auth_number.read() == ValueUnset)
def _refresh_next(self, validation_ok=True):
    """Enable the wizard's next only with installments and auth number."""
    validation_ok = (validation_ok and self.model.installments_number and
                     self._validate_auth_number())
    self.wizard.refresh_next(validation_ok)
def _setup_max_installments(self):
    """Adjust the installments range for the currently selected card type."""
    card_type = self._selected_type
    maximum = 1

    if card_type == CreditCardData.TYPE_CREDIT_INSTALLMENTS_STORE:
        maximum = self.method.max_installments
    elif card_type == CreditCardData.TYPE_CREDIT_INSTALLMENTS_PROVIDER:
        provider = self.credit_provider.read()
        # If we have a credit provider, use the limit from that provider,
        # otherwise fallback to the payment method.
        # The original tested ``self.credit_provider`` (the widget, which
        # is never None) instead of the value read from it, so the
        # fallback branch was unreachable and a missing provider crashed.
        if provider is not None and provider is not ValueUnset:
            maximum = provider.max_installments
        else:
            maximum = self.method.max_installments

    if maximum > 1:
        minimum = 2
    else:
        minimum = 1

    # Use set_editable instead of set_sensitive so that the invalid state
    # disables the finish
    self.installments_number.set_editable(maximum != 1)
    # TODO: Prevent validation signal here to avoid duplicate effort
    self.installments_number.set_range(minimum, maximum)
    self.installments_number.validate(force=True)
def _update_card_device(self):
    """Preselect the provider's default device, when it has one."""
    provider = self.credit_provider.get_selected()
    if provider and provider.default_device:
        self.card_device.update(provider.default_device)
def _get_payment_details(self, cost=None):
    """Return the due date of the first payment.

    All other payments will have a one month delta from it. The state
    considered is:

      - The order type: purchase orders are always due today
      - The ``payment_days`` from the provider *cost*, when given

    :param cost: the provider cost configuration, or None
    :returns: a datetime for the first installment's due date
    """
    # If its a purchase order, the due date is today and there is no
    # automatic delay. (The original returned a ``(date, 0, 0)`` tuple
    # here, which would break the caller's ``first_duedate +
    # relativedelta(...)`` arithmetic.)
    if isinstance(self._order, PurchaseOrder):
        return localnow()

    # If there is no configuration for this payment settings, still let the
    # user sell, but there will be no automatic calculation of the first due
    # date and any other cost related to the payment.
    if cost:
        payment_days = cost.payment_days
    else:
        payment_days = 0

    today = localnow()
    first_duedate = today + relativedelta(days=payment_days)
    return first_duedate
def _setup_payments(self):
    """Create the card payments (and their card data) in the database."""
    device = self.card_device.read()
    cost = device.get_provider_cost(provider=self.model.provider,
                                    card_type=self._selected_type,
                                    installments=self.model.installments_number)
    first_duedate = self._get_payment_details(cost)
    due_dates = []
    # One payment per installment, one month apart.
    for i in range(self.model.installments_number):
        due_dates.append(first_duedate + relativedelta(months=i))

    if isinstance(self._order, PurchaseOrder):
        # Outgoing payments: no card data is attached below.
        payments = self.method.create_payments(Payment.TYPE_OUT,
                                               self._payment_group,
                                               self.order.branch,
                                               self.total_value, due_dates)
        return

    payments = self.method.create_payments(Payment.TYPE_IN,
                                           self._payment_group,
                                           self.order.branch,
                                           self.total_value, due_dates)

    operation = self.method.operation
    # Attach device/provider/type details to each created payment.
    for payment in payments:
        data = operation.get_card_data_by_payment(payment)
        data.installments = self.model.installments_number
        data.auth = self.model.auth_number
        data.update_card_data(device=device,
                              provider=self.model.provider,
                              card_type=self._selected_type,
                              installments=data.installments)
#
# Callbacks
#
def on_credit_provider__changed(self, combo):
    """Recompute limits and default device when the provider changes."""
    self._setup_max_installments()
    self._update_card_device()
def on_card_device__changed(self, combo):
    """Revalidate installments, since cost tables are per device."""
    self.installments_number.validate(force=True)
def on_installments_number__validate(self, entry, installments):
    """Validate the typed number of installments against the range."""
    provider = self.credit_provider.read()
    device = self.card_device.read()
    # Prevent validating while the dialog is still being set up.
    if ValueUnset in (device, provider, installments):
        return

    min_installments, max_installments = self.installments_number.get_range()
    if not min_installments <= installments <= max_installments:
        return ValidationError(_(u'Number of installments must be greater '
                                 'than %d and lower than %d')
                               % (min_installments, max_installments))
def on_auth_number__validate(self, entry, value):
    """Limit the authorization number to at most 6 characters."""
    if entry.get_text_length() > 6:
        return ValidationError(_("Authorization number must have 6 digits or less."))
class _MultipleMethodEditor(BaseEditor):
    """A generic editor that attaches a payment method slave in a toplevel
    window.
    """

    gladefile = 'HolderTemplate'
    model_type = PaymentGroup
    model_name = _(u'Payment')
    # Fixed height; the width follows the attached slave.
    size = (-1, 375)
def __init__(self, wizard, parent, store, order, payment_method,
             outstanding_value=currency(0)):
    """
    :param payment_method: the method whose registered slave class is
        looked up through IDomainSlaveMapper and attached here
    """
    BaseEditor.__init__(self, store, order.group)

    self._method = payment_method
    dsm = get_utility(IDomainSlaveMapper)
    slave_class = dsm.get_slave_class(self._method)
    assert slave_class

    # Allows on_cancel() to undo everything created by the slave.
    self.store.savepoint('before_payment_creation')

    # FIXME: This is a workaround to make the slave_class to ignore the
    # payments created previously.
    class _InnerSlaveClass(slave_class):
        def get_created_adapted_payments(self):
            return []

    self.slave = _InnerSlaveClass(wizard, parent, self.store, order,
                                  self._method, outstanding_value)
    # FIXME: We need to control how many payments could be created, since
    # we are ignoring the payments created previously.
    payments = order.group.get_valid_payments().find(
        Payment.method_id == self._method.id)
    max_installments = self._method.max_installments - payments.count()
    self.slave.installments_number.set_range(1, max_installments)

    self.attach_slave('place_holder', self.slave)
def validate_confirm(self):
    """Confirming delegates to the inner slave's finish()."""
    return self.slave.finish()
def on_cancel(self):
    """Undo everything the slave created since the editor was opened."""
    self.store.rollback_to_savepoint('before_payment_creation')
class MultipleMethodSlave(BaseEditorSlave):
    """A base payment method slave for multiple payments

    This slave is used to create/edit payments for an order in
    a wizard and should be attached to a step. It will have a
    list with all the order's payments and the possibility to
    remove each of them and add new ones.

    Useful to create payments in any method you want where their
    sums with the existing payments should match the total (that is,
    the ``outstanding_value`` or the model's total value). Note that
    some arguments passed to __init__ will drastically modify the
    behaviour of this slave.

    Hint: When adding an amount of money greater than the actual
    outstanding value, the actual value that will be added is
    the outstanding value, so be prepared to give some change
    """

    gladefile = 'MultipleMethodSlave'
    # Accepts several order types (Sale, PurchaseOrder, ...), hence object.
    model_type = object
# FIXME: Remove payment_method arg as it's not used
def __init__(self, wizard, parent, store, order, payment_method=None,
             outstanding_value=currency(0), finish_on_total=True,
             allow_remove_paid=True, require_total_value=True):
    """Initializes the slave

    :param wizard: a :class:`stoqlib.gui.base.wizards.BaseWizard`
        instance
    :param parent: the parent of this slave (normally the one
        who is attaching it)
    :param store: a :class:`stoqlib.database.runtime.StoqlibStore`
        instance
    :param order: the order in question. This slave is prepared to
        handle a |sale|, |purchase|, |returnedsale|,
        |stockdecrease| and |paymentrenegotiation|
    :param payment_method: Not used. Just ignore or pass None
    :param outstanding_value: the outstanding value, that is
        the quantity still missing in payments. If not passed,
        it will be retrieved from the order directly
    :param finish_on_total: if we should finish the ``wizard`` when
        the total is reached. Note that it will finish as soon as
        payment (that reached the total) is added
    :param allow_remove_paid: if we should allow to remove (cancel)
        paid payments from the list
    :param require_total_value: if ``True``, we can only finish
        the wizard if there's no outstanding value (that is, the
        sum of all payments is equal to the total). Useful to allow
        the partial payments creation
    """
    self._require_total_value = require_total_value
    self._has_modified_payments = False
    self._allow_remove_paid = allow_remove_paid
    self.finish_on_total = finish_on_total
    # We need a temporary object to hold the value that will be read from
    # the user. We will set a proxy with this temporary object to help
    # with the validation.
    self._holder = Settable(value=Decimal(0))
    self._wizard = wizard
    # 'money' is the default payment method and it is always available.
    self._method = PaymentMethod.get_by_name(store, u'money')
    self._outstanding_value = outstanding_value

    BaseEditorSlave.__init__(self, store, order)

    self._outstanding_value = (self._outstanding_value or
                               self._get_total_amount())
    self._total_value = self._outstanding_value
    self._setup_widgets()

    self.register_validate_function(self._refresh_next)
    self.force_validation()
def setup_proxies(self):
    """Proxy only the value entry, backed by the temporary holder."""
    self._proxy = self.add_proxy(self._holder, ['value'])
# The two methods below are required to be a payment method slave without
# inheriting BasePaymentMethodSlave.
def update_view(self):
    """Revalidate; lock the back button once payments were touched."""
    self.force_validation()
    # If this is a sale wizard, we cannot go back after payments have
    # started being created.
    if self._has_modified_payments:
        self._wizard.disable_back()
def finish(self):
    """Always True: payments are created eagerly by the inner slaves."""
    # All the payments are created in slaves. We still need to return
    # True so the wizard can finish
    return True
#
# Private
#
def _get_total_amount(self):
    """Return the value this slave's payments must add up to."""
    if isinstance(self.model, Sale):
        sale_total = self.model.get_total_sale_amount()
        # When editing the payments of a returned sale, we should deduct the
        # value that was already returned.
        returned_total = self.model.get_returned_value()
        return sale_total - returned_total
    elif isinstance(self.model, ReturnedSale):
        return self.model.sale_total
    elif isinstance(self.model, PaymentRenegotiation):
        return self.model.total
    elif isinstance(self.model, PurchaseOrder):
        # If it is a purchase, consider the total amount as the total of
        # payments, since it includes surcharges and discounts, and may
        # include the freight (the freight can also be in a different
        # payment group - which should not be considered here.)
        return self.model.group.get_total_value()
    elif isinstance(self.model, StockDecrease):
        return self.model.get_total_cost()
    else:
        raise AssertionError
def _setup_widgets(self):
    """Build the method radios, the payment list and the value labels."""
    # Removing payments should only be disabled when using the tef plugin
    manager = get_plugin_manager()
    if manager.is_active('tef'):
        self.remove_button.hide()
    if isinstance(self.model, (PaymentRenegotiation, Sale, ReturnedSale,
                               StockDecrease)):
        payment_type = Payment.TYPE_IN
    elif isinstance(self.model, PurchaseOrder):
        payment_type = Payment.TYPE_OUT
    else:
        raise AssertionError

    # FIXME: All this code is duplicating SelectPaymentMethodSlave.
    # Replace this with it
    money_method = PaymentMethod.get_by_name(self.store, u'money')
    self._add_method(money_method, payment_type)
    for method in PaymentMethod.get_creatable_methods(
            self.store, payment_type, separate=False):
        # 'multiple' would recurse and 'money' was already added above.
        if method.method_name in [u'multiple', u'money']:
            continue
        self._add_method(method, payment_type)

    self.payments.set_columns(self._get_columns())
    self.payments.add_list(self.model.group.payments)

    self.total_value.set_bold(True)
    self.received_value.set_bold(True)
    self.missing_value.set_bold(True)
    self.change_value.set_bold(True)
    self.total_value.update(self._total_value)
    self.remove_button.set_sensitive(False)
    self._update_values()
def toggle_new_payments(self):
    """Toggle new payments addition interface.

    Enables the value entry and add button only while there is still
    an outstanding value to be covered.
    """
    can_add = self._outstanding_value != 0
    self.value.set_sensitive(can_add)
    self.add_button.set_sensitive(can_add)
def _update_values(self):
    """Recompute the outstanding value and suggest a value to add."""
    total_payments = self.model.group.get_total_value()
    self._outstanding_value = self._total_value - total_payments
    self.toggle_new_payments()

    if self.value.validate() is ValueUnset:
        # Entry empty/invalid: suggest the full outstanding value.
        self.value.update(self._outstanding_value)
    elif self._outstanding_value > 0:
        method_name = self._method.method_name
        if method_name == u'credit':
            # Set value to client's current credit or outstanding value, if
            # it's less than the available credit.
            value = min(self._outstanding_value,
                        self.model.client.credit_account_balance)
        elif method_name == u'store_credit':
            # Set value to client's current credit or outstanding value, if
            # it's less than the available credit.
            value = min(self._outstanding_value,
                        self.model.client.remaining_store_credit)
        elif method_name == 'money':
            # If the user changes method to money, keep the value he
            # already typed.
            value = self.value.read() or self._outstanding_value
        else:
            # Otherwise, default to the outstanding value.
            value = self._outstanding_value
        self.value.update(value)
    else:
        self.value.update(0)
        self._outstanding_value = 0

    self.received_value.update(total_payments)
    self._update_missing_or_change_value()
    self.value.grab_focus()
def _update_missing_or_change_value(self):
    """Refresh the missing/overpaid/change labels.

    The total value may be less than total received.
    """
    value = self._get_missing_change_value(with_new_payment=True)
    missing_value = self._get_missing_change_value(with_new_payment=False)
    self.missing_value.update(abs(missing_value))

    change_value = currency(0)
    if value < 0:
        # Received more than the total (counting the typed value): change.
        change_value = abs(value)
    self.change_value.update(change_value)

    if missing_value > 0:
        self.missing.set_text(_('Missing:'))
    elif missing_value < 0 and isinstance(self.model, ReturnedSale):
        self.missing.set_text(_('Overpaid:'))
    else:
        self.missing.set_text(_('Difference:'))
def _get_missing_change_value(self, with_new_payment=False):
    """Return total minus received, optionally counting the typed value.

    Positive means money is still missing; negative means overpayment.
    """
    received = self.received_value.read()
    if received == ValueUnset:
        received = currency(0)

    if with_new_payment:
        typed = self.value.read()
        if typed != ValueUnset:
            received += typed

    return self._total_value - received
def _get_columns(self):
    """Columns for the payments objectlist."""
    return [Column('description', title=_(u'Description'), data_type=str,
                   expand=True, sorted=True),
            Column('status_str', title=_('Status'), data_type=str,
                   width=80),
            Column('value', title=_(u'Value'), data_type=currency),
            Column('due_date', title=_('Due date'),
                   data_type=datetime.date)]
def _add_method(self, payment_method, payment_type):
    """Add a radio button for *payment_method* when it is applicable."""
    if not payment_method.is_active:
        return

    # some payment methods are not allowed without a client.
    if payment_method.operation.require_person(payment_type):
        if isinstance(self.model, StockDecrease):
            return
        elif (not isinstance(self.model, PurchaseOrder) and
              self.model.client is None):
            return
        elif (isinstance(self.model, PurchaseOrder) and
              (payment_method.method_name == 'store_credit' or
               payment_method.method_name == 'credit')):
            return

    if (self.model.group.payer and
        (payment_method.method_name == 'store_credit' or
         payment_method.method_name == 'credit')):
        try:
            # FIXME: If the client can pay at least 0.01 with
            # store_credit/credit, allow those methods. This is not an
            # "all or nothing" situation, since the value is being divided
            # between multiple payments
            self.model.client.can_purchase(payment_method, Decimal('0.01'))
        except SellError:
            return

    # The first child (if any) anchors the radio group.
    children = self.methods_box.get_children()
    if children:
        group = children[0]
    else:
        group = None

    # Show the client's credit balance next to the credit method.
    if (payment_method.method_name == 'credit' and
            self.model.client and
            self.model.client.credit_account_balance > 0):
        credit = converter.as_string(
            currency, self.model.client.credit_account_balance)
        description = u"%s (%s)" % (payment_method.get_description(),
                                    credit)
    else:
        description = payment_method.get_description()

    radio = gtk.RadioButton(group, description)
    self.methods_box.pack_start(radio)
    radio.connect('toggled', self._on_method__toggled)
    # Stash the method on the widget for the toggled handler.
    radio.set_data('method', payment_method)
    radio.show()
    if api.sysparam.compare_object("DEFAULT_PAYMENT_METHOD",
                                   payment_method):
        radio.set_active(True)
def _can_add_payment(self):
    """Check whether a payment with the typed value may be created."""
    if self.value.read() is ValueUnset:
        return False

    if self._outstanding_value <= 0:
        return False

    payments = self.model.group.get_valid_payments()
    payment_count = payments.find(
        Payment.method_id == self._method.id).count()
    if payment_count >= self._method.max_installments:
        info(_(u'You can not add more payments using the %s '
               'payment method.') % self._method.description)
        return False

    # If we are creating out payments (PurchaseOrder) or the Sale does not
    # have a client, assume all options available are creatable.
    if (isinstance(self.model, (PurchaseOrder, StockDecrease))
            or not self.model.client):
        return True

    # Accumulate per-method totals (including the new value) and check
    # the client can afford each of them.
    method_values = {self._method: self._holder.value}
    for i, payment in enumerate(self.model.group.payments):
        # Cancelled payments doesn't count, and paid payments have already
        # been validated.
        if payment.is_cancelled() or payment.is_paid():
            continue
        method_values.setdefault(payment.method, 0)
        method_values[payment.method] += payment.value
    for method, value in method_values.items():
        try:
            self.model.client.can_purchase(method, value)
        except SellError as e:
            warning(str(e))
            return False

    return True
def _add_payment(self):
    """Create a payment with the typed value for the selected method."""
    assert self._method

    if not self._can_add_payment():
        return

    # Money and credit are simple enough to be created inline.
    if self._method.method_name == u'money':
        self._setup_cash_payment()
    elif self._method.method_name == u'credit':
        self._setup_credit_payment()

    # We are about to create payments, so we need to consider the fiscal
    # printer and its operations.
    # See salewizard.SalesPersonStep.on_next_step for details.
    # (We only emit this event for sales.)
    if not isinstance(self.model, PurchaseOrder):
        retval = CreatePaymentEvent.emit(self._method, self.model,
                                         self.store, self._holder.value)
    else:
        retval = None

    if retval is None or retval == CreatePaymentStatus.UNHANDLED:
        # Other methods need their per-method editor dialog.
        if not (self._method.method_name == u'money' or
                self._method.method_name == u'credit'):
            self._run_payment_editor()

    self._has_modified_payments = True

    self._update_payment_list()
    # TEF requirement: must finish once the sale total is reached.
    if self.finish_on_total and self.can_confirm():
        self._wizard.finish()
    self.update_view()
def _remove_payment(self, payment):
    """Delete a preview payment or cancel an already persisted one."""
    if payment.is_preview():
        payment.group.remove_item(payment)
        payment.delete()
    elif payment.is_paid():
        if not self._allow_remove_paid:
            return
        # Record the renegotiation before reverting and cancelling.
        entry = PaymentChangeHistory(payment=payment,
                                     change_reason=_(
                                         u'Payment renegotiated'),
                                     store=self.store)
        payment.set_not_paid(entry)
        entry.new_status = Payment.STATUS_CANCELLED
        payment.cancel()
    else:
        payment.cancel()

    self._has_modified_payments = True

    self._update_payment_list()
    self.update_view()
def _setup_credit_payment(self):
    """Create an incoming credit payment worth the typed value.

    :returns: False when the payment could not be created, True otherwise
    """
    payment_value = self._holder.value
    assert isinstance(self.model, Sale)

    try:
        payment = self._method.create_payment(
            Payment.TYPE_IN, self.model.group, self.model.branch, payment_value)
    except PaymentMethodError as err:
        warning(str(err))
        # Without this early return the attribute access below would
        # raise a NameError, since ``payment`` was never bound.
        return False

    payment.base_value = self._holder.value

    return True
def _setup_cash_payment(self):
    """Create a money payment capped at the outstanding value.

    :returns: False when the payment could not be created, True otherwise
    """
    # Never register more than the outstanding value; the surplus is
    # handed back as change.
    has_change_value = self._holder.value - self._outstanding_value > 0
    if has_change_value:
        payment_value = self._outstanding_value
    else:
        payment_value = self._holder.value

    if isinstance(self.model, PurchaseOrder):
        p_type = Payment.TYPE_OUT
    else:
        p_type = Payment.TYPE_IN

    try:
        payment = self._method.create_payment(
            p_type, self.model.group, self.model.branch, payment_value)
    except PaymentMethodError as err:
        warning(str(err))
        # Bail out: ``payment`` is unbound here and the attribute access
        # below would raise a NameError.
        return False

    # We have to modify the payment, so the fiscal printer can calculate
    # and print the change.
    payment.base_value = self._holder.value

    return True
def _update_payment_list(self):
    """Reload the payments objectlist from the database."""
    # We reload all the payments each time we update the list. This will
    # avoid the validation of each payment (add or update) and allow us to
    # rename the payments at runtime.
    self.payments.clear()
    payment_group = self.model.group
    payments = list(payment_group.payments.order_by(Payment.identifier))
    preview_payments = [p for p in payments if p.is_preview()]
    len_preview_payments = len(preview_payments)

    # Persisted payments first, unchanged.
    for payment in payments:
        if payment.is_preview():
            continue
        self.payments.append(payment)

    # Preview payments are renumbered before being displayed.
    for i, payment in enumerate(preview_payments):
        payment.description = payment.method.describe_payment(
            payment_group, i + 1, len_preview_payments)
        self.payments.append(payment)

    self._update_values()
def _run_payment_editor(self):
    """Open the per-method editor dialog and return its result."""
    toplevel = (self._wizard.get_current_toplevel()
                if self._wizard else None)
    return run_dialog(_MultipleMethodEditor, toplevel, self._wizard,
                      self, self.store, self.model, self._method,
                      self._holder.value)
def _refresh_next(self, value):
    """Next is enabled only when valid AND the totals allow confirming."""
    self._wizard.refresh_next(value and self.can_confirm())
#
# Public API
#
def enable_remove(self):
    """Show the remove button (it may have been hidden by the tef plugin)."""
    self.remove_button.show()
def can_confirm(self):
    """Check whether the step can be confirmed with the current payments."""
    if not self.is_valid:
        return False
    missing_value = self._get_missing_change_value()
    # Partial payment is only allowed when a total is not required.
    if self._require_total_value and missing_value != 0:
        return False
    assert missing_value >= 0, missing_value
    return True
#
# Callbacks
#
def _on_method__toggled(self, radio):
    """Switch the active method and refresh the suggested value."""
    if not radio.get_active():
        return

    self._method = radio.get_data('method')
    self._update_values()
    self.value.validate(force=True)
    self.value.grab_focus()
def on_add_button__clicked(self, widget):
    """Create a payment with the typed value for the selected method."""
    self._add_payment()
def on_remove_button__clicked(self, button):
    """Remove/cancel the selected payment, ignoring cancelled ones."""
    payment = self.payments.get_selected()
    if not payment or payment.is_cancelled():
        return
    self._remove_payment(payment)
def on_payments__selection_changed(self, objectlist, payment):
    """Enable the remove button only for removable payments."""
    if not payment:
        # Nothing selected
        can_remove = False
    elif not self._allow_remove_paid and payment.is_paid():
        can_remove = False
    elif (isinstance(self.model, PurchaseOrder) and
          payment.payment_type == Payment.TYPE_IN):
        # Do not allow to remove inpayments on orders, as only
        # outpayments can be added
        can_remove = False
    elif (isinstance(self.model,
                     (PaymentRenegotiation, Sale, ReturnedSale)) and
          payment.payment_type == Payment.TYPE_OUT):
        # Do not allow to remove outpayments on orders, as only
        # inpayments can be added
        can_remove = False
    else:
        can_remove = True

    self.remove_button.set_sensitive(can_remove)
def on_value__activate(self, entry):
    """Pressing Enter in the value entry adds the payment."""
    if self.add_button.get_sensitive():
        self._add_payment()
def on_value__changed(self, entry):
    """Keep the add button and change/missing labels in sync."""
    try:
        value = entry.read()
    except ValidationError as e:
        self.add_button.set_sensitive(False)
        return e

    self.add_button.set_sensitive(value and
                                  value is not ValueUnset)
    self._update_missing_or_change_value()
def on_value__validate(self, entry, value):
    """Validate the typed value against the method and client limits.

    Also updates the holder and the add button state as a side effect.
    """
    retval = None
    if value < 0:
        retval = ValidationError(_(u'The value must be greater than zero.'))

    if self._outstanding_value < 0:
        self._outstanding_value = 0

    # Money may exceed the outstanding value (the surplus becomes change).
    is_money_method = self._method and self._method.method_name == u'money'
    if self._outstanding_value - value < 0 and not is_money_method:
        retval = ValidationError(_(u'The value must be lesser than the '
                                   'missing value.'))

    if not value and self._outstanding_value > 0:
        retval = ValidationError(_(u'You must provide a payment value.'))

    # Credit-based methods are capped by the client's balance.
    if not isinstance(self.model, StockDecrease):
        if (self._method.method_name == 'store_credit' and
                value > self.model.client.remaining_store_credit):
            fmt = _(u'Client does not have enough credit. '
                    u'Client store credit: %s.')
            retval = ValidationError(
                fmt % currency(self.model.client.remaining_store_credit))

        if (self._method.method_name == u'credit' and
                value > self.model.client.credit_account_balance):
            fmt = _(u'Client does not have enough credit. '
                    u'Client credit: %s.')
            retval = ValidationError(
                fmt % currency(self.model.client.credit_account_balance))

    self._holder.value = value
    self.toggle_new_payments()
    if self._outstanding_value != 0:
        self.add_button.set_sensitive(not bool(retval))

    return retval
def register_payment_slaves():
    """Register the default slave class for each payment method."""
    dsm = get_utility(IDomainSlaveMapper)
    default_store = api.get_default_store()
    method_slaves = ((u'money', MoneyMethodSlave),
                     (u'bill', BillMethodSlave),
                     (u'check', CheckMethodSlave),
                     (u'card', CardMethodSlave),
                     (u'credit', CreditMethodSlave),
                     (u'store_credit', StoreCreditMethodSlave),
                     (u'multiple', MultipleMethodSlave),
                     (u'deposit', DepositMethodSlave))
    for method_name, slave_class in method_slaves:
        method = PaymentMethod.get_by_name(default_store, method_name)
        dsm.register(method, slave_class)
| andrebellafronte/stoq | stoqlib/gui/slaves/paymentslave.py | Python | gpl-2.0 | 60,870 | [
"VisIt"
] | a02c0450bf0f0531b45475e1001e6033bc8439376ead0bcbc6efb396a256c406 |
from .._transcripts.transcript_base import TranscriptBase, Metric
from ..configuration.configuration import MikadoConfiguration
from ..configuration.daijin_configuration import DaijinConfiguration
from .transcript_methods import splitting, retrieval
from typing import List
from ..serializers.blast_serializer import Hit
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy.ext import baked
from sqlalchemy.sql.expression import desc, asc # SQLAlchemy imports
import pysam
import functools
import inspect
default_config = MikadoConfiguration()
class Transcript(TranscriptBase):
# Query baking to minimize overhead
bakery = baked.bakery()
blast_baked = bakery(lambda session: session.query(Hit))
blast_baked += lambda q: q.filter(and_(Hit.query == bindparam("query"),
Hit.evalue <= bindparam("evalue")), )
blast_baked += lambda q: q.order_by(asc(Hit.evalue))
# blast_baked += lambda q: q.limit(bindparam("max_target_seqs"))
def __init__(self, *args, configuration=None, **kwargs):
super().__init__(*args, **kwargs)
self.__configuration = None
self.configuration = configuration
@property
def configuration(self):
"""
Configuration dictionary. It can be None.
:return:
"""
if self.__configuration is None:
self.__configuration = default_config.copy()
return self.__configuration
@configuration.setter
def configuration(self, configuration):
"""
Setter for the configuration dictionary.
:param configuration: None or a dictionary
:type configuration: (None | MikadoConfiguration | DaijinConfiguration)
:return:
"""
if configuration is None:
configuration = default_config.copy()
assert isinstance(configuration, (MikadoConfiguration, DaijinConfiguration))
self.__configuration = configuration
def __getstate__(self):
state = super().__getstate__()
if hasattr(self, "configuration") and self.configuration is not None:
state["configuration"] = self.configuration.copy()
assert isinstance(state["configuration"], (MikadoConfiguration, DaijinConfiguration)), type(
self.configuration)
if isinstance(state["configuration"].reference.genome, pysam.FastaFile):
state["configuration"]["reference"]["genome"] = state["configuration"].reference.genome.filename
return state
def __setstate__(self, state):
self.configuration = state.pop("configuration", None)
self.__dict__.update(state)
self._calculate_cds_tree()
self._calculate_segment_tree()
self.logger = None
def split_by_cds(self) -> List:
"""This method is used for transcripts that have multiple ORFs.
It will split them according to the CDS information into multiple transcripts.
UTR information will be retained only if no ORF is down/upstream.
"""
for new_transcript in splitting.split_by_cds(self):
yield new_transcript
return
def load_information_from_db(self, configuration, introns=None, data_dict=None):
"""This method will load information regarding the transcript from the provided database.
:param configuration: Necessary configuration file
:type configuration: (MikadoConfiguration|DaijinConfiguration)
:param introns: the verified introns in the Locus
:type introns: None,set
:param data_dict: a dictionary containing the information directly
:type data_dict: dict
Verified introns can be provided from outside using the keyword.
Otherwise, they will be extracted from the database directly.
"""
retrieval.load_information_from_db(self,
configuration,
introns=introns,
data_dict=data_dict)
def load_orfs(self, candidate_orfs):
"""
Thin layer over the load_orfs method from the retrieval module.
:param candidate_orfs: list of candidate ORFs in BED12 format.
:return:
"""
retrieval.load_orfs(self, candidate_orfs)
def find_overlapping_cds(self, candidate_orfs):
"""
Thin wrapper for the homonym function in retrieval
:param candidate_orfs: List of candidate ORFs
:return:
"""
return retrieval.find_overlapping_cds(self, candidate_orfs)
# We need to overload this because otherwise we won't get the metrics from the base class.
@classmethod
@functools.lru_cache(maxsize=None, typed=True)
def get_available_metrics(cls) -> list:
"""This function retrieves all metrics available for the class."""
metrics = TranscriptBase.get_available_metrics()
for member in inspect.getmembers(cls):
if not member[0].startswith("__") and member[0] in cls.__dict__ and isinstance(
cls.__dict__[member[0]], Metric):
metrics.append(member[0])
_metrics = sorted(set([metric for metric in metrics]))
final_metrics = ["tid", "alias", "parent", "original_source", "score"] + _metrics
return final_metrics
# We need to overload this because otherwise we won't get the metrics from the base class.
@classmethod
@functools.lru_cache(maxsize=None, typed=True)
def get_modifiable_metrics(cls) -> set:
metrics = TranscriptBase.get_modifiable_metrics()
for member in inspect.getmembers(cls):
not_private = (not member[0].startswith("_" + cls.__name__ + "__") and not member[0].startswith("__"))
in_dict = (member[0] in cls.__dict__)
if in_dict:
is_metric = isinstance(cls.__dict__[member[0]], Metric)
has_fset = (getattr(cls.__dict__[member[0]], "fset", None) is not None)
else:
is_metric = None
has_fset = None
if all([not_private, in_dict, is_metric, has_fset]):
metrics.append(member[0])
return set(metrics)
| lucventurini/mikado | Mikado/transcripts/transcript.py | Python | lgpl-3.0 | 6,271 | [
"pysam"
] | e6f3b86dbaf0ee0156ba53cf18894f6945efb31e511dbbc500cc371dc64bd9f0 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
def testUnknownParam(self):
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.runTests('--no-color', '-i', 'unknown_param')
self.assertIn('unknown_param:5: unused parameter "not_a_parameter"',
cm.exception.output.decode('utf-8'))
| nuclear-wizard/moose | python/TestHarness/tests/test_UnknownParam.py | Python | lgpl-2.1 | 726 | [
"MOOSE"
] | ff31f60f201495a04181f1ca69620771fe7090e3e6455c17558419eb306c0a75 |
# Generated by Django 2.1.5 on 2019-02-01 12:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('activity_statistic', '0002_auto_20190201_1249'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
),
migrations.AlterField(
model_name='visit',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
),
]
| linea-it/dri | api/activity_statistic/migrations/0003_auto_20190201_1251.py | Python | gpl-3.0 | 827 | [
"VisIt"
] | d81f66df1f3c0ea03b9549d1cb2eacfbcce85fa722a6a91ab9d837054113b4b3 |
from pyPEG import *
import re
import types
_ = re.compile
class ParserError(Exception):pass
class WorkflowGrammar(dict):
def __init__(self):
peg, self.root = self._get_rules()
self.update(peg)
def _get_rules(self):
# 0 ?
# -1 *
# -2 +
#basic
def ws(): return _(r'\s+')
def space(): return _(r'[ \t]+')
def eol(): return _(r'\r\n|\r|\n')
def iden(): return _(r'[a-zA-Z_][a-zA-Z_0-9]*')
def colon(): return _(r':')
def blankline(): return 0, space, eol
def double_tripple(): return 0, ws, _(r'"""(.*?)"""', re.S), blankline
def single_tripple(): return 0, ws, _(r"'''(.*?)'''", re.S), blankline
def tripple(): return [single_tripple, double_tripple]
def comment_line(): return 0, space, _(r'#[^\r\n]*'), eol
#task
def task_def_head(): return 0, space, _(r'task'), space, task_def_name, 0, task_def_extend, 0, space, colon, blankline
def task_def_name(): return iden
def task_def_parent(): return iden
def task_def_desc(): return tripple
def task_def_end(): return 0, space, _(r'end'), blankline
def task_def_extend(): return 0, space, _(r'\('), 0, space, task_def_parent, 0, space, _(r'\)')
def task_code_name(): return iden
def task_code_head(): return 0, space, _(r'code'), 0, space, task_code_name, 0, space, colon, blankline
def task_code_body(): return _(r'(.*?)end[ \t]*$', re.M|re.S)
def task_code(): return task_code_head, task_code_body, blankline
def kwarg_name(): return iden
def kwarg_value(): return _(r'[^\r\n]+'), blankline
def kwarg(): return 0, space, kwarg_name, 0, space, colon, 0, space, kwarg_value
def task(): return task_def_head, 0, task_def_desc, -1, [kwarg, task_code, blankline, comment_line], task_def_end
#process
def process_def_head(): return 0, space, [_(r'process'),_(r'workflow')], space, process_def_name, 0, space, colon, blankline
def process_def_name(): return iden
def process_def_alias_task(): return iden
def process_def_desc(): return tripple
def process_task_alias(): return iden, -1, (0, space, _(r','), 0, space, iden), blankline
def process_task_def(): return 0, space, process_def_alias_task, 0, space, _(r'as'), 0, space, process_task_alias
def process_tasks_head(): return 0, space, _(r'tasks'), 0, space, colon, blankline
def process_tasks():
return process_tasks_head, -1, [process_task_def, blankline, comment_line], 0, space, _(r'end'), blankline
def process_flows_head(): return 0, space, _(r'flows'), 0, space, colon, blankline
def process_flows_line(): return -2, (0, space, iden, 0, space, _(r'->')), 0, space, iden, blankline
def process_flows():
return process_flows_head, -1, [process_flows_line, blankline, comment_line], 0, space, _(r'end'), blankline
def process_code_name(): return iden, 0, (_(r'.'), iden)
def process_code_head(): return 0, space, _(r'code'), 0, space, process_code_name, 0, space, colon, blankline
def process_code_body(): return _(r'(.*?)end[ \t]*$', re.M|re.S)
def process_code(): return process_code_head, process_code_body, blankline
def process(): return process_def_head, 0, process_def_desc, -1, [kwarg, process_tasks, process_flows, process_code, blankline, comment_line], 0, space, _(r'end'), blankline
#workflow
def workflow(): return 0, ws, -1, [task, process, blankline, comment_line]
peg_rules = {}
for k, v in ((x, y) for (x, y) in locals().items() if isinstance(y, types.FunctionType)):
peg_rules[k] = v
return peg_rules, workflow
def parse(self, text, root=None, skipWS=False, **kwargs):
if not text:
text = '\n'
if text[-1] not in ('\r', '\n'):
text = text + '\n'
text = re.sub('\r\n|\r', '\n', text)
return parseLine(text, root or self.root, skipWS=skipWS, **kwargs)
class SimpleVisitor(object):
def __init__(self, grammar=None):
self.grammar = grammar
def visit(self, nodes, root=False):
buf = []
if not isinstance(nodes, (list, tuple)):
nodes = [nodes]
if root:
method = getattr(self, '__begin__', None)
if method:
buf.append(method())
for node in nodes:
if isinstance(node, (str, unicode)):
buf.append(node)
else:
if hasattr(self, 'before_visit'):
buf.append(self.before_visit(node))
method = getattr(self, 'visit_' + node.__name__ + '_begin', None)
if method:
buf.append(method(node))
method = getattr(self, 'visit_' + node.__name__, None)
if method:
buf.append(method(node))
else:
if isinstance(node.what, (str, unicode)):
buf.append(node.what)
else:
buf.append(self.visit(node.what))
method = getattr(self, 'visit_' + node.__name__ + '_end', None)
if method:
buf.append(method(node))
if hasattr(self, 'after_visit'):
buf.append(self.after_visit(node))
if root:
method = getattr(self, '__end__', None)
if method:
buf.append(method())
return ''.join(buf)
class WorkflowVisitor(SimpleVisitor):
def __init__(self, grammar=None):
self.grammar = grammar
self.tasks = {}
self.processes = {}
def visit_task(self, node):
t = {'codes':{}}
name = node.find('task_def_name').text
parent = node.find('task_def_parent')
if parent:
t_parent = self.tasks.get(parent.text)
if not t_parent:
raise ParserError("Can't find Task %s definition" % parent.text)
t.update(t_parent)
desc = node.find('task_def_desc')
if desc:
t['desc'] = desc.text.strip()[3:-3].strip()
for k in node.find_all('kwarg'):
n = k.find('kwarg_name').text.strip()
v = k.find('kwarg_value').text.strip()
t[n] = v
for k in node.find_all('task_code'):
_n = k.find('task_code_name').text
code = k.find('task_code_body').text
fname, funcname = self._format_func_name(name, _n)
_code = self._format_func_code(funcname, code)
if _code:
t['codes'][fname] = _code
t['name'] = name
self.tasks[name] = t
return ''
def _format_func_name(self, processname, funcname):
_name = processname + '_' + funcname.replace('.', '_')
return _name, 'def ' + _name + '():'
def _format_func_code(self, funcname, code):
def find_indent(lines):
for i in range(1, len(lines)):
line = lines[i]
_line = line.lstrip()
if _line.startswith('#'):
continue
else:
diff = len(line) - len(_line)
if diff == 0:
indent = 4
else:
indent = 4 - diff
return indent
s = [funcname]
s.extend(code.splitlines()[:-1])
indent = -1
space = ''
index = 0
for i in range(1, len(s)):
if indent == -1:
indent = find_indent(s)
if indent >= 0:
space = ' ' * indent
else:
index = -indent
if indent >= 0:
s[i] = space + s[i]
else:
s[i] = s[i][index:]
if len(s) == 1:
return ''
return '\n'.join(s)
def visit_process(self, node):
p = {'tasks':{}, 'flows':[], 'codes':{}}
name = node.find('process_def_name').text
desc = node.find('process_def_desc')
if desc:
p['desc'] = desc.text.strip()[3:-3].strip()
for k in node.find_all('kwarg'):
n = k.find('kwarg_name').text.strip()
v = k.find('kwarg_value').text.strip()
p[n] = v
for t in node.find_all('process_task_def'):
_task = t.find('process_def_alias_task').text
aliases = t.find('process_task_alias')
for alias in aliases.find_all('iden'):
_n = alias.text
p['tasks'][_n] = _task
for t in node.find_all('process_flows_line'):
flow_begin = None
for x in t.find_all('iden'):
_n = x.text
if not flow_begin:
flow_begin = _n
else:
p['flows'].append((flow_begin, _n))
flow_begin = _n
if _n not in p['tasks']:
p['tasks'][_n] = _n
for t in node.find_all('process_code'):
_n = t.find('process_code_name').text
code = t.find('process_code_body').text
fname, funcname = self._format_func_name(name, _n)
_code = self._format_func_code(funcname, code)
if _code:
p['codes'][fname] = _code
p['name'] = name
self.processes[name] = p
return ''
def parse(text, raise_error=True):
g = WorkflowGrammar()
resultSoFar = []
result, rest = g.parse(text, resultSoFar=resultSoFar, skipWS=False)
if raise_error and rest:
raise ParserError("Parse is not finished, the rest is [%s]" % rest)
v = WorkflowVisitor(g)
v.visit(result, True)
return v.tasks, v.processes
def parseFile(filename, raise_error=True):
with open(filename) as f:
text = f.read()
return parse(text, raise_error) | uliwebext/uliweb-redbreast | redbreast/core/spec/parser.py | Python | bsd-2-clause | 10,265 | [
"VisIt"
] | 2f854b2306317de10f406640d277fafdf38de5838ae594e20ec2567adb8297be |
#!/usr/bin/env python
#author: wowdd1
#mail: developergf@gmail.com
#data: 2014.12.07
from spider import *
sys.path.append("..")
from utils import Utils
from record import Record
class GithubSpider(Spider):
lang_list = [
"C",
"C++",
"C#",
"Clojure",
"CoffeeScript",
"Common Lisp",
"CSS",
"D",
"Dart",
"Erlang",
"F#",
"Go",
"Haskell",
"Java",
"JavaScript",
"Julia",
"Lua",
"Matlab",
"Objective-C",
"Perl",
"PHP",
"Python",
"R",
"Ruby",
"Scala",
"Scheme",
"Shell",
"SQL",
"Swift"]
popular_lang_list = [
"ActionScript",
"C",
"C#",
"C++",
"Clojure",
"CoffeeScript",
"CSS",
"Go",
"Haskell",
"HTML",
"Java",
"JavaScript",
"Jupyter+Notebook",
"Lua",
"Matlab",
"Objective-C",
"Perl",
"PHP",
"Python",
"R",
"Ruby",
"Scala",
"Shell",
"Swift",
"TeX",
"VimL"]
other_lang_list = [
"ABAP",
"Ada",
"Agda",
"AGS Script",
"Alloy",
"AMPL",
"Ant Build System",
"ANTLR",
"ApacheConf",
"Apex",
"API Blueprint",
"APL",
"AppleScript",
"Arc",
"Arduino",
"AsciiDoc",
"ASP",
"AspectJ",
"Assembly",
"ATS",
"Augeas",
"AutoHotkey",
"AutoIt",
"Awk",
"Batchfile",
"Befunge",
"Bison",
"BitBake",
"BlitzBasic",
"BlitzMax",
"Bluespec",
"Boo",
"Brainfuck",
"Brightscript",
"Bro",
"C-ObjDump",
"C2hs Haskell",
"Cap'n Proto",
"CartoCSS",
"Ceylon",
"Chapel",
"Charity",
"ChucK",
"Cirru",
"Clarion",
"Clean",
"CLIPS",
"CMake",
"COBOL",
"ColdFusion",
"ColdFusion CFC",
"Common Lisp",
"Component Pascal",
"Cool",
"Coq",
"Cpp-ObjDump",
"Creole",
"Crystal",
"Cucumber",
"Cuda",
"Cycript",
"Cython",
"D",
"D-ObjDump",
"Darcs Patch",
"Dart",
"desktop",
"Diff",
"DIGITAL Command Language",
"DM",
"Dockerfile",
"Dogescript",
"DTrace",
"Dylan",
"E",
"Eagle",
"eC",
"Ecere Projects",
"ECL",
"edn",
"Eiffel",
"Elixir",
"Elm",
"Emacs Lisp",
"EmberScript",
"Erlang",
"F#",
"Factor",
"Fancy",
"Fantom",
"Filterscript",
"fish",
"FLUX",
"Formatted",
"Forth",
"FORTRAN",
"Frege",
"G-code",
"Game Maker Language",
"GAMS",
"GAP",
"GAS",
"GDScript",
"Genshi",
"Gentoo Ebuild",
"Gentoo Eclass",
"Gettext Catalog",
"GLSL",
"Glyph",
"Gnuplot",
"Golo",
"Gosu",
"Grace",
"Gradle",
"Grammatical Framework",
"Graph Modeling Language",
"Graphviz (DOT)",
"Groff",
"Groovy",
"Groovy Server Pages",
"Hack",
"Haml",
"Handlebars",
"Harbour",
"Haxe",
"HCL",
"HTML+Django",
"HTML+ERB",
"HTML+PHP",
"HTTP",
"Hy",
"HyPhy",
"IDL",
"Idris",
"IGOR Pro",
"Inform 7",
"INI",
"Inno Setup",
"Io",
"Ioke",
"IRC log",
"Isabelle",
"Isabelle ROOT",
"J",
"Jade",
"Jasmin",
"Java Server Pages",
"JFlex",
"JSON",
"JSON5",
"JSONiq",
"JSONLD",
"Julia",
"KiCad",
"Kit",
"Kotlin",
"KRL",
"LabVIEW",
"Lasso",
"Latte",
"Lean",
"Less",
"Lex",
"LFE",
"LilyPond",
"Limbo",
"Linker Script",
"Linux Kernel Module",
"Liquid",
"Literate Agda",
"Literate CoffeeScript",
"Literate Haskell",
"LiveScript",
"LLVM",
"Logos",
"Logtalk",
"LOLCODE",
"LookML",
"LoomScript",
"LSL",
"M",
"Makefile",
"Mako",
"Markdown",
"Mask",
"Mathematica",
"Maven POM",
"Max",
"MediaWiki",
"Mercury",
"MiniD",
"Mirah",
"Modelica",
"Modula-2",
"Module Management System",
"Monkey",
"Moocode",
"MoonScript",
"MTML",
"MUF",
"mupad",
"Myghty",
"NCL",
"Nemerle",
"nesC",
"NetLinx",
"NetLinx+ERB",
"NetLogo",
"NewLisp",
"Nginx",
"Nimrod",
"Ninja",
"Nit",
"Nix",
"NL",
"NSIS",
"Nu",
"NumPy",
"ObjDump",
"Objective-C++",
"Objective-J",
"OCaml",
"Omgrofl",
"ooc",
"Opa",
"Opal",
"OpenCL",
"OpenEdge ABL",
"OpenSCAD",
"Org",
"Ox",
"Oxygene",
"Oz",
"Pan",
"Papyrus",
"Parrot",
"Parrot Assembly",
"Parrot Internal Representation",
"Pascal",
"PAWN",
"Perl6",
"PicoLisp",
"PigLatin",
"Pike",
"PLpgSQL",
"PLSQL",
"Pod",
"PogoScript",
"PostScript",
"PowerShell",
"Processing",
"Prolog",
"Propeller Spin",
"Protocol Buffer",
"Public Key",
"Puppet",
"Pure Data",
"PureBasic",
"PureScript",
"Python traceback",
"QMake",
"QML",
"Racket",
"Ragel in Ruby Host",
"RAML",
"Raw token data",
"RDoc",
"REALbasic",
"Rebol",
"Red",
"Redcode",
"RenderScript",
"reStructuredText",
"RHTML",
"RMarkdown",
"RobotFramework",
"Rouge",
"Rust",
"Sage",
"SaltStack",
"SAS",
"Sass",
"Scaml",
"Scheme",
"Scilab",
"SCSS",
"Self",
"ShellSession",
"Shen",
"Slash",
"Slim",
"Smali",
"Smalltalk",
"Smarty",
"SMT",
"SourcePawn",
"SPARQL",
"SQF",
"SQL",
"SQLPL",
"Squirrel",
"Standard ML",
"Stata",
"STON",
"Stylus",
"SuperCollider",
"SVG",
"SystemVerilog",
"Tcl",
"Tcsh",
"Tea",
"Text",
"Textile",
"Thrift",
"TOML",
"Turing",
"Turtle",
"Twig",
"TXL",
"TypeScript",
"Unified Parallel C",
"Unity3D Asset",
"UnrealScript",
"Vala",
"VCL",
"Verilog",
"VHDL",
"Visual Basic",
"Volt",
"Vue",
"Web Ontology Language",
"WebIDL",
"wisp",
"xBase",
"XC",
"XML",
"Xojo",
"XPages",
"XProc",
"XQuery",
"XS",
"XSLT",
"Xtend",
"Yacc",
"YAML",
"Zephir",
"Zimpl"]
result = ""
request_times = 0
token = ''
def __init__(self):
Spider.__init__(self)
self.school = "github"
f = open('../github_token', 'rU')
self.token = ''.join(f.readlines()).strip()
def isQueryLang(self, lang):
for item in self.lang_list:
if item.lower() == lang.lower():
return True
return False
def requestWithAuth(self, url):
if self.token != "":
return requests.get(url, auth=(self.token, ''))
else:
return requests.get(url)
def getUrl(self, lang, page, large_than_stars, per_page):
if self.isQueryLang(lang) == True:
return "https://api.github.com/search/repositories?page=" + str(page) + "&per_page=" + per_page + "&q=stars:>" + large_than_stars +"+language:" + lang.replace("#","%23").replace("+","%2B") + "&sort=stars&order=desc"
else:
return "https://api.github.com/search/repositories?page=" + str(page) + "&per_page=" + per_page + "&q=" + lang + "+stars:>" + large_than_stars + "&sort=stars&order=desc"
def checkRequestTimes(self):
self.request_times += 1
if self.request_times % 10 == 0:
print "wait 60s..."
time.sleep(60)
def processPageData(self, f, file_name, lang, url, name_contain=''):
#self.checkRequestTimes()
#print "url: " + url
r = self.requestWithAuth(url)
dict_obj = json.loads(r.text)
total_size = 0
for (k, v) in dict_obj.items():
if k == "total_count":
total_size = v
if k == "message":
print v
self.result += lang + " "
self.cancel_upgrade(file_name)
return
if k == "items":
for item in v:
if name_contain != '' and item["name"].find(name_contain) == -1:
continue
data = str(item['stargazers_count']) + " " + item["name"] + " " + item['html_url']
print data
description = ""
if item['description'] != None:
description = 'author:' + item['owner']['login'] + ' description:' + item['description'] + " (stars:" + str(item["stargazers_count"]) + " forks:" + str(item['forks_count']) + " watchers:" + str(item['watchers']) + ")"
self.write_db(f, 'github-' + item['owner']['login'].strip() + '-' + item["name"].strip(), item["name"], item['html_url'], description)
self.count = self.count + 1
return total_size
def processGithubData(self, lang, large_than_stars, per_page, name_contain=''):
file_name = self.get_file_name("eecs/projects/github/" + lang, self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
page = 1
url = self.getUrl(lang, page, str(large_than_stars), str(per_page))
self.count = 0
print "processing " + lang + " url: " + url
total_size = self.processPageData(f, file_name, lang, url, name_contain)
if total_size > 1000:
total_size = 1000
while total_size > (page *per_page):
#print "total size:" + str(total_size) + " request page 2"
page += 1
self.processPageData(f, file_name, lang, self.getUrl(lang, page, str(large_than_stars), str(per_page)), name_contain)
self.close_db(f)
if self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getUserUrl(self, location, followers, page, per_page):
if location == "all":
return "https://api.github.com/search/users?page=" + str(page) + "&per_page=" + per_page + "&q=followers:>" + followers
else:
return "https://api.github.com/search/users?page=" + str(page) + "&per_page=" + per_page + "&q=followers:>" + followers + "+location:" + location
def getUserRepos(self, url):
#self.checkRequestTimes()
r = self.requestWithAuth(url)
repos = ""
jobj = json.loads(r.text)
repo_dict = {}
for repo in jobj:
str_repo = repo['name'] + "("
if repo["stargazers_count"] != None and repo["stargazers_count"] > 0:
str_repo += "stars:" + str(repo["stargazers_count"])
if repo['forks_count'] != None and repo['forks_count'] > 0:
str_repo += " forks:" + str(repo['forks_count'])
if repo['watchers'] != None and repo['watchers'] > 0:
str_repo += " watchers:" + str(repo['watchers'])
if repo["language"] != None:
str_repo += " lang:" + str(repo["language"])
if repo['stargazers_count'] != None:
repo_dict[repo.get("stargazers_count", 0)] = str_repo.strip() + ") "
else:
repo_dict[0] = str_repo.strip() + ") "
print sorted(repo_dict.keys(), reverse=True)
i = 0
for k, repo in [(k,repo_dict[k]) for k in sorted(repo_dict.keys(), reverse=True)]:
i += 1
if i == 1:
repos += "toprepo:" + repo + ' '
else:
repos += "project:" + repo
print repos + "\n"
return repos
def getUserFollowers(self, url):
#self.checkRequestTimes()
r = self.requestWithAuth(url)
followers = ""
jobj = json.loads(r.text)
for follower in jobj:
followers += follower["login"] + " "
print followers + "\n"
return followers
def processUserPageData(self, f, file_name, url):
#self.checkRequestTimes()
r = self.requestWithAuth(url)
dict_obj = json.loads(r.text)
total_size = 0
for (k, v) in dict_obj.items():
if k == "total_count":
total_size = v
if k == "message":
print v
self.cancel_upgrade(file_name)
return
if k == "items":
for item in v:
data = str(item["id"]) + " " + item["login"] + " " + item["html_url"]
print data
self.write_db(f, item["type"] + "-" + str(item["id"]), item["login"], item["html_url"], self.getUserRepos(item["repos_url"])) #"followers: " + self.getUserFollowers(item["followers_url"]))
self.count = self.count + 1
return total_size
def processGithubiUserData(self, location, followers, per_page):
#self.checkRequestTimes()
file_name = self.get_file_name("rank/" + location, self.school + "-user")
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
page = 1
url = self.getUserUrl(location, str(followers), str(page), str(per_page))
print "processing " + url
total_size = self.processUserPageData(f, file_name, url)
if total_size > 1000:
total_size = 1000
while total_size > (page *per_page):
page += 1
self.processUserPageData(f, file_name, self.getUserUrl(location, str(followers), str(page), str(per_page)))
self.close_db(f)
if self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def getOrganizationProjects(self):
data_eecs = {"google" : "https://github.com/google",\
"google-cloud-platform" : "https://github.com/GoogleCloudPlatform",\
'googlesamples' : 'https://github.com/googlesamples',\
"youtube" : 'https://github.com/youtube',\
"microsoft" : "https://github.com/Microsoft",\
"donet" : "https://github.com/dotnet",\
"apple" : "https://github.com/apple/",\
"yahoo" : "https://github.com/yahoo",\
"facebook" : "https://github.com/facebook",\
"twitter" : "https://github.com/twitter",\
"aws" : "https://github.com/aws",\
"awslabs" : "https://github.com/awslabs",\
"amznlabs" : "https://github.com/amznlabs",\
"awslabs" : "https://github.com/awslabs",\
"linkedin" : "https://github.com/linkedin",\
"baidu" : "https://github.com/Baidu",\
"baidu-research" : 'https://github.com/baidu-research',\
"dmlc" : "https://github.com/dmlc",\
"amplab" : "https://github.com/amplab",\
'OculusVR' : 'https://github.com/OculusVR/',\
'OSVR' : 'https://github.com/OSVR/',\
'ValveSoftware' : 'https://github.com/ValveSoftware',\
'id-Software' : 'https://github.com/id-Software',\
'EA-games' : 'https://github.com/electronicarts',\
'sony' : 'https://github.com/sony',\
'Blizzard' : 'https://github.com/Blizzard',\
'openai' : 'https://github.com/openai',\
'deepmind' : "https://github.com/deepmind",\
'mozilla' : 'https://github.com/mozilla',\
'openstack': 'https://github.com/openstack',\
'reddit' : 'https://github.com/reddit',\
'quora' : 'https://github.com/quora',\
'netflix' : 'https://github.com/Netflix',\
'adobe' : 'https://github.com/adobe',\
'alibaba' : 'https://github.com/Alibaba',\
'ebay' : 'https://github.com/ebay',\
'zhihu' : 'https://github.com/zhihu',\
'vimeo' : 'https://github.com/vimeo',\
'aol' : 'https://github.com/aol',\
'yelp' : 'https://github.com/yelp',\
'wordpress' : 'https://github.com/wordpress',\
'ibm' : 'https://github.com/ibm',\
'netease' : 'https://github.com/NetEase',\
'mysql' : 'https://github.com/mysql',\
'imgur' : 'https://github.com/imgur',\
'sogou' : 'https://github.com/sogou',\
'flickr' : 'https://github.com/Flickr',\
'hulu' : 'https://github.com/hulu',\
'coursera' : 'https://github.com/coursera',\
'edx' : 'https://github.com/edx',\
'udacity' : 'https://github.com/udacity',\
'commaai' : 'https://github.com/commaai',\
'bvlc' : 'https://github.com/BVLC',\
'tum-vision' : 'https://github.com/tum-vision/',\
'GoogleChrome' : 'https://github.com/GoogleChrome',\
'uArm-Developer' : 'https://github.com/uArm-Developer',\
'arduino' : 'https://github.com/arduino',\
'ai2' : 'https://github.com/allenai',\
'microsoft-research' : 'https://github.com/microsoftresearch',\
'facebook-research' : 'https://github.com/facebookresearch',\
'ibm-research' : 'https://github.com/ibm-research',\
'ibm-watson' : 'https://github.com/ibm-watson',\
'csail' : 'https://github.com/csail',\
'stanford' : 'https://github.com/stanford',\
'IBM-Bluemix' : 'https://github.com/IBM-Bluemix',\
'watson-developer-cloud' : 'https://github.com/watson-developer-cloud',\
'Samsung' : 'https://github.com/Samsung',\
'nvidia' : 'https://github.com/nvidia',\
'AMD' : 'https://github.com/amd',\
'macmillanpublishers' : 'https://github.com/macmillanpublishers',\
'oreillymedia' : 'https://github.com/oreillymedia',\
'usgs' : 'https://github.com/usgs',\
'gitter' : 'https://github.com/gitterHQ',\
'Oxford-Robotics-Institute' : 'https://github.com/oxford-ori',\
'ToyotaResearchInstitute' : 'https://github.com/ToyotaResearchInstitute',\
'mila-udem' : 'https://github.com/mila-udem'}
data_neuro = {'INCF' : 'https://github.com/INCF',\
'nipy' : 'https://github.com/nipy',\
'OpenNeuroLab' : 'https://github.com/OpenNeuroLab',\
'PySurfer' : 'https://github.com/PySurfer',\
'CBMM' : 'https://github.com/CBMM',\
'AllenInstitute' : 'https://github.com/AllenInstitute',\
'ACElab' : 'https://github.com/aces',\
'MCB80x' : 'https://github.com/mcb80x',\
'BackyardBrains' : 'https://github.com/BackyardBrains',\
'nengo' : 'https://github.com/nengo'}
data_gene = { 'CIDAR-LAB' : 'https://github.com/CIDARLAB',\
'Voigt-Lab' : 'https://github.com/VoigtLab',\
'ENCODE-DCC' : 'https://github.com/ENCODE-DCC'}
self.getProjectByDict(data_eecs, 'eecs/projects/github/organization/')
self.getProjectByDict(data_neuro, 'neuroscience/projects/github/organization/')
self.getProjectByDict(data_gene, 'biology/projects/github/organization/')
#self.getStartupPorjects('../db/economics/startup-billion-dollar-club2016')
#self.getStartupPorjects('../db/rank/smartest-companies2016')
#self.getStartupPorjects('../db/rank/self-driving-company2016')
def getStartupPorjects(self, path):
data = {}
if os.path.exists(path):
f = open(path, 'rU')
for line in f.readlines():
record = Record(line)
key = record.get_title().replace(' ', '').replace('.', '').strip()
url = 'https://github.com/' + key
data[key.lower()] = url
if len(data) > 0:
self.getProjectByDict(data, 'eecs/projects/github/organization/')
def getProjectByDict(self, data, path):
for k in data:
file_name = self.get_file_name(path + k, self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
print data[k]
r = self.requestWithAuth(data[k])
soup = BeautifulSoup(r.text)
pages = 1
for a in soup.find_all('a'):
if a['href'].find('page=') != -1 and a.text != "Next":
pages = int(a.text)
print pages
project_dict = {}
for i in range(1, pages + 1):
print data[k] + "?page=" + str(i)
r = self.requestWithAuth(data[k] + "?page=" + str(i))
soup = BeautifulSoup(r.text)
starDict = {}
for a in soup.find_all('a', class_='muted-link tooltipped tooltipped-s mr-3'):
project = a['href'].replace('/stargazers', '')
project = project[project.rfind('/') + 1 :].lower()
starDict[project] = int(a.text.strip().replace(',', ''))
for li in soup.find_all('li'):
if li.h3 == None:
continue
title = li.h3.a.text.strip()
if starDict.has_key(title.lower()) == False:
starDict[title.lower()] = 0
desc = ''
if li.p != None:
desc = "description:" + li.p.text.strip().replace('\n', '')
self.count += 1
id = 'github-' + k + "-" + str(self.count)
record = self.get_storage_format(str(starDict[title.lower()]), title, "https://github.com" + li.h3.a['href'], desc)
project_dict[id] = Record(record)
self.count = 0
for item in sorted(project_dict.items(), key=lambda project_dict:int(project_dict[1].get_id().strip()), reverse=True):
print item[1].get_id() + " " + item[1].get_title()
self.count += 1
id = item[0][0 : item[0].rfind('-')] + '-' + item[1].get_title().strip()
self.write_db(f, id, item[1].get_title().strip(), item[1].get_url().strip(), 'author:'+ k + ' ' + item[1].get_describe().strip())
self.close_db(f)
if self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def doWork(self):
    """Entry point: crawl the configured GitHub project listings."""
    star, per_page = 300, 100
    self.getProjects(star, per_page)
    # Alternate crawls, disabled by default:
    #self.getKeyProjects(star, per_page)
    #self.getUsers(500, 100)
    #self.getOrganizations()
def getProjects(self, star, per_page):
for lang in self.lang_list:
self.processGithubData(lang, star, per_page)
if len(self.result) > 1:
print self.result + " is not be updated"
def getKeyProjects(self, star, per_page):
keywords = ['awesome', 'computer vision', 'nlp', 'artificial intelligence', 'spark', 'machine learning', 'deep learning', 'android']
for keyword in keywords:
print "get " + keyword + " data..."
if keyword == "awesome":
self.processGithubData(keyword, 100, per_page, keyword)
else:
self.processGithubData(keyword, star, per_page)
def getUsers(self, star, per_page):
print "get user data..."
self.processGithubiUserData("all", star, per_page)
self.processGithubiUserData("china", star, per_page)
def getOrganizations(self):
    # Thin wrapper: delegates to the organization-page crawler
    # (presumably defined elsewhere in this class — not visible here).
    self.getOrganizationProjects()
# Script entry point: build the spider and run the default crawl.
start = GithubSpider()
start.doWork()
| roscopecoltran/scraper | .staging/meta-engines/xlinkBook/update/update_github.py | Python | mit | 26,016 | [
"CRYSTAL"
] | 542065253fad5e3fdc55e6421ee4bd4b71d53ee10f3262e89369ca5e941ea4e7 |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Generalized gradient testing applied to dilated conv layer
"""
import itertools as itt
import numpy as np
import pytest
from neon import NervanaObject
from neon.layers.layer import Convolution
from neon.initializers.initializer import Gaussian
from grad_funcs import general_gradient_comp
# add a reset methods to the layer classes
# this is used to reset the layer so that
# running fprop and bprop multiple times
# produces repeatable results
# some layers just need the function defined
class ConvWithReset(Convolution):
    """Convolution layer with a ``reset`` hook so repeated fprop/bprop runs
    during gradient checking produce repeatable results."""
    def reset(self):
        # Drop the cached backend layer; it is rebuilt on the next fprop.
        self.nglayer = None
def pytest_generate_tests(metafunc):
# main test generator
# generates the parameter combos for
# the tests based on whether the
# "--all" option is given to py.test
# that option is added in conftest.py
# global parameter
if metafunc.config.option.all:
bsz_rng = [16, 32]
else:
bsz_rng = [16]
if 'convargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
nin_rng = [5, 8]
nifm_rng = [1, 2, 4]
fs_rng = [2, 3, 4]
dil_h_rng = [1, 2, 3, 4]
dil_w_rng = [1, 2, 3, 4]
else:
nin_rng = [10]
nifm_rng = [1, 5]
fs_rng = [2, 3]
dil_h_rng = [3]
dil_w_rng = [3]
fargs = itt.product(nin_rng, nifm_rng, fs_rng, bsz_rng, dil_h_rng, dil_w_rng)
metafunc.parametrize("convargs", fargs)
# -- conv tests --
def test_conv(backend_cpu64, convargs):
    """Numerical-gradient check for a dilated conv layer on the CPU backend."""
    nin, nifm, fside, batch_size, dil_h, dil_w = convargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    epsilon = 1.0e-5
    # Distinct, well-spaced input values keep the finite differences stable.
    n_vals = nin * nin * nifm * batch_size
    flat = np.arange(n_vals) * 2.5 * epsilon
    np.random.shuffle(flat)
    inp = flat.reshape((nin * nin * nifm, batch_size))
    layer = ConvWithReset((fside, fside, fside), strides=2, padding=fside - 1,
                          dilation=dict(dil_d=1, dil_h=dil_h, dil_w=dil_w),
                          init=Gaussian())
    # Perturb a random 10% subset of the inputs.
    pert_cnt = int(np.ceil(inp.size * 0.1))
    pert_inds = np.random.permutation(inp.size)[:pert_cnt]
    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=epsilon,
                                             lshape=(nifm, nin, nin),
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7
@pytest.mark.xfail(reason="Precision differences with MKL backend. #914")
def test_conv_mkl(backend_mkl, convargs):
    # Same gradient check as test_conv above, but on the MKL backend.
    # Marked xfail until issue #914 (MKL precision differences) is resolved.
    nin, nifm, fside, batch_size, dil_h, dil_w = convargs
    fshape = (fside, fside, fside)
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    sz = nin * nin * nifm * batch_size
    epsilon = 1.0e-5
    # well-spaced distinct input values keep the finite differences stable
    inp = np.arange(sz) * 2.5 * epsilon
    np.random.shuffle(inp)
    inp = inp.reshape((nin * nin * nifm, batch_size))
    lshape = (nifm, nin, nin)
    init = Gaussian()
    layer = ConvWithReset(fshape, strides=2, padding=fside-1,
                          dilation=dict(dil_d=1, dil_h=dil_h, dil_w=dil_w), init=init)
    pert_frac = 0.1  # test 10% of the inputs
    # select pert_frac fraction of inps to perturb
    pert_cnt = int(np.ceil(inp.size * pert_frac))
    pert_inds = np.random.permutation(inp.size)[0:pert_cnt]
    (max_abs, max_rel) = general_gradient_comp(layer,
                                               inp,
                                               epsilon=epsilon,
                                               lshape=lshape,
                                               pert_inds=pert_inds)
    assert max_abs < 1.0e-7
| NervanaSystems/neon | tests/test_gradient_conv.py | Python | apache-2.0 | 4,573 | [
"Gaussian"
] | 0a0613fc5ff05d692f0c6df1dbc8565861b2f9f801eaee4618a5f8f4777ea24e |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Copyright (C) 2020 Stoq Tecnologia <http://www.stoq.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., or visit: http://www.gnu.org/.
#
# Author(s): Stoq Team <dev@stoq.com.br>
#
import datetime
import decimal
import json
import os.path
from hashlib import md5
from stoqlib.api import api
import stoqserver
class JsonEncoder(json.JSONEncoder):
    """JSON encoder that also understands datetimes and decimals."""

    def default(self, obj):
        # Map the extra types we support onto JSON-friendly strings:
        # datetimes become ISO-8601, decimals keep their exact digits.
        converters = (
            (datetime.datetime, lambda value: value.isoformat()),
            (decimal.Decimal, str),
        )
        for kind, convert in converters:
            if isinstance(obj, kind):
                return convert(obj)
        # Anything else is unsupported; the base class raises TypeError.
        return json.JSONEncoder.default(self, obj)
def get_user_hash():
    """Return the MD5 hex digest of this installation's USER_HASH parameter."""
    user_hash = api.sysparam.get_string('USER_HASH')
    return md5(user_hash.encode('UTF-8')).hexdigest()
def get_pytests_datadir(*subdirs):
    """Return the path to ``tests/data`` inside the stoqserver source tree,
    optionally joined with extra *subdirs* components."""
    package_root = os.path.dirname(os.path.dirname(stoqserver.__file__))
    return os.path.join(package_root, 'tests/data/', *subdirs)
| stoq/stoq-server | stoqserver/utils.py | Python | gpl-2.0 | 1,629 | [
"VisIt"
] | 26248a5d2845cafefabcee7ad1ecb0c82857b7b3f0da491abc2a66045f2aa083 |
'''
This module contains the one-parameter exponential families used
for fitting GLMs and GAMs.
These families are described in
P. McCullagh and J. A. Nelder. "Generalized linear models."
Monographs on Statistics and Applied Probability.
Chapman & Hall, London, 1983.
'''
from nipy.fixes.scipy.stats.models.family.family import Gaussian, Family, \
Poisson, Gamma, InverseGaussian, Binomial
| yarikoptic/NiPy-OLD | nipy/fixes/scipy/stats/models/family/__init__.py | Python | bsd-3-clause | 409 | [
"Gaussian"
] | 7e96c07952af403a8009d23b0cf39481b067e0a843be53db40b87fae492c278a |
from __future__ import print_function
import sys
from copy import deepcopy
# kills the program when you hit Cntl+C from the command line
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from six import string_types
from pyNastran.gui.qt_version import qt_version
if qt_version == 4:
from PyQt4 import QtGui
from PyQt4.QtGui import (
QTreeView, QWidget, QAbstractItemView, QVBoxLayout, QPushButton, QApplication,
QComboBox)
elif qt_version == 5:
from PyQt5 import QtGui
from PyQt5.QtWidgets import (
QTreeView, QWidget, QAbstractItemView, QVBoxLayout, QPushButton, QApplication,
QComboBox)
elif qt_version == 'pyside':
from PySide import QtGui
from PySide.QtGui import (
QTreeView, QWidget, QAbstractItemView, QVBoxLayout, QPushButton, QApplication,
QComboBox)
else:
raise NotImplementedError('qt_version = %r' % qt_version)
class QTreeView2(QTreeView):
    """Tree view that tracks the row-path of the selected item.

    ``data`` is a nested list of (label, case_id, children) tuples; the
    selected item is identified by the list of row indices from the root
    down to the clicked node (stored in ``old_rows``).
    """
    def __init__(self, data, choices):
        self.old_rows = []          # row-index path of the last selection
        self.data = data            # nested (label, id, children) tuples
        self.choices = choices      # flat payloads keyed by resolved id
        self.single = False         # True -> only one datum; skip tree walk
        QTreeView.__init__(self)
    def mousePressEvent(self, position):
        """Record the clicked item's row-path and echo its resolved key."""
        QTreeView.mousePressEvent(self, position)
        indexes = self.selectedIndexes()
        # trace the tree to find the selected item
        rows = []
        for index in indexes:
            row = index.row()
            rows.append(row)
            level = 0
            # walk up to the root, collecting each ancestor's row index
            while index.parent().isValid():
                index = index.parent()
                row = index.row()
                rows.append(row)
                level += 1
        # collected bottom-up; reverse so the path reads root -> leaf
        rows.reverse()
        # TODO: what is this for???
        if rows != self.old_rows:
            self.old_rows = rows
            valid, keys = self.get_row()
            if not valid:
                print('invalid=%s keys=%s' % (valid, keys))
            else:
                print('valid=%s keys=%s' % (valid, keys))
                #print('choice =', self.choices[keys])
    def get_row(self):
        """
        gets the row
        Returns
        -------
        is_valid : bool
            is this case valid
        row : None or tuple
            None : invalid case
            tuple : valid case
                ('centroid', None, [])
                0 - the location (e.g. node, centroid)
                1 - ???
                2 - ???
        """
        # if there's only 1 data member, we don't need to extract the data id
        if self.single:
            return True, self.data[0]
        # TODO: what is this for???
        irow = 0
        # deepcopy so descending into children cannot mutate self.data
        data = deepcopy(self.data)
        for row in self.old_rows:
            try:
                key = data[row][0]
            except IndexError:
                return False, irow
            irow = data[row][1]
            data = data[row][2]
        # a non-empty children list means an inner node was selected -> invalid
        if data:
            return False, None
        return True, irow
    def set_single(self, single):
        # Toggle single-datum mode and reset the selection to the root row.
        self.single = single
        self.old_rows = [0]
#for subcase in subcases:
# for time in times:
# disp
# stress
# load
class Sidebar(QWidget):
    """
    +--------------+
    | Case/Results |
    +==============+
    | - a          |
    |  - b1        |
    |  - b2        |
    |  - b3        |
    +--------------+

    For Nastran:
      - a: Subcase 1
       - b1. Displacement
       - b2. Stress
       - b3. Strain

    For Cart3d:
      - a1. Geometry
       - b1. ElementID
       - b2. Region
      - a2. Results Case 1
       - b1. U
       - b2. V
       - b3. W

    +--------------+
    |  Sub-Result  | (pulldown)
    +==============+
    | - a1         |
    | - a2         |
    | - a3         |
    | - a4         |
    +--------------+

    For Nastran:
      - a1: Displacement X
      - a2. Displacement Y
      - a3. Displacmenet Z
      - a4. Displacmenet Mag

    For Cart3d:
      - NA (Greyed Out)

    +----------------+
    |     Plot       | (pulldown)
    +================+
    | - Fringe       |
    | - Marker       |
    | - Displacement |
    +----------------+
    - Cart3d -> Fringe (disabled)

    +---------------+
    | Scale Display | (text box)
    +===============+
    | 0 < x < 1000  | (not for fringe)
    +---------------+

    +--------------+
    |   Location   | (pulldown)
    +==============+
    | - nodal      |
    | - centroidal |
    +--------------+
    (disabled)

    +------------------+
    |  Complex Method  | (pulldown)
    +==================+
    | - real           | (usually set to real and disabled)
    | - imag           |
    | - mag            |
    | - phase          |
    | - max over phase |
    +------------------+

    +--------------+
    |    Derive    | (pulldown; only for nodal results)
    +==============+
    | - derive/avg | (default?)
    | - avg/derive |
    +--------------+
    """
    def __init__(self, parent, debug=False):
        """Build the sidebar widgets with placeholder demo data.

        Parameters
        ----------
        parent : object
            the GUI main window; must expose ``_set_case`` and ``case_keys``
        debug : bool; default=False
            print selections instead of updating the VTK window
        """
        QWidget.__init__(self)
        self.parent = parent
        self.debug = debug

        # placeholder tree of (label, case_id, children) tuples, shown
        # until real results are loaded via update_results()
        data = [
            ("Alice", None, [
                ("Keys", 1, []),
                ("Purse", 2, [
                    ("Cellphone", 3, [])
                ])
            ]),
            ("Bob", None, [
                ("Wallet", None, [
                    ("Credit card", 4, []),
                    ("Money", 5, [])
                ])
            ]),
        ]
        choices = ['keys2', 'purse2', 'cellphone2', 'credit_card2', 'money2']
        self.result_case_window = ResultsWindow('Case/Results', data, choices)

        data = [
            ('A', 1, []),
        ]
        self.result_data_window = ResultsWindow('Method', data, choices)
        self.result_data_window.setVisible(False)

        # optional sub-result pulldown; currently disabled
        self.show_pulldown = False
        if self.show_pulldown:
            self.pulldown = QComboBox()
            self.pulldown.addItems(choices)
            self.pulldown.activated[str].connect(self.on_pulldown)

        self.apply_button = QPushButton('Apply', self)
        self.apply_button.clicked.connect(self.on_apply)
        self.setup_layout()

    def setup_layout(self):
        """Stack the tree views, optional pulldown, and Apply button."""
        layout = QVBoxLayout()
        layout.addWidget(self.result_case_window)
        layout.addWidget(self.result_data_window)
        if self.show_pulldown:
            layout.addWidget(self.pulldown)
        layout.addWidget(self.apply_button)
        self.setLayout(layout)
        self.clear_data()

    def update_method(self, method):
        """Rename the first entry of the Method window to *method*.

        Non-string inputs are ignored.
        """
        # BUG FIX: the original fell through to two trailing statements after
        # the if/else, ending in the undefined name ``asdf`` -> NameError on
        # every string input.  Those leftover debug lines were removed.
        if not isinstance(method, str):
            return
        datai = self.result_data_window.data[0]
        self.result_data_window.data[0] = (method, datai[1], datai[2])
        print('method=%s datai=%s' % (method, datai))
        self.result_data_window.update_data(self.result_data_window.data)

    def get_form(self):
        """Return the current Case/Results form data."""
        return self.result_case_window.data

    def update_results(self, data):
        """Replace the Case/Results tree and enable the Apply button."""
        self.result_case_window.update_data(data)
        self.apply_button.setEnabled(True)

    def update_methods(self, data):
        """Replace the Method tree and enable the Apply button."""
        self.result_data_window.update_data(data)
        self.apply_button.setEnabled(True)

    def clear_data(self):
        """Empty both trees and disable the Apply button."""
        self.result_case_window.clear_data()
        self.result_data_window.clear_data()
        self.apply_button.setEnabled(False)

    def on_pulldown(self, event):
        print('pulldown...')

    def on_apply(self, event):
        """Resolve both tree selections; print (debug) or update the GUI."""
        valid_a, keys_a = self.result_case_window.treeView.get_row()
        valid_b, keys_b = self.result_data_window.treeView.get_row()
        if valid_a and valid_b:
            if self.debug:
                print('  rows1 = %s' % self.result_case_window.treeView.old_rows)
                print('        = %s' % str(keys_a))
                print('  rows2 = %s' % self.result_data_window.treeView.old_rows)
                print('        = %s' % str(keys_b))
            else:
                self.update_vtk_window(keys_a, keys_b)

    def update_vtk_window(self, keys_a, keys_b):
        """Push the selected case (keys_a) to the parent GUI window.

        keys_b (the Method selection) is currently unused.
        """
        # (an older key-matching loop over self.parent.case_keys was dead
        # code behind ``if 0:`` and has been removed)
        i = keys_a
        result_name = None
        self.parent._set_case(result_name, i, explicit=True)
class ResultsWindow(QWidget):
    """A titled tree view over nested (label, case_id, children) entries."""
    def __init__(self, name, data, choices):
        """
        Parameters
        ----------
        name : str
            the header label of the tree (e.g. 'Case/Results')
        data : list of (str, int/None, list) tuples
            the nested form entries
        choices : list
            flat payloads, forwarded to the tree view
        """
        QWidget.__init__(self)
        self.name = name
        self.data = data
        self.choices = choices

        self.treeView = QTreeView2(self.data, choices)
        self.treeView.setEditTriggers(QAbstractItemView.NoEditTriggers)

        self.model = QtGui.QStandardItemModel()
        is_single = self.addItems(self.model, data)
        self.treeView.setModel(self.model)
        self.treeView.set_single(is_single)

        self.model.setHorizontalHeaderLabels([self.tr(self.name)])

        layout = QVBoxLayout()
        layout.addWidget(self.treeView)
        self.setLayout(layout)

    def update_data(self, data):
        """Replace the tree contents with *data*."""
        # BUG FIX: the original populated the model inside a bare ``except``
        # whose handler was the undefined name ``adf`` (raising NameError
        # instead of the real error), and then populated it a SECOND time --
        # once more for strings, or with the wrong arity via
        # ``self.addItems(self.model, *tuple(data))`` otherwise.
        # Populate exactly once and let real errors propagate.
        self.clear_data()
        self.data = data
        self.addItems(self.model, data)
        self.treeView.data = data

    def clear_data(self):
        """Empty the model, reset the view's data, and restore the header."""
        self.model.clear()
        self.treeView.data = []
        self.model.setHorizontalHeaderLabels([self.tr(self.name)])

    def addItems(self, parent, elements, level=0, count_check=False):
        """Recursively append *elements* to *parent*.

        Returns
        -------
        is_single : bool
            True if the tree consists of a single childless root entry
            (that entry is also greyed out).
        """
        nelements = len(elements)
        redo = False
        try:
            for element in elements:
                if not len(element) == 3:
                    print('element = %r' % str(element))
                text, i, children = element
                nchildren = len(children)
                item = QtGui.QStandardItem(text)
                parent.appendRow(item)
                # a lone childless root entry is disabled; the caller then
                # short-circuits selection via QTreeView2.set_single(True)
                if nelements == 1 and nchildren == 0 and level == 0:
                    item.setEnabled(False)
                    redo = True
                if children:
                    assert isinstance(children, list), children
                    self.addItems(item, children, level + 1, count_check=count_check)
            is_single = redo
            return is_single
        except ValueError:
            # dump the offending data before re-raising to aid debugging
            print()
            print('elements =', elements)
            print('element =', element)
            print('len(elements)=%s' % len(elements))
            for e in elements:
                print('  e = %s' % str(e))
            raise
def main():
    """Stand-alone demo: show the sidebar widget in its own Qt application."""
    app = QApplication(sys.argv)
    sidebar = Sidebar(app, debug=True)
    sidebar.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
| saullocastro/pyNastran | pyNastran/gui/menus/results_sidebar.py | Python | lgpl-3.0 | 12,414 | [
"ADF"
] | f82508a9a4184182b866681a16333060ed3e02da8aa1686c64d0017709d3476e |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import platform
import re
import ast
import sys
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as _build_ext
if sys.version_info.major != 3:
sys.exit("scikit-bio can only be used with Python 3. You are currently "
"running Python %d." % sys.version_info.major)
# Bootstrap setup.py with numpy
# Huge thanks to coldfix's solution
# http://stackoverflow.com/a/21621689/579416
class build_ext(_build_ext):
    """build_ext that defers the numpy import until extensions are built.

    Lets ``setup.py`` be imported (and ``setup_requires`` resolved) before
    numpy is installed, then adds numpy's headers to the include path.
    See http://stackoverflow.com/a/21621689/579416 (coldfix's solution).
    """
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')

# Read the package version straight out of skbio/__init__.py without
# importing it (importing would require the compiled extensions).
with open('skbio/__init__.py', 'rb') as f:
    hit = _version_re.search(f.read().decode('utf-8')).group(1)
    version = str(ast.literal_eval(hit))

# Trove classifiers, one per non-blank line.
classes = """
    Development Status :: 4 - Beta
    License :: OSI Approved :: BSD License
    Topic :: Software Development :: Libraries
    Topic :: Scientific/Engineering
    Topic :: Scientific/Engineering :: Bio-Informatics
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3 :: Only
    Programming Language :: Python :: 3.4
    Programming Language :: Python :: 3.5
    Operating System :: Unix
    Operating System :: POSIX
    Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]

description = ('Data structures, algorithms and educational '
               'resources for bioinformatics.')

with open('README.rst') as f:
    long_description = f.read()

# Dealing with Cython: build from .pyx sources when USE_CYTHON is set,
# otherwise compile the pre-generated .c files shipped in the sdist.
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'

# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# Wolfgang Maier (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']

# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
    ssw_extra_compile_args.append('-msse2')

extensions = [
    Extension("skbio.stats.__subsample",
              ["skbio/stats/__subsample" + ext]),
    Extension("skbio.alignment._ssw_wrapper",
              ["skbio/alignment/_ssw_wrapper" + ext,
               "skbio/alignment/_lib/ssw.c"],
              extra_compile_args=ssw_extra_compile_args),
    Extension("skbio.diversity._phylogenetic",
              ["skbio/diversity/_phylogenetic" + ext])
]

if USE_CYTHON:
    from Cython.Build import cythonize
    extensions = cythonize(extensions)
# Package metadata and build wiring; cmdclass hooks in the numpy-deferring
# build_ext defined above.
setup(name='scikit-bio',
      version=version,
      license='BSD',
      description=description,
      long_description=long_description,
      author="scikit-bio development team",
      author_email="gregcaporaso@gmail.com",
      maintainer="scikit-bio development team",
      maintainer_email="gregcaporaso@gmail.com",
      url='http://scikit-bio.org',
      packages=find_packages(),
      ext_modules=extensions,
      cmdclass={'build_ext': build_ext},
      setup_requires=['numpy >= 1.9.2'],
      install_requires=[
          'lockfile >= 0.10.2',  # req'd for our usage of CacheControl
          'CacheControl >= 0.11.5',
          'decorator >= 3.4.2',
          'IPython >= 3.2.0',
          'matplotlib >= 1.4.3',
          'natsort >= 4.0.3',
          'numpy >= 1.9.2',
          'pandas >= 0.18.0',
          'scipy >= 0.15.1',
          'nose >= 1.3.7'
      ],
      classifiers=classifiers,
      # test data files shipped inside the installed package
      package_data={
          'skbio.diversity.alpha.tests': ['data/qiime-191-tt/*'],
          'skbio.diversity.beta.tests': ['data/qiime-191-tt/*'],
          'skbio.io.tests': ['data/*'],
          'skbio.io.format.tests': ['data/*'],
          'skbio.stats.tests': ['data/*'],
          'skbio.stats.distance.tests': ['data/*'],
          'skbio.stats.ordination.tests': ['data/*']
      }
      )
| anderspitman/scikit-bio | setup.py | Python | bsd-3-clause | 4,877 | [
"scikit-bio"
] | bd244aea61eb4acb7635f7a4f15ef44fce9502a290206ae003d4d5a4d7a3d353 |
#!/usr/bin/python
import os
import json
import urllib
import subprocess
import sys
# Duffy (CentOS CI) node-allocation endpoint and request parameters.
# NOTE(review): urllib.urlopen and the print statements below mark this as
# Python 2 code.
url_base = "http://admin.ci.centos.org:8080"
api_key = os.environ['API_KEY']
# number of bare-metal nodes to request; defaults to one
count = os.environ['MACHINE_COUNT'] if os.environ.get('MACHINE_COUNT') != None else "1"
ver = "7"
arch = "x86_64"
req_url = "%s/Node/get?key=%s&ver=%s&arch=%s&count=%s" % (url_base, api_key, ver, arch, count)
jsondata = urllib.urlopen(req_url).read()
# data['hosts'] lists the allocated machines; data['ssid'] identifies the session
data = json.loads(jsondata)
# Setup some variables. Can be passed as env variables via the job config. Otherwise defaults apply
repo_url = os.environ['REPO_URL'] if os.environ.get('REPO_URL') != None else 'https://github.com/projectatomic/vagrant-service-manager.git'
branch = os.environ['BRANCH'] if os.environ.get('BRANCH') != None else 'master'
def execute_on_host( host, cmd, error_message ):
# build command to execute install and test commands via ssh
ssh_cmd = "ssh -t -t "
ssh_cmd += "-o UserKnownHostsFile=/dev/null "
ssh_cmd += "-o StrictHostKeyChecking=no "
ssh_cmd += "root@%s " % (host)
cmd = '%s "%s"' % (ssh_cmd, cmd)
print "Executing: %s" % (cmd)
exit_code = subprocess.call(cmd, shell=True)
if exit_code != 0 : sys.exit(error_message)
return
def prepare_pull_request_build(host):
    """Check out and merge the pull-request branch inside the clone on *host*.

    Branch and source-repo URL come from the Jenkins ghprb plugin env vars.
    """
    pr_branch = os.environ['ghprbSourceBranch']
    pr_author_repo = os.environ['ghprbAuthorRepoGitUrl']
    branch_cmd = 'cd vagrant-service-manager && ' + ("git checkout -b %s" % (pr_branch))
    execute_on_host(host, branch_cmd, "Unable to create branch for pull request build")
    pull_cmd = 'cd vagrant-service-manager && ' + ("git pull --no-edit %s %s " % (pr_author_repo, pr_branch))
    execute_on_host(host, pull_cmd, "Unable to pull pull request")
# Provision each allocated node, run the test suite, then release the
# session back to the duffy pool.
for host in data['hosts']:
    # run the Ansible playbook
    ansible_cmd = 'yum -y install git epel-release ansible1.9 && '
    ansible_cmd += 'yum -y install ansible1.9 && '
    ansible_cmd += 'git clone %s && ' % repo_url
    ansible_cmd += 'cd vagrant-service-manager && '
    ansible_cmd += 'git checkout %s && ' % branch
    ansible_cmd += 'cd .ci/ansible && '
    ansible_cmd += 'ANSIBLE_NOCOLOR=1 ansible-playbook site.yml'
    execute_on_host(host, ansible_cmd, "Ansible playbook failed")
    # if we deal with a pull request build we need to prepare the source
    if os.environ.get('ghprbPullId') != None:
        prepare_pull_request_build(host)
    # setup the environment
    setup_cmd = 'cd vagrant-service-manager && '
    setup_cmd += 'gem install bundler -v 1.12.5 && '
    setup_cmd += 'bundle install --no-color'
    execute_on_host(host, setup_cmd, "Unable to setup Ruby environment")
    # run build and features (lint, unit tests, cucumber features, gem build)
    build_cmd = 'cd vagrant-service-manager && '
    build_cmd += 'bundle exec rake rubocop && '
    build_cmd += 'bundle exec rake test && '
    build_cmd += 'bundle exec rake features CUCUMBER_OPTS=\'-p ci\' PROVIDER=libvirt BOX=adb,cdk && '
    build_cmd += 'bundle exec rake build'
    execute_on_host(host, build_cmd, "Tests failures")
# release the allocated nodes ("done") for this session id
done_nodes_url = "%s/Node/done?key=%s&sside=%s" % (url_base, api_key, data['ssid'])
print urllib.urlopen(done_nodes_url)
| budhrg/vagrant-service-manager | .ci/jenkins-execute-script.py | Python | gpl-2.0 | 3,174 | [
"CDK"
] | b2e9d0e5f53b16792b2efae65640a37311b41b0f8deabbdac1aaf0cf47fb49a3 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Elk(MakefilePackage):
    '''An all-electron full-potential linearised augmented-plane wave
    (FP-LAPW) code with many advanced features.'''

    homepage = 'http://elk.sourceforge.net/'
    url = 'https://sourceforge.net/projects/elk/files/elk-3.3.17.tgz'

    version('3.3.17', 'f57f6230d14f3b3b558e5c71f62f0592')

    # Elk provides these libraries, but allows you to specify your own
    variant('blas', default=True,
            description='Build with custom BLAS library')
    variant('lapack', default=True,
            description='Build with custom LAPACK library')
    variant('fft', default=True,
            description='Build with custom FFT library')
    # Elk does not provide these libraries, but allows you to use them
    variant('mpi', default=True,
            description='Enable MPI parallelism')
    variant('openmp', default=True,
            description='Enable OpenMP support')
    variant('libxc', default=True,
            description='Link to Libxc functional library')

    depends_on('blas', when='+blas')
    depends_on('lapack', when='+lapack')
    depends_on('fftw', when='+fft')
    depends_on('mpi@2:', when='+mpi')
    depends_on('libxc', when='+libxc')

    # Cannot be built in parallel
    parallel = False

    def edit(self, spec, prefix):
        """Write Elk's ``make.inc`` with compilers, flags, and library paths
        selected from the active variants."""
        # Dictionary of configuration options
        config = {
            'MAKE': 'make',
            'AR': 'ar'
        }

        # Compiler-specific flags
        flags = ''
        if self.compiler.name == 'intel':
            flags = '-O3 -ip -unroll -no-prec-div'
        elif self.compiler.name == 'gcc':
            flags = '-O3 -ffast-math -funroll-loops'
        elif self.compiler.name == 'pgi':
            flags = '-O3 -lpthread'
        elif self.compiler.name == 'g95':
            flags = '-O3 -fno-second-underscore'
        elif self.compiler.name == 'nag':
            flags = '-O4 -kind=byte -dusty -dcfuns'
        elif self.compiler.name == 'xl':
            flags = '-O3'
        config['F90_OPTS'] = flags
        config['F77_OPTS'] = flags

        # BLAS/LAPACK support
        # Note: BLAS/LAPACK must be compiled with OpenMP support
        # if the +openmp variant is chosen
        # (without the variants, Elk falls back to its bundled archives)
        blas = 'blas.a'
        lapack = 'lapack.a'
        if '+blas' in spec:
            blas = spec['blas'].libs.joined()
        if '+lapack' in spec:
            lapack = spec['lapack'].libs.joined()
        # lapack must come before blas
        config['LIB_LPK'] = ' '.join([lapack, blas])

        # FFT support: either FFTW or Elk's bundled fftlib
        if '+fft' in spec:
            config['LIB_FFT'] = join_path(spec['fftw'].prefix.lib,
                                          'libfftw3.so')
            config['SRC_FFT'] = 'zfftifc_fftw.f90'
        else:
            config['LIB_FFT'] = 'fftlib.a'
            config['SRC_FFT'] = 'zfftifc.f90'

        # MPI support: use the MPI compiler wrappers, or build the stub
        if '+mpi' in spec:
            config['F90'] = spec['mpi'].mpifc
            config['F77'] = spec['mpi'].mpif77
        else:
            config['F90'] = spack_fc
            config['F77'] = spack_f77
            config['SRC_MPI'] = 'mpi_stub.f90'

        # OpenMP support: append the compiler's OpenMP flag, or build the stub
        if '+openmp' in spec:
            config['F90_OPTS'] += ' ' + self.compiler.openmp_flag
            config['F77_OPTS'] += ' ' + self.compiler.openmp_flag
        else:
            config['SRC_OMP'] = 'omp_stub.f90'

        # Libxc support: link the Fortran and C libraries, or build the stub
        if '+libxc' in spec:
            config['LIB_libxc'] = ' '.join([
                join_path(spec['libxc'].prefix.lib, 'libxcf90.so'),
                join_path(spec['libxc'].prefix.lib, 'libxc.so')
            ])
            config['SRC_libxc'] = ' '.join([
                'libxc_funcs.f90',
                'libxc.f90',
                'libxcifc.f90'
            ])
        else:
            config['SRC_libxc'] = 'libxcifc_stub.f90'

        # Write configuration options to include file
        with open('make.inc', 'w') as inc:
            for key in config:
                inc.write('{0} = {1}\n'.format(key, config[key]))

    def install(self, spec, prefix):
        """Copy the built binaries, examples, and species files by hand."""
        # The Elk Makefile does not provide an install target
        mkdir(prefix.bin)
        install('src/elk', prefix.bin)
        install('src/eos/eos', prefix.bin)
        install('src/spacegroup/spacegroup', prefix.bin)
        install_tree('examples', join_path(prefix, 'examples'))
        install_tree('species', join_path(prefix, 'species'))
| EmreAtes/spack | var/spack/repos/builtin/packages/elk/package.py | Python | lgpl-2.1 | 5,721 | [
"Elk"
] | a95b3e0f40e920d8ec1dcddc16ba83fdf4432d62218f598239b8cde446439655 |
# These tests don't work at the moment, due to the security_groups multi select not working
# in selenium (the group is selected then immediately reset)
from textwrap import dedent
import pytest
from riggerlib import recursive_update
from widgetastic_patternfly import CheckableBootstrapTreeview as Check_tree
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils import normalize_text
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.update import update
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.meta(server_roles="+automate +notifier"),
test_requirements.provision,
pytest.mark.tier(2),
pytest.mark.provider(gen_func=providers,
filters=[ProviderFilter(classes=[CloudProvider, InfraProvider],
required_flags=['provision'])],
scope="function"),
pytest.mark.usefixtures('setup_provider')
]
@pytest.fixture()
def vm_name():
    """Short randomized VM name shared by the provisioning tests."""
    name = random_vm_name(context='prov', max_length=12)
    return name
@pytest.fixture()
def instance_args(request, provider, provisioning, vm_name):
    """Prepare the (vm_name, form values) pair used to provision an instance.

    Skips when the provider's provisioning config names no template/image.
    Indirect parametrization may pass True to request automatic placement.
    """
    template = provisioning.get('image', {}).get('name') or provisioning.get(
        'template')
    if not template:
        pytest.skip(reason='template name not specified in the provisioning in config')
    inst_args = {
        'template_name': template,
        'request': {
            'notes': 'Testing provisioning from image {} to vm {} on provider {}'
                     .format(template, vm_name, provider.key),
        },
    }
    # request.param only exists when the fixture is parametrized indirectly;
    # by default provisioning stays manual.
    auto = getattr(request, 'param', False)
    if auto:
        inst_args['environment'] = {'automatic_placement': auto}
    yield vm_name, inst_args
@pytest.fixture
def provisioned_instance(provider, instance_args, appliance):
    """Provision an instance from *instance_args*; delete it on teardown."""
    vm_name, inst_args = instance_args
    collection = appliance.provider_based_collection(provider)
    instance = collection.create(vm_name, provider, form_values=inst_args)
    if not instance:
        raise Exception("instance returned by collection.create is 'None'")
    yield instance
    # Teardown: best-effort cleanup; a failure here must not fail the test.
    logger.info('Instance cleanup, deleting %s', instance.name)
    try:
        instance.cleanup_on_provider()
    except Exception as ex:
        # BUG FIX: Python 3 exceptions have no `.message` attribute, so the
        # old `.format(ex.message)` raised AttributeError inside the handler.
        # Log the exception itself instead (lazy % formatting).
        logger.warning('Exception while deleting instance fixture, continuing: %s', ex)
@pytest.mark.parametrize('instance_args', [True, False], ids=["Auto", "Manual"], indirect=True)
def test_provision_from_template(provider, provisioned_instance):
    """ Tests instance provision from template via CFME UI

    Metadata:
        test_flag: provision

    Polarion:
        assignee: jhenner
        caseimportance: critical
        casecomponent: Provisioning
        initialEstimate: 1/4h
    """
    # The provisioned_instance fixture already did all the work; just verify
    # the VM actually exists on the provider backend.
    provisioned = provisioned_instance.exists_on_provider
    assert provisioned, "Instance wasn't provisioned successfully"
@pytest.mark.provider([GCEProvider], required_fields=[['provisioning', 'image']])
@pytest.mark.usefixtures('setup_provider')
def test_gce_preemptible_provision(appliance, provider, instance_args, soft_assert):
    """
    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Provisioning
        initialEstimate: 1/6h
    """
    vm_name, inst_args = instance_args
    # Request a preemptible GCE instance through the provisioning form.
    inst_args['properties']['is_preemptible'] = True
    instance = appliance.collections.cloud_instances.create(
        vm_name, provider, form_values=inst_args)
    # Verify the Preemptible flag is reflected on the Details summary page.
    details_view = navigate_to(instance, "Details")
    preemptible = details_view.entities.summary("Properties").get_text_of("Preemptible")
    soft_assert('Yes' in preemptible, "GCE Instance isn't Preemptible")
    soft_assert(instance.exists_on_provider, "Instance wasn't provisioned successfully")
@pytest.mark.rhv2
@pytest.mark.meta(automates=[1472844])
@pytest.mark.parametrize("edit", [True, False], ids=["edit", "approve"])
def test_provision_approval(appliance, provider, vm_name, smtp_test, request,
                            edit, soft_assert):
    """ Tests provisioning approval. Tests couple of things.

    * Approve manually
    * Approve by editing the request to conform

    Prerequisities:
        * A provider that can provision.
        * Automate role enabled
        * User with e-mail set so you can receive and view them

    Steps:
        * Create a provisioning request that does not get automatically approved (eg. ``num_vms``
            bigger than 1)
        * Wait for an e-mail to come, informing you that approval is pending
        * Depending on whether you want to do manual approval or edit approval, do:
            * MANUAL: manually approve the request in UI
            * EDIT: Edit the request in UI so it conforms the rules for auto-approval.
        * Wait for an e-mail with approval
        * Wait until the request finishes
        * Wait until an email with provisioning complete

    Metadata:
        test_flag: provision
        suite: infra_provisioning

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Provisioning
        initialEstimate: 1/8h
    """
    # generate_tests makes sure these have values
    # template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore'))
    # It will provision two of them
    # All the subject checks are normalized, because of newlines and capitalization
    # (vm_names is assigned later in both the edit and the approve branch, so
    # the old up-front assignment was dead code and has been dropped)
    if provider.one_of(CloudProvider):
        requester = ""
        vm_type = "instance"
    else:
        requester = "vm_provision@cfmeqe.com "  # include trailing space for clean formatting
        vm_type = "virtual machine"

    collection = appliance.provider_based_collection(provider)
    inst_args = {
        'catalog': {
            'vm_name': vm_name,
            'num_vms': '2'
        }
    }

    # num_vms = 2 exceeds the auto-approval limit, so the request stays pending.
    vm = collection.create(vm_name, provider, form_values=inst_args, wait=False)
    try:
        wait_for(
            lambda: len(smtp_test.get_emails()) >= 2,
            num_sec=90,
            delay=3
        )
    except TimedOutError:
        pytest.fail('Did not receive at least 2 emails from provisioning request, received: {}'
                    .format(smtp_test.get_emails()))

    pending_subject = normalize_text("your {} request is pending".format(vm_type))
    # requester includes the trailing space
    pending_from = normalize_text("{} request from {}pending approval".format(vm_type, requester))

    received_pending = [normalize_text(m["subject"]) for m in smtp_test.get_emails()]
    # Looking for each expected subject in the list of received subjects with partial match
    for subject in [pending_subject, pending_from]:
        soft_assert(any(subject in r_sub for r_sub in received_pending),
                    'Expected subject [{}], not matched in received subjects [{}]'
                    .format(subject, received_pending))

    smtp_test.clear_database()

    cells = {'Description': 'Provision from [{}] to [{}###]'.format(vm.template_name, vm.name)}
    provision_request = appliance.collections.requests.instantiate(cells=cells)
    if edit:
        # Automatic approval after editing the request to conform
        new_vm_name = '{}-xx'.format(vm_name)
        modifications = {
            'catalog': {
                'num_vms': "1",
                'vm_name': new_vm_name
            },
            'Description': 'Provision from [{}] to [{}]'.format(vm.template_name, new_vm_name)
        }
        provision_request.edit_request(values=modifications)
        vm_names = [new_vm_name]  # Will be just one now
        request.addfinalizer(
            lambda: collection.instantiate(new_vm_name, provider).cleanup_on_provider()
        )
    else:
        # Manual approval
        provision_request.approve_request(method='ui', reason="Approved")
        vm_names = [vm_name + "001", vm_name + "002"]  # There will be two VMs
        request.addfinalizer(
            lambda: [appliance.collections.infra_vms.instantiate(v_name,
                                                                 provider).cleanup_on_provider()
                     for v_name in vm_names]
        )
    try:
        wait_for(
            lambda: len(smtp_test.get_emails()) >= 2,
            num_sec=90,
            delay=3
        )
    except TimedOutError:
        # BUG fix: the message previously said "at least 1 emails" although the
        # condition above waits for two.
        pytest.fail('Did not receive at least 2 emails from provisioning request, received: {}'
                    .format(smtp_test.get_emails()))

    # requester includes the trailing space
    approved_subject = normalize_text("your {} request was approved".format(vm_type))
    approved_from = normalize_text("{} request from {}was approved".format(vm_type, requester))

    received_approved = [normalize_text(m["subject"]) for m in smtp_test.get_emails()]
    # Looking for each expected subject in the list of received subjects with partial match
    for subject in [approved_subject, approved_from]:
        soft_assert(any(subject in r_sub for r_sub in received_approved),
                    'Expected subject [{}], not matched in received subjects [{}]'
                    .format(subject, received_approved))

    smtp_test.clear_database()

    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vms %s to appear on provider %s', ", ".join(vm_names), provider.key)
    wait_for(
        lambda: all(map(provider.mgmt.does_vm_exist, vm_names)),
        handle_exception=True,
        num_sec=600
    )

    provision_request.wait_for_request(method='ui')
    msg = "Provisioning failed with the message {}".format(provision_request.row.last_message.text)
    assert provision_request.is_succeeded(method='ui'), msg

    # account for multiple vms, specific names
    completed_subjects = [
        normalize_text("your {} request has completed vm name {}".format(vm_type, name))
        for name in vm_names
    ]
    expected_subject_count = len(vm_names)

    # Wait for e-mails to appear
    try:
        wait_for(
            lambda: len(smtp_test.get_emails()) >= expected_subject_count,
            message="provisioning request completed emails",
            delay=5
        )
    except TimedOutError:
        pytest.fail('Did not receive enough emails (> {}) from provisioning request, received: {}'
                    .format(expected_subject_count, smtp_test.get_emails()))

    received_complete = [normalize_text(m['subject']) for m in smtp_test.get_emails()]
    for expected_subject in completed_subjects:
        # BUG fix: the failure message previously interpolated the stale loop
        # variable ``subject`` from the approval loop above instead of
        # ``expected_subject``.
        soft_assert(
            any(expected_subject in subject for subject in received_complete),
            'Expected subject [{}], not matched in received subjects [{}]'
            .format(expected_subject, received_complete)
        )
@test_requirements.rest
@pytest.mark.parametrize('auto', [True, False], ids=["Auto", "Manual"])
@pytest.mark.meta(blockers=[
    BZ(1720751, unblock=lambda provider: not provider.one_of(SCVMMProvider))
])
def test_provision_from_template_using_rest(appliance, request, provider, vm_name, auto):
    """ Tests provisioning from a template using the REST API.

    Metadata:
        test_flag: provision, rest

    Polarion:
        assignee: pvala
        casecomponent: Provisioning
        caseimportance: high
        initialEstimate: 1/30h
    """
    # Automatic placement lets CFME choose the environment; otherwise the
    # REST defaults are used.
    if auto:
        form_values = {"vm_fields": {"placement_auto": True}}
    else:
        form_values = None
    collection = appliance.provider_based_collection(provider)
    instance = collection.create_rest(vm_name, provider, form_values=form_values)

    wait_for(
        lambda: instance.exists,
        num_sec=1000, delay=5, message="VM {} becomes visible".format(vm_name)
    )

    @request.addfinalizer
    def _cleanup():
        # Best-effort deletion on the provider backend.
        logger.info('Instance cleanup, deleting %s', instance.name)
        try:
            instance.cleanup_on_provider()
        except Exception as ex:
            # BUG fix: Python 3 exceptions have no ``.message`` attribute; log
            # the exception itself with lazy %-formatting.
            logger.warning('Exception while deleting instance fixture, continuing: %s', ex)
@pytest.fixture(scope="module")
def original_request_class(appliance):
    """Return the stock ManageIQ Cloud/VM/Provisioning/StateMachines/Methods class."""
    node = appliance.collections.domains.instantiate(name='ManageIQ')
    for namespace_name in ('Cloud', 'VM', 'Provisioning', 'StateMachines'):
        node = node.namespaces.instantiate(name=namespace_name)
    return node.classes.instantiate(name='Methods')
@pytest.fixture(scope="module")
def modified_request_class(request, domain, original_request_class):
    """Copy the stock Methods class into the custom domain and return the copy.

    The copied class is deleted at module teardown.
    """
    original_request_class.copy_to(domain)
    node = domain
    for namespace_name in ('Cloud', 'VM', 'Provisioning', 'StateMachines'):
        node = node.namespaces.instantiate(name=namespace_name)
    klass = node.classes.instantiate(name='Methods')
    request.addfinalizer(klass.delete_if_exists)
    return klass
@pytest.fixture(scope="module")
def copy_domains(original_request_class, domain):
    """Copy the OpenStack provisioning automate methods into the custom domain."""
    for method_name in ('openstack_PreProvision', 'openstack_CustomizeRequest'):
        original_request_class.methods.instantiate(name=method_name).copy_to(domain)
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[BZ(1713632)])
@pytest.mark.parametrize("disks", [1, 2])
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']])
def test_cloud_provision_from_template_with_attached_disks(
        appliance, request, instance_args, provider, disks, soft_assert, domain,
        modified_request_class, copy_domains, provisioning):
    """ Tests provisioning from a template and attaching disks

    Metadata:
        test_flag: provision

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Provisioning
        initialEstimate: 1/4h
    """
    vm_name, inst_args = instance_args
    # Modify availiability_zone for Azure provider
    if provider.one_of(AzureProvider):
        # BUG fix: ``provisioning`` is a mapping, not a callable — the old
        # ``provisioning("av_set")`` raised TypeError.  (This branch is
        # currently dead because the test is collected for OpenStack only.)
        recursive_update(inst_args, {'environment': {'availability_zone': provisioning['av_set']}})
    device_name = "vd{}"
    device_mapping = []
    volumes = provider.mgmt.volume_configurations(1, n=disks)

    @request.addfinalizer
    def delete_volumes():
        for volume in volumes:
            provider.mgmt.delete_volume(volume)

    # Set up automate
    for i, volume in enumerate(volumes, 0):
        # note the boot_index specifies an ordering in which the disks are tried to
        # boot from. The value -1 means "never".
        device_mapping.append(
            {'boot_index': 0 if i == 0 else -1,
             'uuid': volume,
             'device_name': device_name.format(chr(ord("a") + i))})
        # Only the first volume is bootable.
        if i == 0:
            provider.mgmt.capi.volumes.set_bootable(volume, True)

    method = modified_request_class.methods.instantiate(name="openstack_PreProvision")
    view = navigate_to(method, 'Details')
    # Remember the stock automate script so it can be restored at teardown.
    former_method_script = view.script.get_value()

    # Build the Ruby block_device_mapping_v2 entries for the automate script.
    disk_mapping = []
    for mapping in device_mapping:
        one_field = dedent("""{{
            :boot_index => {boot_index},
            :uuid => "{uuid}",
            :device_name => "{device_name}",
            :source_type => "volume",
            :destination_type => "volume",
            :volume_size => 1,
            :delete_on_termination => false
        }}""")
        disk_mapping.append(one_field.format(**mapping))

    volume_method = dedent("""
        clone_options = {{
        :image_ref => nil,
        :block_device_mapping_v2 => [
        {}
        ]
        }}

        prov = $evm.root["miq_provision"]
        prov.set_option(:clone_options, clone_options)
    """)
    with update(method):
        method.script = volume_method.format(",\n".join(disk_mapping))

    @request.addfinalizer
    def _finish_method():
        with update(method):
            method.script = former_method_script

    instance = appliance.collections.cloud_instances.create(vm_name,
                                                            provider,
                                                            form_values=inst_args)

    @request.addfinalizer
    def delete_vm_and_wait_for_gone():
        instance.cleanup_on_provider()
        wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)

    for volume_id in volumes:
        attachments = provider.mgmt.volume_attachments(volume_id)
        # BUG fix: the message had only two placeholders for three format
        # arguments (attachments was silently dropped) and misspelled
        # "attachments".
        soft_assert(
            vm_name in attachments,
            'The vm {} not found among the attachments of volume {}: {}'.format(
                vm_name, volume_id, attachments))
    for device in device_mapping:
        provider_devpath = provider.mgmt.volume_attachments(device['uuid'])[vm_name]
        expected_devpath = '/dev/{}'.format(device['device_name'])
        soft_assert(
            provider_devpath == expected_devpath,
            'Device {} is not attached to expected path: {} but to: {}'.format(
                device['uuid'], expected_devpath, provider_devpath))
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[BZ(1746931)])
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']])
def test_provision_with_boot_volume(request, instance_args, provider, soft_assert,
                                    modified_request_class, appliance, copy_domains):
    """ Tests provisioning from a template and attaching one booting volume.

    Metadata:
        test_flag: provision, volumes

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Provisioning
        initialEstimate: 1/4h
    """
    vm_name, inst_args = instance_args
    image = inst_args.get('template_name')
    # Create a 1 GB volume seeded from the template image; the instance will
    # boot from it instead of the image itself.
    volume = provider.mgmt.create_volume(1, imageRef=provider.mgmt.get_template(image).uuid)
    request.addfinalizer(lambda: provider.mgmt.delete_volume(volume))

    # Set up automate: override the CustomizeRequest method so the clone
    # request uses the created volume as the (only) boot device at /dev/vda.
    method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
    view = navigate_to(method, 'Details')
    # Keep the stock script around so it can be restored at teardown.
    former_method_script = view.script.get_value()
    with update(method):
        method.script = dedent('''\
            $evm.root["miq_provision"].set_option(
                :clone_options, {{
                    :image_ref => nil,
                    :block_device_mapping_v2 => [{{
                        :boot_index => 0,
                        :uuid => "{}",
                        :device_name => "vda",
                        :source_type => "volume",
                        :destination_type => "volume",
                        :volume_size => 1,
                        :delete_on_termination => false
                    }}]
                }}
            )
        '''.format(volume))

    @request.addfinalizer
    def _finish_method():
        # Restore the original automate script.
        with update(method):
            method.script = former_method_script

    instance = appliance.collections.cloud_instances.create(vm_name,
                                                            provider,
                                                            form_values=inst_args)

    @request.addfinalizer
    def delete_vm_and_wait_for_gone():
        instance.cleanup_on_provider()  # To make it possible to delete the volume
        wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)

    request_description = 'Provision from [{}] to [{}]'.format(image, instance.name)
    provision_request = appliance.collections.requests.instantiate(request_description)
    provision_request.wait_for_request(method='ui')
    msg = "Provisioning failed with the message {}".format(
        provision_request.row.last_message.text)
    assert provision_request.is_succeeded(method='ui'), msg

    # The created volume must be attached to the new instance at /dev/vda.
    soft_assert(instance.name in provider.mgmt.volume_attachments(volume))
    soft_assert(provider.mgmt.volume_attachments(volume)[instance.name] == "/dev/vda")
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[BZ(1746931)])
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']])
def test_provision_with_additional_volume(request, instance_args, provider, small_template,
                                          soft_assert, modified_request_class, appliance,
                                          copy_domains):
    """ Tests provisioning with setting specific image from AE and then also making it create and
    attach an additional 3G volume.

    Metadata:
        test_flag: provision, volumes

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Provisioning
        initialEstimate: 1/4h
    """
    vm_name, inst_args = instance_args

    # Set up automate: force the boot source to the small template image and
    # have Nova create a fresh 3 GB volume from it during provisioning.
    method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
    try:
        image_id = provider.mgmt.get_template(small_template.name).uuid
    except KeyError:
        pytest.skip("No small_template in provider data!")
    view = navigate_to(method, 'Details')
    # Keep the stock script around so it can be restored at teardown.
    former_method_script = view.script.get_value()
    with update(method):
        method.script = dedent('''\
            $evm.root["miq_provision"].set_option(
                :clone_options, {{
                    :image_ref => nil,
                    :block_device_mapping_v2 => [{{
                        :boot_index => 0,
                        :uuid => "{}",
                        :device_name => "vda",
                        :source_type => "image",
                        :destination_type => "volume",
                        :volume_size => 3,
                        :delete_on_termination => false
                    }}]
                }}
            )
        '''.format(image_id))

    @request.addfinalizer
    def _finish_method():
        # Restore the original automate script.
        with update(method):
            method.script = former_method_script

    def cleanup_and_wait_for_instance_gone():
        # Collect attached volume ids before deleting the instance, then
        # delete the volumes once the instance is gone (they are created with
        # delete_on_termination => false).
        instance.mgmt.refresh()
        prov_instance_raw = instance.mgmt.raw
        instance_volumes = getattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
        instance.cleanup_on_provider()
        wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
        # Delete the volumes.
        for volume in instance_volumes:
            provider.mgmt.delete_volume(volume['id'])

    instance = appliance.collections.cloud_instances.create(
        vm_name, provider, form_values=inst_args)
    request.addfinalizer(cleanup_and_wait_for_instance_gone)

    request_description = 'Provision from [{}] to [{}]'.format(small_template.name, instance.name)
    provision_request = appliance.collections.requests.instantiate(request_description)
    try:
        provision_request.wait_for_request(method='ui')
    except Exception as e:
        logger.info(
            "Provision failed {}: {}".format(e, provision_request.request_state))
        raise
    assert provision_request.is_succeeded(method='ui'), (
        "Provisioning failed with the message {}".format(
            provision_request.row.last_message.text))

    # Exactly one 3 GB volume must exist and be attached to the instance.
    instance.mgmt.refresh()
    prov_instance_raw = instance.mgmt.raw
    assert hasattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
    volumes_attached = getattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
    assert len(volumes_attached) == 1
    volume_id = volumes_attached[0]["id"]
    assert provider.mgmt.volume_exists(volume_id)
    volume = provider.mgmt.get_volume(volume_id)
    assert volume.size == 3
@test_requirements.tag
def test_provision_with_tag(appliance, vm_name, tag, provider, request):
    """ Tests tagging instance using provisioning dialogs.

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, pick a tag.
        * Submit the provisioning request and wait for it to finish.
        * Visit instance page, it should display the selected tags

    Metadata:
        test_flag: provision

    Polarion:
        assignee: anikifor
        casecomponent: Tagging
        initialEstimate: 1/4h
    """
    # Select the tag in the Purpose tab tree of the provisioning form.
    tag_path = ['{} *'.format(tag.category.display_name), tag.display_name]
    inst_args = {'purpose': {'apply_tags': Check_tree.CheckNode(tag_path)}}
    collection = appliance.provider_based_collection(provider)
    instance = collection.create(vm_name, provider, form_values=inst_args)
    request.addfinalizer(instance.cleanup_on_provider)
    assert tag in instance.get_tags(), 'Provisioned instance does not have expected tag'
@pytest.mark.tier(2)
@test_requirements.multi_region
@test_requirements.provision
@pytest.mark.long_running
def test_provision_from_template_from_global_region(setup_multi_region_cluster,
                                                    multi_region_cluster,
                                                    activate_global_appliance,
                                                    setup_remote_provider,
                                                    provisioned_instance):
    """
    Polarion:
        assignee: izapolsk
        caseimportance: high
        casecomponent: Provisioning
        initialEstimate: 1/10h
    """
    # The multi-region fixtures do the setup; just check the backend result.
    failure_msg = "Instance wasn't provisioned successfully"
    assert provisioned_instance.exists_on_provider, failure_msg
| izapolsk/integration_tests | cfme/tests/cloud_infra_common/test_provisioning.py | Python | gpl-2.0 | 26,237 | [
"VisIt"
] | e4853e74316187cc537f3b1d454db1e5e17b7bb5f84fc04a7c8967d915f2f162 |
"""
aRMSD log functions
(c) 2017 by Arne Wagner
"""
# Authors: Arne Wagner
# License: MIT
from __future__ import absolute_import, division, print_function
from builtins import range, input
try:
import numpy as np
except ImportError:
pass
class Logger(object):
    """ An object used for logging / plotting messages on screen """

    def __init__(self, __version__, year):
        """Initialize the messenger with the program version and release year."""
        self.version = __version__  # Program version
        self.year = year  # Year
        self.file_name = 'aRMSD_logfile.out'  # Name of the designated outfile
        self.file_mol1 = None  # File name of molecule 1
        self.file_mol2 = None  # File name of molecule 2
        self.name_mol1 = 'Model'  # Name of molecule 1
        self.name_mol2 = 'Reference'  # Name of molecule 2
        self.has_requirements = False  # If requirements are met
        self.has_cor_std_mol1 = False  # If molecule coordinates have standard deviations (molecule 1)
        self.has_cor_std_mol2 = False  # If molecule coordinates have standard deviations (molecule 2)
        self.use_std = True  # If standard deviations are to be used
        self.disorder_mol1 = False  # If atoms on identical positions were found (molecule 1)
        self.disorder_mol2 = False  # If atoms on identical positions were found (molecule 2)
        self.file_import = False  # If data import was successful
        self.chk_for_unit = False  # If units of the coordinates were checked
        self.chk_for_unit_warn = False  # If number of atoms is sufficient for unit check
        self.unit_transform = False  # If coordinate units were transformed (b2a)
        self.has_sub = False  # If a substructure has been defined
        self.cons_init_at_mol1 = None  # Initial Number of atoms in molecule 1
        self.cons_init_at_mol2 = None  # Initial Number of atoms in molecule 2
        self.cons_init_at_H_mol1 = None  # Initial Number of H-atoms in molecule 1
        self.cons_init_at_H_mol2 = None  # Initial Number of H-atoms in molecule 2
        self.rem_H_btc = False  # If hydrogens bound to carbons have been removed
        self.rem_H_btg14 = False  # If hydrogens bound to group-14 atoms have been removed
        self.rem_H_all = False  # If all hydrogen atoms have been removed
        self.can_rem_H_btc = True  # If hydrogens bound to carbons can be removed
        self.can_rem_H_btg14 = True  # If hydrogens bound to group-14 atoms can be removed
        self.user_choice_rem_all_H = False  # User choice to remove all H atoms
        self.user_choice_rem_btc_H = False  # User choice to remove all H atoms
        self.user_choice_rem_btg14_H = False  # User choice to remove all H atoms
        self.n_atoms = None  # Final number of atoms after consistency
        self.rot_to_std = None  # Rotation matrix for standard orientation
        self.use_groups = False  # If PSE groups are to be used in matching algorithm
        self.consist = False  # If the two molecular strucutres are consistent
        self.match_alg = 'distance'  # Matching algorithm
        self.match_solv = 'hungarian'  # Solver used in matching process
        self.is_matched = False  # If the molecules were matched
        self.was_saved = False  # If a status has been saved (checkpoint)
        self.xfs_energy = None  # Energy of X-ray source (None is unused)
        # Quality descriptors for bond distances / angles / torsions
        self.prop_bnd_dist_rmsd = None
        self.prop_bnd_dist_r_sq = None
        self.prop_bnd_dist_type_rmsd = None
        self.prop_bnd_dist_type_r_sq = None
        self.prop_ang_rmsd = None
        self.prop_ang_r_sq = None
        self.prop_tor_rmsd = None
        self.prop_tor_r_sq = None
        self.d_min = None  # d_min value for GARD calculation
        self.d_max = None  # d_max value for GARD calculation
        self.has_np = False  # If numpy is available
        self.np_version = None  # numpy version
        self.has_vtk = False  # If VTK is available
        self.vtk_version = None  # VTK version
        self.has_mpl = False  # If matplotlib is available
        self.mpl_version = None  # matplotlib version
        self.has_pybel = False  # If openbabel/pybel is available
        self.py_version = None  # legacy attribute, kept for backward compatibility
        # BUG fix: get_pybel()/pt_modules() use 'pyb_version'; initialize it so
        # pt_modules() does not raise AttributeError before check_modules().
        self.pyb_version = None  # openbabel/pybel version
        self.has_uc = False  # If uncertainties is available
        self.uc_version = None  # uncertainties version
        self.max_string_len = 60  # Maximum character length per line
def get_numpy(self, has_np, np_version):
self.has_np = has_np # If numpy is available
self.np_version = np_version # numpy version
def get_vtk(self, has_vtk, vtk_version):
self.has_vtk = has_vtk # If VTK is available
self.vtk_version = vtk_version # VTK version
def get_mpl(self, has_mpl, mpl_version):
self.has_mpl = has_mpl # If matplotlib is available
self.mpl_version = mpl_version # matplotlib version
def get_pybel(self, has_pybel, pyb_version):
self.has_pybel = has_pybel # If openbabel/pybel is available
self.pyb_version = pyb_version # openbabel/pybel version
def get_uncertainties(self, has_uc, uc_version):
self.has_uc = has_uc # If uncertainties is available
self.uc_version = uc_version # uncertainties version
def pt_no_mpl(self):
print("\n> Matplotlib appears to be missing - but is required for 2D plots!")
def check_modules(self, has_np, np_version, has_vtk, vtk_version,
has_mpl, mpl_version, has_pybel, pyb_version,
has_uc, uc_version):
self.get_numpy(has_np, np_version)
self.get_vtk(has_vtk, vtk_version)
self.get_mpl(has_mpl, mpl_version)
self.get_pybel(has_pybel, pyb_version)
self.get_uncertainties(has_uc, uc_version)
self.has_requirements = self.has_np and self.has_vtk
def pt_modules(self):
print("\nModule check:")
print("- numpy \t'" + str(self.np_version) + "'")
print("- VTK \t'" + str(self.vtk_version) + "'")
print("- matplotlib \t'" + str(self.mpl_version) + "'")
print("- uncertainties \t'" + str(self.uc_version) + "'")
print("- openbabel \t'" + str(self.pyb_version) + "'")
    def format_value(self, value, n_digits):
        """Format a value for fixed-width log output.

        When ``self.use_std`` is true, ``value`` is expected to be an
        uncertainties ``ufloat`` (it must expose ``nominal_value`` and
        ``std_dev``); otherwise a plain float is expected.  Values below 10
        formatted with ``n_digits == 0`` get a leading '0' so columns align.
        The result is right-padded with spaces to a fixed width (12 chars if
        ``n_digits != 0``, else 5).
        """
        str_len = 12 if n_digits != 0 else 5
        ft_str_norm = '{:3.2f}'
        if n_digits != 0:
            # Build e.g. '{:4.4f}' dynamically from the requested digit count
            ft_str_norm = '{:' + str(n_digits) + '.' + str(n_digits) + 'f}'
        ft_str_unce = '{:.1uS}'  # One digit for values with uncertainties
        if self.use_std:  # If standard deviations exist
            if value.std_dev == 0.0 or n_digits == 0:  # Different format for values without standard deviations
                add = str_len - len(ft_str_norm.format(value.nominal_value))
                if n_digits == 0 and value.nominal_value < 10.0:
                    # Leading zero keeps single-digit values column-aligned
                    return '0' + ft_str_norm.format(value.nominal_value) + ' ' * (add - 1)
                else:
                    return ft_str_norm.format(value.nominal_value) + ' ' * add
            else:
                # Shorthand uncertainty notation, e.g. '1.23(4)'
                add = str_len - len(ft_str_unce.format(value))
                return ft_str_unce.format(value) + ' ' * add
        elif n_digits == 0 and value < 10.0:
            add = str_len - len(ft_str_norm.format(value))
            return '0' + ft_str_norm.format(value) + ' ' * (add - 1)
        else:  # No ufloat values
            # NOTE(review): this branch returns without trailing padding,
            # unlike the others -- presumably intentional; confirm if columns
            # misalign for plain values >= 10.
            return ft_str_norm.format(value)
def format_sym_idf(self, sym_idf1, sym_idf2):
return ' ' * (6 - len(sym_idf1)) + sym_idf1 + ' -- ' + sym_idf2 + ' ' * (6 - len(sym_idf2))
###############################################################################
# WRITE OUTFILE
###############################################################################
def write_logfile(self, align, settings):
def adj_str(string, prefix='\n\t', suffix='\t'):
delta = self.max_string_len - len(string)
return prefix + string + ' ' * delta + suffix
def wt_general_info():
output.write('===================================================================================================')
output.write('\n aRMSD - automatic RMSD Calculator: Version ' +
str(self.version))
output.write('\n===================================================================================================')
output.write('\n A. Wagner, University of Heidelberg (' + str(self.year) + ')')
output.write('\n\n\tA brief description of the program can be found in the manual and in:')
output.write('\n\tA. Wagner, PhD thesis, University of Heidelberg, 2015.\n')
output.write('\n---------------------------------------------------------------------------------------------------\n')
output.write('\n*** Cite this program as:' +
'\n A. Wagner, H.-J. Himmel, J. Chem. Inf. Model, 2017, 57, 428-438.')
output.write('\n\n---------------------------------------------------------------------------------------------------')
output.write(adj_str('*** Log file of the superposition between the structures ***', prefix='\n\n', suffix='\n'))
output.write(adj_str('"' + str(self.name_mol1) + '"...', prefix='\n\t', suffix='\t') + str(self.file_mol1))
output.write(adj_str('"' + str(self.name_mol2) + '"...', prefix='\n\t', suffix='\t') + str(self.file_mol2))
def wt_consistency():
output.write(adj_str('* Consistency establishment between the structures:', prefix='\n\n', suffix='\n'))
output.write(adj_str('# The basic approach is to subsequently remove hydrogen atoms until', prefix='\n\t', suffix=''))
output.write(adj_str('# the same number of atoms is found in both molecules', prefix='\n\t', suffix=''))
output.write(adj_str('# If the number of atoms is identical and the atom types belong', prefix='\n\t', suffix=''))
output.write(adj_str('# to the same group in the periodic table, the molecules are', prefix='\n\t', suffix=''))
output.write(adj_str('# regarded as consistent', prefix='\n\t', suffix=''))
if self.disorder_mol1:
output.write(adj_str(' - Initial disorder was found in "', prefix='\n\n', suffix='') + str(self.name_mol2) + '"')
output.write(adj_str('Disorder was resolved by the user...', prefix='\n\t', suffix=''))
if self.disorder_mol2:
output.write(adj_str(' - Initial disorder was found in "', prefix='\n\t', suffix='') + str(self.name_mol2) + '"')
output.write(adj_str('Disorder was resolved by the user...', prefix='\n\t', suffix=''))
if not self.disorder_mol1 and not self.disorder_mol2:
output.write(adj_str(' - No disorder in the structures was found', prefix='\n\n', suffix='\n'))
output.write(adj_str('Initial number of atoms in "' + str(self.name_mol1) + '"...', prefix='\n\t', suffix='\t') +
str(self.cons_init_at_mol1) + '\t(' + str(self.cons_init_at_H_mol1) + ' H atoms)')
output.write(adj_str('Initial number of atoms in "' + str(self.name_mol2) + '"...', prefix='\n\t', suffix='\t') +
str(self.cons_init_at_mol2) + '\t(' + str(self.cons_init_at_H_mol2) + ' H atoms)')
if self.rem_H_btc:
output.write(adj_str('H atoms bound to carbon were removed...', prefix='\n\t', suffix=''))
if self.rem_H_btg14:
output.write(adj_str('H atoms bound to group-14 elements were removed...', prefix='\n\t', suffix=''))
if self.rem_H_all:
output.write(adj_str('All H atoms were removed...', prefix='\n\t', suffix=''))
output.write(adj_str(' - Consistency between the structures was established', prefix='\n\n', suffix=''))
output.write(adj_str('The number of atoms in "' + str(self.name_mol1) + '"...', prefix='\n\n\t', suffix='\t') +
str(self.cons_at_mol1) + '\t(' + str(self.cons_at_H_mol1) + ' H atoms)')
output.write(adj_str('The number of atoms in "' + str(self.name_mol2) + '"...', prefix='\n\t', suffix='\t') +
str(self.cons_at_mol2) + '\t(' + str(self.cons_at_H_mol2) + ' H atoms)')
if not self.user_choice_rem_all_H and not self.user_choice_rem_btc_H and not self.user_choice_rem_btg14_H:
output.write(adj_str(' - No further modifications were done', prefix='\n\n', suffix=''))
if self.user_choice_rem_all_H:
output.write(adj_str(' - All hydrogen atoms were removed by the user', prefix='\n\n', suffix=''))
elif self.user_choice_rem_btc_H:
output.write(adj_str(' - All hydrogen atoms bound to carbon were removed by the user', prefix='\n\n', suffix=''))
elif self.user_choice_rem_btg14_H:
output.write(adj_str(' - All hydrogen atoms bound to group-14 elements were removed by the user', prefix='\n\n', suffix=''))
output.write(adj_str('Final number of atoms...', prefix='\n\n\t', suffix='\t') + str(align.n_atoms) +
'\t(' + str(align.n_hydro) + ' H atoms)')
def wt_std_orientation():
output.write(adj_str('* Transformation of the molecules into "Standard Orientation":', prefix='\n\n', suffix='\n'))
output.write(adj_str('# 1. The center of mass was shifted to the Cartesian origin', prefix='\n\t', suffix=''))
output.write(adj_str('# 2. The moment of inertia tensor was constructed, diagonalized', prefix='\n\t', suffix=''))
output.write(adj_str('# and the eigenvectors rotated on the x, y and z axes', prefix='\n\t', suffix=''))
def wt_match():
output.write(adj_str('* Details of the matching process:', prefix='\n\n', suffix='\n'))
output.write(adj_str('Structures were matched...', prefix='\n\t', suffix='\t') + str(self.is_matched))
if self.is_matched:
output.write(adj_str('Applied matching algorithm...', prefix='\n\t', suffix='\t') + str(self.match_alg))
output.write(adj_str('Solver used for matching...', prefix='\n\t', suffix='\t') + str(self.match_solv))
if self.use_groups:
output.write(adj_str('Solution of the matching problem...', prefix='\n\t', suffix='\t') + 'PSE groups')
else:
output.write(adj_str('Solution of the matching problem...', prefix='\n\t', suffix='\t') + 'regular')
output.write(adj_str('Number of highest deviations to be shown...', prefix='\n\t', suffix='\t') + str(self.n_dev))
output.write(adj_str('The highest deviations were between the pairs...', prefix='\n\n\t', suffix='\t') + '[Angstrom]\n')
[output.write('\n\t\t\t' + self.format_sym_idf(align.sym_idf_mol1[self.disord_pos[entry]], align.sym_idf_mol2[self.disord_pos[entry]]) + '\t\t\t\t\t\t\t' +
'{:6.5f}'.format(self.disord_rmsd[entry])) for entry in range(self.n_dev)]
output.write(adj_str('The RMSD after matching was...', prefix='\n\n\t', suffix='\t') +
'{:6.5f}'.format(self.match_rmsd) + ' [Angstrom]')
def wt_kabsch():
    """Write the Kabsch-alignment section of the logfile.

    Covers the general alignment settings, the color/differentiation
    criteria used in the aRMSD plot, the total rotation matrix U, the
    superposition quality descriptors (R^2, cosine similarity, GARD,
    RMSD) and the RMSD decompositions by atom type, z-matrix property
    and (optionally) substructure.  Uses the enclosing scope's
    ``output``, ``adj_str``, ``align``, ``settings`` and ``self``.
    """
    # Contribution of individual atom types to the total squared deviation, in percent
    rmsd_perc = (align.rmsd_idv ** 2 / np.sum(align.rmsd_idv ** 2)) * 100
    output.write(adj_str('* Kabsch alignment:', prefix='\n\n', suffix='\n'))
    output.write(adj_str('# General settings', prefix='\n\t', suffix='\n'))
    output.write(adj_str('Substructures were defined...', prefix='\n\t', suffix='\t') + str(align.has_sub_rmsd))
    output.write(adj_str('Weighting function for Kabsch algorithm...', prefix='\n\t', suffix='\t') + str(align.wts_type))
    output.write(adj_str('Consideration of multi-center-contributions...', prefix='\n\t', suffix='\t') + str(align.calc_mcc))
    output.write(adj_str('# Differentiation criteria and color information', prefix='\n\n\t', suffix='\n'))
    output.write(adj_str('Number of colors for aRMSD plot...', prefix='\n\t', suffix='\t') + str(settings.n_col_aRMSD))
    output.write(adj_str('Maximum RMSD value for color projection...', prefix='\n\t', suffix='\t') + str(settings.max_RMSD_diff) + ' [Angstrom]')
    output.write(adj_str('Threshold for bond comparison...', prefix='\n\t', suffix='\t') + str(settings.thresh) + ' [Angstrom]')
    # BUGFIX: this entry is a plain count -- the trailing ' [Angstrom]' unit was removed.
    # NOTE(review): align.n_chd_bnd is also reported below as 'Number of bonds below
    # threshold'; confirm both lines are meant to read the same attribute.
    output.write(adj_str('Number of distance pairs above threshold...', prefix='\n\t', suffix='\t') + str(align.n_chd_bnd))
    output.write(adj_str('Percentage of the colored intersections...', prefix='\n\t', suffix='\t') + str((1.0 - 2 * settings.n) * 100) + ' [%]')
    output.write(adj_str('Color for shorter bonds in "' + str(self.name_mol1) + '" wrt "' + str(self.name_mol2) + '"...', prefix='\n\t', suffix='\t') +
                 str(settings.col_short_hex) + ' [HEX]')
    # BUGFIX: read '"mol2" wrt "mol2"' before (copy-paste error); bonds longer in
    # mol2 are longer with respect to mol1.
    output.write(adj_str('Color for longer bonds in "' + str(self.name_mol2) + '" wrt "' + str(self.name_mol1) + '"...', prefix='\n\t', suffix='\t') +
                 str(settings.col_long_hex) + ' [HEX]')
    output.write(adj_str('Number of bonds below threshold...', prefix='\n\t', suffix='\t') + str(align.n_chd_bnd))
    output.write(adj_str('Color of "' + str(self.name_mol1) + '"...', prefix='\n\t', suffix='\t') + str(settings.col_model_fin_hex) + ' [HEX]')
    output.write(adj_str('Color of "' + str(self.name_mol2) + '"...', prefix='\n\t', suffix='\t') + str(settings.col_refer_fin_hex) + ' [HEX]')
    output.write(adj_str('Final rotation matrix from "Standard Orientation"...', prefix='\n\n\t', suffix='\n'))
    # Pretty-print the 3x3 rotation matrix U row by row
    output.write('\n\t     |' + '{:+06.8f}'.format(align.tot_rot_mat[0][0]) + '  ' +
                 '{:+06.8f}'.format(align.tot_rot_mat[0][1]) +
                 '  ' + '{:+06.8f}'.format(align.tot_rot_mat[0][2]) + '|')
    output.write('\n\t U = |' + '{:+06.8f}'.format(align.tot_rot_mat[1][0]) + '  ' +
                 '{:+06.8f}'.format(align.tot_rot_mat[1][1]) +
                 '  ' + '{:+06.8f}'.format(align.tot_rot_mat[1][2]) + '|')
    output.write('\n\t     |' + '{:+06.8f}'.format(align.tot_rot_mat[2][0]) + '  ' +
                 '{:+06.8f}'.format(align.tot_rot_mat[2][1]) +
                 '  ' + '{:+06.8f}'.format(align.tot_rot_mat[2][2]) + '|')
    output.write(adj_str('# This matrix aligns "' + str(self.name_mol1) + '" with "' + str(self.name_mol2) + '"', prefix='\n\n\t', suffix=''))
    output.write(adj_str('# U already includes all custom symmetry operations!', prefix='\n\t', suffix=''))
    output.write(adj_str('* Quality of the Superposition:', prefix='\n\n', suffix='\n'))
    output.write(adj_str('d values for the GARD calculation...', prefix='\n\t', suffix='\t') + str(self.d_min) + ', ' + str(self.d_max))
    output.write(adj_str('Superposition R^2...', prefix='\n\t', suffix='\t') + self.format_value(align.r_sq, n_digits=5) +
                 ' [Dimensionless]')
    output.write(adj_str('Cosine similarity...', prefix='\n\t', suffix='\t') + self.format_value(align.cos_sim, n_digits=5) +
                 ' [Dimensionless]')
    output.write(adj_str('GARD score...', prefix='\n\t', suffix='\t') + self.format_value(align.gard, n_digits=5) +
                 ' [Dimensionless]')
    output.write(adj_str('RMSD...', prefix='\n\t', suffix='\t') + self.format_value(align.rmsd, n_digits=5) +
                 ' [Angstrom]')
    output.write(adj_str(' - Decomposition into different atom types', prefix='\n\n', suffix='\t\t') + 'Absolute [Angstrom] \tRelative [%]\n')
    # Plain loop instead of a side-effect-only list comprehension
    for entry in range(align.n_atom_types):
        output.write('\n\t\t\t' + "{:4.4s}".format(align.at_types[entry]) + ' (#' + "{:3.0f}".format(align.occ[entry]) + ')\t\t\t\t\t\t\t\t' +
                     self.format_value(align.rmsd_idv[entry], n_digits=5) + ' \t\t\t(' +
                     self.format_value(rmsd_perc[entry], n_digits=0) + ')')
    output.write(adj_str(' - z-matrix properties', prefix='\n\n', suffix='\n'))
    output.write(adj_str('# z-matrices are created for both molecules using', prefix='\n\t', suffix=''))
    output.write(adj_str('# 3 N - 1 bond distances, 3 N - 2 bond angles and', prefix='\n\t', suffix=''))
    output.write(adj_str('# 3 N - 3 dihedral angles and the total RMSD and', prefix='\n\t', suffix=''))
    output.write(adj_str('# the relative contributions are calculated', prefix='\n\t', suffix='\n'))
    output.write(adj_str('RMSD...', prefix='\n\t', suffix='\t') + self.format_value(align.rmsd_z_matrix, n_digits=5) +
                 ' [Angstrom]')
    output.write(adj_str('Contribution of distances...', prefix='\n\t', suffix='\t') + self.format_value(align.c_dis * 100, n_digits=0) + ' [%]')
    output.write(adj_str('Contribution of angles...', prefix='\n\t', suffix='\t') + self.format_value(align.c_ang * 100, n_digits=0) + ' [%]')
    output.write(adj_str('Contribution of dihedral angles...', prefix='\n\t', suffix='\t') + self.format_value(align.c_tor * 100, n_digits=0) + ' [%]')
    if align.has_sub_rmsd:
        # Optional per-substructure decomposition of the quality descriptors
        form1 = align.make_sum_formula(pos=align.pos_sub1)
        form2 = align.make_sum_formula(pos=align.pos_sub2)
        output.write(adj_str(' - Decomposition into different substructures', prefix='\n\n', suffix='\n'))
        output.write(adj_str('Substructure 1', prefix='\n\t', suffix='\t') + '[' + str(form1) + ']' + ' (' + str(len(align.pos_sub1)) + ' atoms)')
        output.write(adj_str('Superposition R^2...', prefix='\n\t', suffix='\t') + self.format_value(align.r_sq_sub1, n_digits=5) + ' [Dimensionless]')
        output.write(adj_str('Cosine similarity...', prefix='\n\t', suffix='\t') + self.format_value(align.cos_sim_sub1, n_digits=5) + ' [Dimensionless]')
        output.write(adj_str('RMSD...', prefix='\n\t', suffix='\t') + self.format_value(align.rmsd_sub1, n_digits=5) + ' [Angstrom]')
        output.write(adj_str('Contribution...', prefix='\n\t', suffix='\t') + self.format_value(align.c_sub1 * 100, n_digits=0) + ' [%]')
        output.write(adj_str('Substructure 2', prefix='\n\n\t', suffix='\t') + '[' + str(form2) + ']' + ' (' + str(len(align.pos_sub2)) + ' atoms)')
        output.write(adj_str('Superposition R^2...', prefix='\n\t', suffix='\t') + self.format_value(align.r_sq_sub2, n_digits=5) + ' [Dimensionless]')
        output.write(adj_str('Cosine similarity...', prefix='\n\t', suffix='\t') + self.format_value(align.cos_sim_sub2, n_digits=5) + ' [Dimensionless]')
        output.write(adj_str('RMSD...', prefix='\n\t', suffix='\t') + self.format_value(align.rmsd_sub2, n_digits=5) + ' [Angstrom]')
        output.write(adj_str('Contribution...', prefix='\n\t', suffix='\t') + self.format_value(align.c_sub2 * 100, n_digits=0) + ' [%]')
def wt_prop(prop):
    """Render a statistical property with five decimals; 'None' if undefined."""
    if prop is None:
        return str(prop)
    return '{:6.5f}'.format(prop)
def wt_struct():
    """Write the structural-parameter statistics section of the logfile.

    Only emitted when ``align.has_stats`` is set.  Reports counts, RMSE
    and correlation coefficients (R2) for bonds, bond types, angles and
    dihedrals, formatted via the sibling ``wt_prop`` helper.
    """
    if align.has_stats:
        output.write(adj_str('* Evaluation of structural parameters:', prefix='\n\n', suffix='\n'))
        output.write(adj_str('# 1. The RMSE values are the root-mean-square errors', prefix='\n\t', suffix=''))
        output.write(adj_str('# between the corresponding properties of the two structures', prefix='\n\t', suffix=''))
        output.write(adj_str('# 2. The R2 values are the the correlation coefficients', prefix='\n\t', suffix=''))
        output.write(adj_str('# between the two data sets', prefix='\n\t', suffix=''))
        output.write(adj_str('Number of bonds...', prefix='\n\n\t', suffix='\t') + str(align.n_bonds))
        output.write(adj_str('R2 of linear correlation...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_bnd_dist_r_sq) + ' [Dimensionless]')
        output.write(adj_str('RMSE...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_bnd_dist_rmsd) + ' [Angstrom]')
        output.write(adj_str('Number of bond types...', prefix='\n\n\t', suffix='\t') + str(align.n_bnd_types))
        # NOTE(review): this R2 line reuses prop_bnd_dist_r_sq while the matching RMSE
        # line below uses the *type*-specific prop_bnd_dist_type_rmsd -- confirm whether
        # a prop_bnd_dist_type_r_sq attribute was intended here.
        output.write(adj_str('R2 of linear correlation...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_bnd_dist_r_sq) + ' [Dimensionless]')
        output.write(adj_str('RMSE...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_bnd_dist_type_rmsd) + ' [Angstrom]')
        output.write(adj_str('Number of angles...', prefix='\n\n\t', suffix='\t') + str(align.n_angles))
        output.write(adj_str('R2 of linear correlation...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_ang_r_sq) + ' [Dimensionless]')
        output.write(adj_str('RMSE...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_ang_rmsd) + ' [Degrees]')
        output.write(adj_str('Number of dihedrals...', prefix='\n\n\t', suffix='\t') + str(align.n_torsions))
        output.write(adj_str('R2 of linear correlation...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_tor_r_sq) + ' [Dimensionless]')
        output.write(adj_str('RMSE...', prefix='\n\t', suffix='\t') + wt_prop(self.prop_tor_rmsd) + ' [Degrees]')
def wt_eof():
    """Write the end-of-logfile marker."""
    output.write(adj_str('*** End of log file ***', prefix='\n\n', suffix=''))
output = open(self.file_name, 'w') # Create a new file
# Write all information aspects to file
wt_general_info()
wt_consistency()
wt_std_orientation()
wt_match()
wt_kabsch()
wt_struct()
wt_eof()
output.close() # Close the outfile
self.pt_logfile_written() # Inform the user
def pt_logfile_written(self):
    """Inform the user that the logfile was written successfully."""
    print(f"\n> A logfile ({self.file_name}) has been written successfully!")
###############################################################################
# HANDLERS FOR USER INPUT
###############################################################################
def get_menu_choice(self, choices, question, return_type='int'):
    """Prompt the user until a valid menu choice is entered and return it.

    Parameters
    ----------
    choices : sequence
        Valid values; for ``return_type='float'`` interpreted as range bounds
        (min/max), otherwise as an explicit membership list.
    question : str
        The prompt shown to the user.
    return_type : str
        One of 'int', 'float', 'bool', 'HTML' or 'symOP'; controls the
        conversion and validation applied to the raw input.

    Returns
    -------
    int, float, bool or str depending on ``return_type``.
    """
    while True:  # Stay in loop until a valid user choice
        # BUGFIX/SECURITY: the previous code ran eval() on the raw input (a 2to3
        # artifact of raw_input()). That executed arbitrary user expressions and
        # made every string-valued branch unusable: '#ff0000' or 'x,y,z' raised
        # SyntaxError/NameError and was rejected. Plain input() plus explicit
        # conversion restores the intended behavior safely.
        choice = input(question)
        try:
            if return_type in ('int', 'bool'):
                # 'bool' menus present integer options (0/1) mapped below
                choice = int(choice)
            elif return_type == 'float':
                choice = float(choice)
        except ValueError:
            # If input can not be converted: flag as invalid with ""
            choice = ""
        # If return type is HTML evaluate if user input is a valid HTML color
        if return_type == 'HTML':
            if self.html_check(choice):
                break
            else:
                self.pt_invalid_input()
        # If return type is float evaluate if float is in range of choices
        elif return_type == 'float':
            # Guard against a failed conversion ("" is not comparable to floats)
            if choice != "" and min(choices) <= choice <= max(choices):
                break
            else:
                self.pt_invalid_input()
        elif return_type == 'symOP':
            # A symmetry operation must mention x, y and z, contain exactly two
            # commas and use only the allowed expression characters
            allowed_chars = set('xyz .,/*-+0123456789')
            symbol_ok = 'x' in choice and 'y' in choice and 'z' in choice
            comma_ok = choice.count(',') == 2
            char_ok = all(char in allowed_chars for char in choice)
            if symbol_ok and comma_ok and char_ok:
                break
            else:
                self.pt_invalid_input()
        # Check if operation is a valid choice and exit loop if True
        elif choice in choices:
            if return_type == 'bool' and choice == 0:
                choice = False
            elif return_type == 'bool' and choice == 1:
                choice = True
            break
        # Otherwise pass and stay in loop
        else:
            self.pt_invalid_input()
    # Return value(s)
    return choice
def html_check(self, color_string):
    """Return True if *color_string* is a valid '#RRGGBB' HTML color code.

    Accepts exactly seven characters: a leading '#' followed by six
    hexadecimal digits (either case). Any non-string input returns False.
    """
    # isinstance instead of type(...) != str; also accepts str subclasses
    if not isinstance(color_string, str) or len(color_string) != 7 or color_string[0] != '#':
        return False
    hex_digits = set('0123456789abcdefABCDEF')
    return all(char in hex_digits for char in color_string[1:])
def change_TF(self, dictionary, key):
    """Toggle the boolean stored under *key* in *dictionary* and return it."""
    dictionary[key] = not dictionary[key]
    return dictionary
###############################################################################
# START
###############################################################################
def pt_welcome(self):
    """Print the program banner: version, author, feature list and citation."""
    print("\n\n=============================================================================")
    print(" aRMSD: Version " + str(self.version))
    print("=============================================================================")
    print(" A. Wagner, University of Heidelberg (" + str(self.year) + ")")
    print("-------------------------------- Description --------------------------------")
    print("Key features:")
    print("* Parses data from various file formats")
    print("* Establishes consistency and matches coordinate sequences of two molecules")
    print("* Aligns two molecular structures based on the Kabsch algorithm")
    print("* Supports different weighting functions for the superposition")
    print("* Supports error propagation for experimental structures")
    print("* Generates different visualization types of the superposition results")
    print("* Writes outfiles that can be passed to other programs")
    print("* ... more features and changes can be found in the documentation")
    print(" ... this project is hosted on GitHub: https://github.com/armsd/aRMSD")
    print(" ... documentation: http://armsd.rtfd.io")
    print("-----------------------------------------------------------------------------")
    print(
        '\n*** Cite this program as:' +
        '\n A. Wagner, H.-J. Himmel, J. Chem. Inf. Model, 2017, 57, 428-438.')
def pt_versions(self, core_version, plot_version, log_version):
    """Print the release dates of the core, plot and log modules."""
    print("\nRelease dates of the individual modules:")
    print("core module: \t'" + str(core_version) + "'")
    print("plot module: \t'" + str(plot_version) + "'")
    print("log module: \t'" + str(log_version) + "'")
def pt_start(self):
    """Print the program start message."""
    print("\n> Starting program ...")
    print("-----------------------------------------------------------------------------")
###############################################################################
# FILE IMPORT
###############################################################################
def pt_file_not_found(self):
    """Print an error when a requested input file does not exist."""
    print("\n> File not found, please try again!")
def pt_import_success(self, input_file, element_symbol):
    """Report a successful file import and the number of atoms read."""
    print("\n> '" + str(input_file) + "' has been loaded successfully! (#Atoms: " + str(len(element_symbol)) + ")")
def lg_files_loaded(self):
    """Print the load confirmation and record the successful import."""
    print("-----------------------------------------------------------------------------")
    print("... Files have been loaded!")
    self.file_import = True  # Log the successful import
def pt_no_pybel(self):
    """Print an error when the required Openbabel dependency is missing."""
    print("\n> ERROR: Openbabel is required and missing!")
def pt_no_pybel_file(self):
    """Print an error when Openbabel cannot recognize the file type."""
    print("\n> ERROR: Openbabel does not recognize the file type, try a different file!")
def chk_std_devs(self, molecule1, molecule2, settings):
    """Check both molecules for coordinate standard deviations.

    Sets ``has_cor_std_mol1``/``has_cor_std_mol2`` from the per-molecule
    ``cor_std`` arrays, derives ``use_std`` and mirrors it onto
    ``settings.use_std``; reports the result to the user.
    """
    print("\n-----------------------------------------------------------------------------")
    print("> Checking for coordinate standard deviations...")
    self.has_cor_std_mol1 = np.sum(molecule1.cor_std) != 0.0  # Checks deviations for both molecules
    self.has_cor_std_mol2 = np.sum(molecule2.cor_std) != 0.0
    self.use_std = True in [self.has_cor_std_mol1, self.has_cor_std_mol2]  # If any standard deviations exist
    settings.use_std = self.use_std  # Copy information to settings
    if self.use_std:
        print("... Standard deviations were found and will be used!")
    else:
        print("... No standard deviations were found!")
    print("-----------------------------------------------------------------------------")
###############################################################################
# Plotting
###############################################################################
def pt_plotting(self):
    """Tell the user that results are shown in a pop-up plot window."""
    print("\n> Results are now shown ... close the pop-up window to continue!")
def pt_plotting_screenshot(self):
    """Print the screenshot/help key bindings for the plot window."""
    print("> Press the 's' button to save the scene as .png file or 'h' for help.\n")
def pt_plotting_substructure_def(self):
    """Print the interactive instructions for defining a substructure."""
    print("\n-----------------------------------------------------------------------------")
    print("========================== Substructure Definition =========================")
    print("-----------------------------------------------------------------------------")
    print("\n> - Click on atoms to add or remove them from the designated substructure")
    print("\n> You have to select at least two atoms, but keep in mind that substructures")
    print("> with few atoms are not very meaningful. All unselected atoms will")
    print("> constitute the second substructure.\n")
    print("-----------------------------------------------------------------------------")
def pt_plotting_deleted(self, n_del):
    """Report how many atoms were deleted; silent when none were."""
    if n_del <= 0:
        return
    print("\n> A total of " + str(n_del) + " atoms have been deleted!")
def pt_aRMSD_plot_info(self):
    """Print the interaction hints for the aRMSD plot window."""
    print("\n> - A click on one atom will show its RMSD contribution,")
    print("> 2/3 or 4 selected atoms will display information about the")
    print("> respective distances, angles and dihedrals.\n")
def pt_warning_bond_types(self):
    """Warn that R2 is meaningless when only two bond types exist."""
    print("\n> WARNING: Only 2 bond types were found - consequently R2 is meaningless!")
###############################################################################
# CONSISTENCY MESSAGES
###############################################################################
def pt_consistency_old(self):
    """Print the (legacy) consistency-check section header."""
    print("\n-----------------------------------------------------------------------------")
    print("=============== Consistency Checks and Structural Modification ==============")
    print("-----------------------------------------------------------------------------")
def pt_consistency_menu(self):
    """Print the consistency/modification menu with the current status flags."""
    print("\n-----------------------------------------------------------------------------")
    print("=============== Consistency Checks and Structural Modification ==============")
    print("-----------------------------------------------------------------------------")
    print("-10 Reset molecules to the original status")
    print("-5 Load the saved status (save point available: '" + str(self.was_saved) + "')")
    print("-2 Reset substructure")
    print("-1 Establish consistency based on current (sub)structures")
    print("-----------------------------------------------------------------------------")
    print(" A substructure has been defined : '" + str(self.has_sub) + "'")
    print(" Consistency has been established : '" + str(self.consist) + "'")
    print(" Group matching algorithm will be used : '" + str(self.use_groups) + "'")
    print("-----------------------------------------------------------------------------")
    print("0 ... exit the menu (point of no return)")
    print("1 ... show information about the two data sets")
    print("2 ... define substructures (!next release!)")
    print("3 ... remove selected atoms")
    print("8 ... show the molecules again")
    print("-----------------------------------------------------------------------------")
    print("10 ... save current changes")
    print("20 ... render the combined scene with VTK")
    print("21 ... export the scene/structures, change VTK settings")
    print("-----------------------------------------------------------------------------")
def pt_diff_at_number(self):
    """Announce that the two structures have different atom counts."""
    print("\n> Different number of atoms in the two structures:")
def pt_possibilities(self, n_hydro, n_hydro_c, n_hydro_full):
    """Show the hydrogen-removal menu and return the valid option numbers.

    Options 0-2 are only offered when the corresponding removal has not
    already been done (``rem_H_*`` flags) and is possible (``can_rem_H_*``);
    option 3 (keep all H atoms) is always available.
    """
    choices = []
    print("\n-----------------------------------------------------------------------------")
    print(" What should happen to the remaining " + str(n_hydro) + " H-atoms?")
    print("-----------------------------------------------------------------------------")
    print(" Info: The exclusion of H-atoms in RMSD calculations is recommended if")
    print(" they were not located and refined in the X-ray experiment")
    print("-----------------------------------------------------------------------------")
    if not self.rem_H_all:
        print("0 ... remove all hydrogen atoms (" + str(n_hydro) + ")")
        choices.append(0)
    # Only offered when not all H atoms are carbon-bound (counts differ)
    if not self.rem_H_btc and self.can_rem_H_btc and n_hydro != n_hydro_c:
        print("1 ... remove all hydrogen atoms bound to carbon (" + str(n_hydro_c) + ")")
        choices.append(1)
    if not self.rem_H_btg14 and self.can_rem_H_btg14:
        print("2 ... remove all hydrogen atoms bound to group-14 elements (" + str(n_hydro_full) + ")")
        choices.append(2)
    print("3 ... keep all hydrogen atoms")
    print("-----------------------------------------------------------------------------")
    choices.append(3)
    return choices
def pt_consistency_start(self, n_atoms1, n_atoms2):
    """Announce the start of the consistency checks with both atom counts."""
    print("\n> Performing consistency checks ... (Number of atoms: " + str(n_atoms1) + ", " + str(n_atoms2) + ")")
def lg_multiple_occupation_found(self, logg_for='molecule1'):
    """Warn about atoms on identical positions and flag the affected molecule."""
    print("\n> WARNING: Atoms on identical positions were found!")
    print(" Please carefully check your input files!")
    is_first = logg_for == 'molecule1'
    if is_first:
        self.disorder_mol1 = True
    else:
        self.disorder_mol2 = True
    name = self.name_mol1 if is_first else self.name_mol2
    print(" Disordered positions in molecule: '" + str(name) + "'\n")
def pt_info_multiple_occupation(self, sym, idf, xyz, entry):
    """List the symbol-identifier and coordinates of each disordered position."""
    print("-----------------------------------------------------------------------------")
    print("Entry\tSym-Idf\t\t\t[xyz]")
    # List comprehension used only for its side effect (one print per index)
    [print("Pos:\t" + str(sym[idx]) + "-" + str(idf[idx]) + "\t\t" + str(xyz[idx])) for idx in entry]
    print("-----------------------------------------------------------------------------")
def pt_number_h_atoms(self, n_hydro_mol1, n_hydro_mol2):
    """Print the hydrogen-atom counts of both molecules."""
    print(" There are (" + str(n_hydro_mol1) + " & " + str(n_hydro_mol2) + ") H atoms in the molecules")
def lg_rem_H_btc(self, n_hydro_mol1, n_hydro_mol2):
    """Record and report the removal of all carbon-bound hydrogen atoms."""
    self.rem_H_btc = True
    print("\n Removing all hydrogen atoms bound to carbon ...")
    print(" All respective hydrogen atoms (" + str(n_hydro_mol1) + " & " + str(n_hydro_mol2) + ") have been removed!")
def lg_rem_H_btg14(self, n_hydro_mol1, n_hydro_mol2):
    """Record and report the removal of hydrogens bound to group-14 elements."""
    self.rem_H_btg14 = True
    print("\n Removing all hydrogen atoms bound to group-14 elements ...")
    print(" All respective hydrogen atoms (" + str(n_hydro_mol1) + " & " + str(n_hydro_mol2) + ") have been removed!")
def lg_rem_H_all(self, n_hydro_mol1, n_hydro_mol2):
    """Record and report the removal of all hydrogen atoms."""
    self.rem_H_all = True
    print("\n Removing all hydrogen atoms ...")
    print(" All hydrogen atoms (" + str(n_hydro_mol1) + " & " + str(n_hydro_mol2) + ") have been removed!")
def lg_group_algorithm(self):
    """Record and report that PSE-group matching will be used."""
    self.use_groups = True
    print("\n> The matching problem will be solved for PSE groups.")
def lg_consistent(self):
    """Record and report that both structures are consistent."""
    self.consist = True
    print("\n> The structures of both molecules are consistent.")
def lg_substructure(self):
    """Record and report that a valid substructure has been defined."""
    self.has_sub = True
    print("\n> A proper substructure has been defined and will be used ...")
def lg_wrong_substructure(self):
    """Record and report an invalid substructure definition."""
    self.has_sub = False
    print("\n> ERROR: Number of atoms must be identical in both substructures and >= 3")
def lg_reset_substructure(self):
    """Record and report the reset of substructures to full coordinate sets."""
    self.has_sub = False
    print("\n> Reset substructures to the full coordinate sets.")
def pt_consistency_failure(self, n_atoms1, n_atoms2):
    """Report mismatching atom counts and announce program exit."""
    print("\n> ERROR: Number of atoms (" + str(n_atoms1) + " & " + str(n_atoms2) +
          ") is not identical - check your input files!")
    self.pt_exiting()
def pt_consistency_error(self):
    """Report a severe consistency problem and announce program exit."""
    print("\n> ERROR: Severe problem encountered - check your input files!")
    self.pt_exiting()
def pt_no_consistency_done(self):
    """Print an error when consistency checks were not yet performed."""
    print("\n> ERROR: Data sets need to be checked for consistency!")
def pt_saved_status(self):
    """Record that a save point exists and report the successful save."""
    self.was_saved = True
    print("\n> The current status was saved successfully!")
def pt_loaded_status(self):
    """Report a successful load of the saved status.

    NOTE(review): this also sets ``was_saved = True`` (a load implies a
    save point exists) -- confirm this mirroring is intentional.
    """
    self.was_saved = True
    print("\n> The last saved status was loaded successfully!")
def pt_write_success(self, output_file):
    """Report that coordinates were written to *output_file*."""
    print("\n> Coordinates were written to '" + str(output_file) + "' !")
def pt_interpol_write_success(self, align):
    """Report the written interpolation outfiles and each file's RMSD."""
    print("\n> Interpolated coordinates were written to 'interp_'... outfiles!")
    print("\n The reference structure (RMSD = 0.0) corresponds to file number 0")
    print("\n The model structure corresponds to file number " +
          str(len(align.interp_rmsd) - 1) + "\n")
    # List comprehension used only for its side effect (one print per file)
    [print('\tFile number: ' + str(entry).zfill(2) + ' \tRMSD...\t\t' +
           self.format_value(align.interp_rmsd[entry], n_digits=5)
           + ' [Angstrom]') for entry in range(len(align.interp_rmsd))]
def pt_unsupported_atom(self, symbol):
    """Print an error about an unsupported atomic symbol (e.g. dummy atoms)."""
    print("\n> ERROR: Unsupported atomic symbol ('" + str(symbol) + "') found!")
    print(" Check your files for dummy atoms, etc.!")
def pt_unsupported_element(self, symbol):
    """Print an error about an unknown element symbol."""
    print("\n> ERROR: Unknown element symbol ('" + str(symbol) + "') encountered!")
# COORDINATE UNITS
# ------------------------------------------------------------------------------
def lg_unit_check(self):
    """Record and announce the check of the coordinate length unit."""
    self.chk_for_unit = True
    print("\n Checking length unit of xyz coordinates ...")
def lg_unit_check_warning(self):
    """Record and warn that too few atoms exist for a reliable unit check."""
    self.chk_for_unit_warn = True
    print("\n> WARNING: Number of atoms insufficient!\nAssuming [Angstrom] as unit ...")
def pt_unit_is_a(self):
    """Print that the coordinate unit was determined to be Angstrom."""
    print(" The coordinate unit is [Angstrom]")
def lg_transform_coord_unit(self):
    """Record and report the Bohr-to-Angstrom coordinate transformation."""
    self.unit_transform = True
    print(" Coordinates were transformed from [Bohr] to [Angstrom], updating bonds ...")
# MISC
# ------------------------------------------------------------------------------
def pt_exiting(self):
    """Print the program-exit notice."""
    print(" Exiting program ...")
def pt_render_first(self):
    """Print an error when the VTK scene has not been rendered yet."""
    print("\n> ERROR: You need to render the scene with VTK first!")
def pt_kabsch_first(self):
    """Print an error when the Kabsch algorithm has not been run yet."""
    print("\n> ERROR: You need to perform the Kabsch algorithm first!")
def pt_future_implementation(self):
    """Print a placeholder notice for not-yet-implemented features."""
    print("\n> ERROR: Will be implemented in the future!")
def pt_swap_n_pairs(self):
    """Return the prompt asking how many atom pairs are to be swapped."""
    return "\n>> How many atom pairs are to be swapped? "
def pt_swap_atoms(self, sym1, idf1, sym2, idf2):
    """Announce the swap of two atoms identified by symbol and identifier."""
    print(f"\n> Swapping atom {sym1}{idf1} with {sym2}{idf2}")
def pt_program_termination(self):
    """Print the normal-termination banner."""
    print("\n-----------------------------------------------------------------------------")
    print("========================= Normal program termination ========================")
    print("-----------------------------------------------------------------------------")
def pt_requirement_error(self):
    """Print an error when program requirements are not met."""
    print("\n> ERROR: Requirements are not met, exiting program!")
# FILE HANDLING
# ------------------------------------------------------------------------------
def pt_unsupported_file_type(self, filetypes):
    """Print an error listing the supported file types."""
    print("\n> ERROR: Unsupported file type - supported types are:")
    print(" "+', '.join(filetypes))
def pt_no_ob_support(self):
    """Print an error when openbabel does not support the file type."""
    print("\n> ERROR: File type is not supported by openbabel!")
def inp_file_export(self, example='myfile.xyz', prefix=None):
    """Prompt the user for an export filename and return the entered string.

    Parameters
    ----------
    example : str
        Example filename shown in the prompt.
    prefix : str or None
        Optional qualifier printed before the prompt (e.g. a file role).
    """
    # Idiom fix: the previous `if prefix is None: pass / else: ...` inverted
    # the condition through a dead branch.
    if prefix is not None:
        print("\nEnter the " + str(prefix) + " filename:")
    return input("\n> Enter a filename (e.g. " + str(example) + "): ")
def wrt_comment(self, comment=None):
    """Return the '# Created with aRMSD ...' header line, optionally extended."""
    tail = "" if comment is None else str(comment)
    return "# Created with aRMSD V. " + str(self.version) + tail + "\n"
# MATCHING AND SYMMETRY OPERATIONS
# ------------------------------------------------------------------------------
def pt_sym_expand(self, symOP):
    """Announce the expansion of coordinates by a symmetry operation."""
    print("\n> Expanding coordinates by symmetry operation '" + str(symOP) + "'")
def pt_standard_orientation(self):
    """Report the rotation of the molecules into 'Standard Orientation'."""
    print("\n> The molecules were rotated into 'Standard Orientation' ...")
def pt_match_reset(self, reset_type='save'):
    """Announce a reset of the matching ('save') or consistency ('origin') step.

    Any other *reset_type* prints nothing.
    """
    # Typo fix in the user-facing messages: "reseted" -> "reset"
    if reset_type == 'save':
        print("\n> Matching process will be reset ...")
    elif reset_type == 'origin':
        print("\n> Consistency process will be reset ...")
def pt_sym_inversion(self):
    """Announce the inversion of the model structure at the origin."""
    print("\n> Inversion of '" + str(self.name_mol1) + "' structure at the origin of the coordinate system ...")
def pt_sym_reflection(self, plane):
    """Announce the reflection of the model structure at the given plane."""
    print("\n> Reflection of '" + str(self.name_mol1) + "' structure at the " + str(plane) + "-plane ...")
def pt_rot_info(self):
    """Explain the sign convention for rotation input."""
    print("\nUse '-n' for counter-clockwise rotations\n(e.g. -20 for a rotation of -360/20 = -18 deg.)")
def pt_sym_rotation(self, n, axis):
    """Announce an n-fold rotation (negative n = counter-clockwise) about *axis*."""
    if n < 0:
        print("\nA " + str(abs(n)) + "-fold ccw rotation (" + str(round(360.0 / n, 1)) + " deg.) around the " +
              str(axis) + "-axis was requested.")
    else:
        print("\nA " + str(n) + "-fold cw rotation (" + str(round(360.0 / n, 1)) + " deg.) around the " +
              str(axis) + "-axis was requested.")
    print("\n> Applying rotation to '" + str(self.name_mol1) + "' structure ...")
def pt_wrong_n(self):
    """Report that a zero rotation order was replaced by 1."""
    print("\n> ERROR: n has been set to 1 (it can't be 0)!")
def pt_wrong_n_dev(self):
    """Report an out-of-range number of deviations."""
    print("\n> ERROR: The number of deviations must be 1 < n_dev < n_atoms!")
def pt_invalid_input(self):
    """Report invalid or out-of-range user input."""
    print("\n> ERROR: Input invalid or out of range, try again!")
def pt_exit_sym_menu(self):
    """Announce leaving the symmetry transformation menu."""
    print("\n> Exiting symmetry transformation menu ...")
def pt_no_match(self):
    """Ask for confirmation to exit although the molecules were not matched."""
    print("\nThe molecules were not matched, exit anyway? ('y' / 'n')")
def pt_highest_deviations(self, molecule1, molecule2, settings):
    """Print the current matching RMSD and the most disordered atom pairs.

    Lists the ``settings.n_dev`` pairs with the largest deviations using
    the per-molecule ``disord_pos``/``disord_rmsd`` bookkeeping.
    """
    print("\n-----------------------------------------------------------------------------\n")
    print("\tThe geometric RMSD of the current alignment is: " + "{:6.3f}".format(self.match_rmsd) + " A\n")
    print("\t\t The " + str(settings.n_dev) + " most disordered atom pairs are:")
    print("\t\tEntry\t\t Pair\t\t Distance / A ")
    # List comprehension used only for its side effect (one print per pair)
    [print("\t\t " + str(settings.n_dev - entry) + "\t\t" + self.format_sym_idf(molecule1.sym_idf[molecule1.disord_pos[entry]],
                                                                                molecule2.sym_idf[molecule2.disord_pos[entry]]) +
           "\t " + "{:6.3f}".format(molecule2.disord_rmsd[entry])) for entry in range(settings.n_dev)]
def pt_all_bonds(self, align):
    """Print a section separator.

    NOTE(review): the *align* parameter is currently unused -- possibly a
    stub for a fuller bond listing; confirm before removing it.
    """
    print("\n-----------------------------------------------------------------------------")
def pt_no_proper_rmsd_sub(self):
    """Print an error when no valid substructures were defined."""
    print("\n> ERROR: Proper substructures (with at least two atoms) were not defined!")
def pt_max_dev_internal(self, align, settings):
    """Print the largest deviations in internal coordinates.

    Queries ``align.get_max_diff_prop`` for bonds, angles and torsions and
    lists the ``settings.n_max_diff`` biggest differences of each kind.
    """
    desc_dis, dis_mol1, dis_mol2, delta_dis = align.get_max_diff_prop(settings, prop='distance')
    desc_ang, ang_mol1, ang_mol2, delta_ang = align.get_max_diff_prop(settings, prop='angle')
    desc_tor, tor_mol1, tor_mol2, delta_tor = align.get_max_diff_prop(settings, prop='torsion')
    print("\n--------------- The Highest Deviations in Internal Coordinates --------------")
    print("The " + str(settings.n_max_diff) + " highest deviations are printed below")
    print("Entries are: Atoms, values in the Model and Reference, difference")
    print("-----------------------------------------------------------------------------")
    print(" >> Bonds (in Angstrom):")
    # List comprehensions below are used only for their print side effects
    [print(" " + str(entry + 1) + ". " + str(desc_dis[entry]) + " " + str(dis_mol1[entry]) + " " + str(dis_mol2[entry]) +
           "\tDiff. " + str(delta_dis[entry])) for entry in range(settings.n_max_diff)]
    print("-----------------------------------------------------------------------------")
    print(" >> Bond angles (in deg.):")
    [print(" " + str(entry + 1) + ". " + str(desc_ang[entry]) + " " + str(ang_mol1[entry]) + " " + str(ang_mol2[entry]) +
           "\tDiff. " + str(delta_ang[entry])) for entry in range(settings.n_max_diff)]
    print("-----------------------------------------------------------------------------")
    print(" >> Dihedral angles (in deg.):")
    [print(" " + str(entry + 1) + ". " + str(desc_tor[entry]) + " " + str(tor_mol1[entry]) + " " + str(tor_mol2[entry]) +
           "\tDiff. " + str(delta_tor[entry])) for entry in range(settings.n_max_diff)]
    print("-----------------------------------------------------------------------------")
def pt_rmsd_results(self, align):
    """Print the superposition quality summary to the console.

    Shows the similarity descriptors (R^2, cosine similarity, GARD), the
    total RMSD and its decomposition by atom type, optionally by
    substructure, and the z-matrix contributions.
    """
    # Contribution of individual atom types (based on MSD)
    rmsd_perc = (align.rmsd_idv ** 2 / np.sum(align.rmsd_idv ** 2)) * 100
    print("\n-----------------------------------------------------------------------------")
    print("====================== Quality of the Superposition =========================")
    print("-----------------------------------------------------------------------------")
    print("\n> The type of weighting function is: '" + str(align.wts_type) + "'")
    print("\n-------------------------- Similarity Descriptors ---------------------------")
    print(" >>> Superposition R^2 : " + self.format_value(align.r_sq, n_digits=5))
    print(" >>> Cosine similarity : " + self.format_value(align.cos_sim, n_digits=5))
    print(" >>> GARD score : " + self.format_value(align.gard, n_digits=5))
    print("\n------------------------ Root-Mean-Square-Deviation -------------------------")
    print(" >>> RMSD : " + self.format_value(align.rmsd, n_digits=5) + " Angstrom")
    print("\n >>> - Decomposition : Individual atom types (total percentage)")
    # List comprehension used only for its side effect (one print per atom type)
    [print(" " + "{:4.4s}".format(align.at_types[entry]) + " (#" +
           "{:3.0f}".format(align.occ[entry]) + ") : " +
           self.format_value(align.rmsd_idv[entry], n_digits=5) + " Angstrom (" +
           self.format_value(rmsd_perc[entry], n_digits=0) + " %)") for entry in range(align.n_atom_types)]
    if align.has_sub_rmsd:
        # Per-substructure decomposition (only after a Kabsch run with substructures)
        form1, form2 = align.make_sum_formula(pos=align.pos_sub1), align.make_sum_formula(pos=align.pos_sub2)
        print("\n >>> - Decomposition : Substructure properties")
        print("-----------------------------------------------------------------------------")
        print(" Substructure 1 : # Atoms: " + str(len(align.pos_sub1)) + " [" + str(form1) + "]")
        print(" RMSD : " + self.format_value(align.rmsd_sub1, n_digits=5) + " Angstrom (" +
              self.format_value(align.c_sub1 * 100.0, n_digits=0) + " %)")
        print(" Superposition R^2 : " + self.format_value(align.r_sq_sub1, n_digits=5))
        print(" Cosine similarity : " + self.format_value(align.cos_sim_sub1, n_digits=5))
        print("-----------------------------------------------------------------------------")
        print(" Substructure 2 : # Atoms: " + str(len(align.pos_sub2)) + " [" + str(form2) + "]")
        print(" RMSD : " + self.format_value(align.rmsd_sub2, n_digits=5) + " Angstrom (" +
              self.format_value(align.c_sub2 * 100.0, n_digits=0) + " %)")
        print(" Superposition R^2 : " + self.format_value(align.r_sq_sub2, n_digits=5))
        print(" Cosine similarity : " + self.format_value(align.cos_sim_sub2, n_digits=5))
    elif not align.has_sub_rmsd and align.has_sub:
        print("\n> INFO: Reexecute Kabsch alignment to include the substructure decomposition!")
    print("\n--------------------------- Z-matrix properties -----------------------------")
    print(" >>> RMSD : " + self.format_value(align.rmsd_z_matrix, n_digits=5))
    print("\n >>> - Decomposition : total percentage")
    print(" distances : " + self.format_value(align.c_dis * 100.0, n_digits=0) + " %")
    print(" angles : " + self.format_value(align.c_ang * 100.0, n_digits=0) + " %")
    print(" dihedrals : " + self.format_value(align.c_tor * 100.0, n_digits=0) + " %")
def pt_data_set_info(self, molecule1, molecule2):
print("\n-----------------------------------------------------------------------------")
print("==================== Information about Molecular Data =======================")
print("-----------------------------------------------------------------------------")
print("------- Molecule 1: 'Model' Molecule 2: 'Reference' -------")
print(" #Atoms = " + str(molecule1.n_atoms) + " #Atoms = " + str(molecule2.n_atoms))
print(" #H-Atoms = " + str(molecule1.n_h_atoms) + " #H-Atoms = " + str(
molecule2.n_h_atoms))
print("-- Sym.-Idf. -- [xyz] / A | Sym.-Idf. -- [xyz] / A --")
if molecule1.n_atoms > molecule2.n_atoms:
common_number = molecule2.n_atoms
rest_number = molecule1.n_atoms - molecule2.n_atoms
to_print = 'molecule1'
elif molecule1.n_atoms < molecule2.n_atoms:
common_number = molecule1.n_atoms
rest_number = molecule2.n_atoms - molecule1.n_atoms
to_print = 'molecule2'
else:
common_number = molecule2.n_atoms
rest_number = 0
[print(" " + str(molecule1.sym_idf[entry]) + "\t" + str(np.around(molecule1.cor[entry], 3)) + "\t\t" +
str(molecule2.sym_idf[entry]) + "\t" + str(np.around(molecule2.cor[entry], 3)))
for entry in range(common_number)]
if rest_number != 0:
if to_print == 'molecule1':
[print(" " + str(molecule1.sym_idf[common_number + entry]) + "\t" +
str(np.around(molecule1.cor[common_number + entry], 3))) for entry in range(rest_number)]
else:
[print(" \t\t\t\t\t" + str(molecule2.sym_idf[common_number + entry]) + "\t" +
str(np.around(molecule2.cor[common_number + entry], 3))) for entry in range(rest_number)]
def pt_x_ray_menu(self, n_atoms, symOPs, picker_type):
print("\n-----------------------------------------------------------------------------")
print("========================= X-ray Data Modification ===========================")
print("-----------------------------------------------------------------------------")
print("-10 Exit the menu")
print("-5 Export structure to '.xyzs' file")
print("-4 Export structure to '.xyz' file")
print("-2 Change picker mode (current: '" + str(picker_type) + "')")
print("-1 Show the X-ray structure again")
print("-----------------------------------------------------------------------------")
print(" Current number of atoms : " + str(n_atoms))
print("-----------------------------------------------------------------------------")
[print(str(entry) + " expand by operation\t\t'" + str(symOPs[entry]) + "'") for entry in range(len(symOPs))]
print(str(entry + 1) + " expand by custom operation ")
print("-----------------------------------------------------------------------------")
choices = [-10, -5, -4, -2, -1]
choices.extend(range(len(symOPs) + 1))
return choices
def pt_match_menu(self, settings):
print("\n-----------------------------------------------------------------------------")
print("================ Symmetry Adjustments & Sequence Matching ===================")
print("-----------------------------------------------------------------------------")
print("-6 Set number of deviations which are highlighted in the plot (current = " + str(settings.n_dev) + ")")
print("-5 Load the saved status (save point available: '" + str(self.was_saved) + "')")
print("-4 Change plot settings")
print("-3 Manually swap atoms in Model structure")
print("-2 Change matching algorithm or solver")
print("-1 Match molecular sequences based on current alignment")
print("-----------------------------------------------------------------------------")
print(" Current matching algorithm : '" + str(self.match_alg) + "'")
print(" Current matching solver : '" + str(self.match_solv) + "'")
print(" Structures were matched : '" + str(self.is_matched) + "'")
print("-----------------------------------------------------------------------------")
print("0 ... exit the menu (no return)")
print("1 ... inversion at the origin")
print("2 ... reflection at the xy plane")
print("3 ... reflection at the xz plane")
print("4 ... reflection at the yz plane")
print("5 ... rotation around the x axis")
print("6 ... rotation around the y axis")
print("7 ... rotation around the z axis")
print("8 ... show the molecules again")
print("-----------------------------------------------------------------------------")
print("10 ... save current changes (status was saved: '" + str(self.was_saved) + "')")
print("20 ... export structures")
print("-----------------------------------------------------------------------------")
def pt_change_algorithm(self, alg_type):
if alg_type == 'solving':
pt_alg = self.match_solv
elif alg_type == 'matching':
pt_alg = self.match_alg
print("\n> Changed " + str(alg_type) + " algorithm to '" + str(pt_alg) + "'.")
def pt_algorithm_menu(self, molecule):
print("\n-----------------------------------------------------------------------------")
print("======================= Matching Algorithm Submenu ==========================")
print("-----------------------------------------------------------------------------")
print("-10 Return to upper menu")
print("-1 Show details of current solving algorithm ('" + str(self.match_solv) + "')")
print("0 Show details of current matching algorithm ('" + str(self.match_alg) + "')")
print("-----------------------------------------------------------------------------")
print("1 ... use absolute distance between atoms ('distance')")
print("2 ... use combination of absolut and relative distances ('combined')")
print("3 ... use random permutations ('brute_force')")
print("-----------------------------------------------------------------------------")
print("4 ... use 'Hungarian' solver for the permutation matrix ('hungarian')")
print("5 ... use 'aRMSD' solver for the permutation matrix ('standard')")
print("-----------------------------------------------------------------------------")
def pt_solve_algorithm_details(self):
print("\n-----------------------------------------------------------------------------")
print("Details about the algorithm '" + str(self.match_solv) + "':")
if self.match_solv == 'hungarian':
print("This 'Hungarian/Munkres' algorithm is the de facto default solution to")
print("cost problem which is similar to the matching of two molecular structures.")
print("It is quite fast and hence the default in 'aRMSD'.")
elif self.match_solv == 'standard':
print("The 'standard' solving algorithm is a simplified version of the 'Hungarian'")
print("algorithm and was initially developed in the first versions of 'aRMSD'. ")
print("It is not as fast as the implementation of the 'Hungarian/Munkres'")
print("algorithm but it should be tried if the defaults fail.")
def pt_match_algorithm_details(self):
print("\n-----------------------------------------------------------------------------")
print("Details about the algorithm '" + str(self.match_alg) + "':")
if self.match_alg == 'distance':
print("This algorithm uses the distances between the possible atom pairs for the")
print("creation of the permutation matrix. Hence it requires an alignment of")
print("sufficiently good quality - provided by the user through\nsymmetry transformations.")
elif self.match_alg == 'combined':
print("The 'combined' algorithm combine the distances between the possible atom")
print("pairs and the relative positions of the atoms within the molecule to")
print("create a reasonable permutation matrix. A sufficiently good alignment")
print("drastically improves the matching results.")
elif self.match_alg == 'brute_force':
print("This is an experimental algorithm that tries to find the best solution to")
print("the matching problem through all possible permutations. This will take a")
print("lot of time - however: nothing is required from the user.")
print("-----------------------------------------------------------------------------")
def pt_export_structure_menu(self, min_rad, max_rad):
print("\n-----------------------------------------------------------------------------")
print("============================= Export Structures =============================")
print("-----------------------------------------------------------------------------")
print("-10 Return to upper menu")
print("-1 Project atomic radii for export (current range: " + str(min_rad) + " to " + str(max_rad) + ")")
print("-----------------------------------------------------------------------------")
print("0 ... export data in two '.xyz' files")
print("1 ... export data in two '.xyzs' files")
print("2 ... export combined data in one '.xyzs' file")
print("-----------------------------------------------------------------------------")
def pt_export_kabsch_menu(self):
print("\n-----------------------------------------------------------------------------")
print("============================= Export Structures =============================")
print("-----------------------------------------------------------------------------")
print("-10 Return to upper menu")
print("-----------------------------------------------------------------------------")
print("0 ... export superposition in two '.xyz' files")
print("1 ... export superposition in one '.xyzs' files")
print("2 ... export aRMSD representation in one '.xyzs' file")
print("-----------------------------------------------------------------------------")
def pt_change_vtk_settings_menu(self, settings, molecule1, molecule2):
print("\n-----------------------------------------------------------------------------")
print("============================ Change VTK Settings ============================")
print("-----------------------------------------------------------------------------")
print("-10 Return to upper menu")
print("-1 Change current plotting style")
print("-----------------------------------------------------------------------------")
print(" Current plotting style : '" + str(settings.name) + "'")
print("-----------------------------------------------------------------------------")
print("0 ... draw labels (current = " + str(settings.draw_labels) + ")")
print("1 ... change label type (current = " + str(settings.label_type) + ")")
print("2 ... draw arrows (current = " + str(settings.draw_arrows) + ")")
print("3 ... draw legend (current = " + str(settings.draw_legend) + ")")
print("4 ... set global scale factor (current = " + str(settings.scale_glob) + ")")
print("5 ... set atom scale factor (current = " + str(settings.scale_atom) + ")")
print("6 ... set resolution (current = " + str(settings.res_atom) + ")")
print("7 ... set color of '" + str(molecule1.name) + "' (current = " + str(settings.col_model_hex) + ")")
print("8 ... set color of '" + str(molecule2.name) + "' (current = " + str(settings.col_refer_hex) + ")")
print("9 ... use lightning (current = " + str(settings.use_light) + ")")
print("10 ... set export magnification factor (current = " + str(settings.magnif_fact) + ")")
print("-----------------------------------------------------------------------------")
def pt_w_function_menu(self, align):
print("\n-----------------------------------------------------------------------------")
print("========================== Set Weighting Functions ==========================")
print("-----------------------------------------------------------------------------")
print("Info: For functions marked with a '*' the contributions from other atoms")
print(" to each individual atom (multi-center-correction) can be calculated")
print(" if requested.")
print("-----------------------------------------------------------------------------")
print("-10 Return to upper menu")
print("-----------------------------------------------------------------------------")
print(" Current weighting function : '" + str(align.wts_type) + "'")
print(" Calculate mcc contribution : '" + str(align.calc_mcc) + "'")
print("-----------------------------------------------------------------------------")
print("0 ... geometric / unweighted")
print("1 ... x-ray scattering factors (*)")
print("2 ... atomic masses")
print("3 ... number of electrons")
print("4 ... number of core electrons")
print("5 ... spherical electron densities (*)")
print("6 ... LDA electron densities (*)")
print("-----------------------------------------------------------------------------")
def pt_xsf_wrong_source(self):
print("Unrecognized X-ray source, use: 'MoKa', 'CuKa', 'CoKa', 'FeKa', 'CrKa'")
print("\n> Using 'MoKa' scattering factors (lambda = 0.071073 nm)")
def pt_xsf_import_error(self):
print("\n> ERROR: Scattering factor import failed, using prestored factors ...")
def pt_kabsch_menu(self, align, settings):
print("\n-----------------------------------------------------------------------------")
print("============== Kabsch Algorithm, Statistics & Visualization ================")
print("-----------------------------------------------------------------------------")
print("-10 Exit aRMSD")
print("-8 Plot aRMSD color map")
print("-7 Change general RMSD settings")
print("-6 Add/remove bond")
print("-4 Change plot settings")
print("-3 Define two substructures (structures are defined: '" + str(align.has_sub) + "')")
print("-2 Change weighting function")
print("-1 Perform Kabsch alignment (required for all functions)")
print("-----------------------------------------------------------------------------")
print(" Current weighting function : '" + str(align.wts_type) + "'")
print(" Calculate mcc contribution : '" + str(align.calc_mcc) + "'")
print(" Kabsch alignment performed : '" + str(align.has_kabsch) + "'")
print("-----------------------------------------------------------------------------")
print("0 ... visualize results in aRMSD representation")
print("1 ... visualize structural superposition")
print("2 ... perform statistic investigation of bond lengths and angles")
print("3 ... show RMSD results")
print("4 ... interpolate between the structures (cart., " + str(settings.n_steps_interp) + " steps)")
print("5 ... generate outfile")
print("20 ... export structural data")
print("-----------------------------------------------------------------------------")
def pt_change_rmsd_settings_menu(self, settings):
print("\n-----------------------------------------------------------------------------")
print("============================ Change RMSD Settings ===========================")
print("-----------------------------------------------------------------------------")
print("-10 Return to upper menu")
print("-5 Use RYG coloring scheme for 'aRMSD representation' (current = " + str(settings.use_aRMSD_col) + ")")
print("-----------------------------------------------------------------------------")
print("0 ... set maximum RMSD value for color projection (current = " + str(settings.max_RMSD_diff) + ")")
print("1 ... set the number of colors for the aRMSD representation (current = " + str(
settings.n_col_aRMSD) + ")")
print("2 ... set threshold for aRMSD bond comparison (current = " + str(settings.thresh) + ")")
print("3 ... set basic color of aRMSD bonds (current = " + str(settings.col_bnd_glob_hex) + ")")
print("4 ... set color of shortened bonds (current = " + str(settings.col_short_hex) + ")")
print("5 ... set color of elongated bonds (current = " + str(settings.col_long_hex) + ")")
print("6 ... set length of the bond intersection (current = " + str(1.0 - 2 * settings.n) + ")")
print("7 ... set precision for the aRMSD picker (current = " + str(settings.calc_prec) + ")")
print("8 ... set number of highest property deviations to be shown (current = " + str(
settings.n_max_diff) + ")")
print("9 ... set the number of points for structure interpolations (current = " + str(
settings.n_steps_interp) + ")")
print("-----------------------------------------------------------------------------")
def pt_change_rmsd_vtk_settings_menu(self, settings, align):
print("\n-----------------------------------------------------------------------------")
print("============================ Change VTK Settings ============================")
print("-----------------------------------------------------------------------------")
print("-10 Return to upper menu")
print("-----------------------------------------------------------------------------")
print("0 ... draw labels (current = " + str(settings.draw_labels) + ")")
print("1 ... change label type (current = " + str(settings.label_type) + ")")
print("2 ... set global scale factor (current = " + str(settings.scale_glob) + ")")
print("3 ... set resolution (current = " + str(settings.res_atom) + ")")
print("4 ... set color of '" + str(align.name1) + "' (current = " + str(settings.col_model_fin_hex) + ")")
print("5 ... set color of '" + str(align.name2) + "' (current = " + str(settings.col_refer_fin_hex) + ")")
print("6 ... use lightning (current = " + str(settings.use_light) + ")")
print("7 ... set export magnification factor (current = " + str(settings.magnif_fact) + ")")
print("8 ... draw color bar (current = " + str(settings.draw_col_map) + ")")
print("-----------------------------------------------------------------------------")
def pt_kabsch_alignment(self, w_function_type):
print("\nNow performing Kabsch alignment of weighted coordinates")
print("> The type of weighting function is: " + str(w_function_type))
def pt_rot_matrix(self, rot_matrix):
print("\nThe rotation matrix for the optimal alignment (from Standard Orientation) is:\n")
print("\t |" + "{:+06.8f}".format(rot_matrix[0][0]) + " " + "{:+06.8f}".format(rot_matrix[0][1]) +
" " + "{:+06.8f}".format(rot_matrix[0][2]) + "|")
print("\t U = |" + "{:+06.8f}".format(rot_matrix[1][0]) + " " + "{:+06.8f}".format(rot_matrix[1][1]) +
" " + "{:+06.8f}".format(rot_matrix[1][2]) + "|")
print("\t |" + "{:+06.8f}".format(rot_matrix[2][0]) + " " + "{:+06.8f}".format(rot_matrix[2][1]) +
" " + "{:+06.8f}".format(rot_matrix[2][2]) + "|")
def pt_bond_added(self, align, idx1, idx2):
print(
"\n> A bond between [" + str(align.sym_idf[idx1]) + " -- " + str(align.sym_idf[idx2]) + "] has been added!")
def pt_bond_removed(self, align, idx1, idx2):
print("\n> The bond between [" + str(align.sym_idf[idx1]) + " -- " + str(
align.sym_idf[idx2]) + "] has been removed!")
def pt_wrong_indices(self, align):
print("\n> ERROR: The given bond identifiers are out of range (1 - " + str(align.n_atoms + 1) + ")!")
| armsd/aRMSD | armsd/alog.py | Python | mit | 77,989 | [
"Pybel",
"VTK"
] | 7a219d80e64458b499f6f6d88fd85c38a390b9936366bbc672ef97084056b3ad |
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.utils import check_random_state, check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from sklearn.utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.
    Parameters
    ----------
    X: array_like
        An array with shape (n_samples, n_features)
    Returns
    -------
    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.
    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; it was just an
    # alias for the builtin int, which is used directly here.
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    # Fill row blocks: block k holds distances from X[k] to all later samples.
    for k in range(n_samples - 1):
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
    return D, ij
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
    def _arg_max_reduced_likelihood_function(self):
        """
        This function estimates the autocorrelation parameters theta as the
        maximizer of the reduced likelihood function.
        (Minimization of the opposite reduced likelihood function is used for
        convenience.)

        Parameters
        ----------
        self : All parameters are stored in the Gaussian Process model object.

        Returns
        -------
        optimal_theta : array_like
            The best set of autocorrelation parameters (the sought maximizer of
            the reduced likelihood function).

        optimal_reduced_likelihood_function_value : double
            The optimal reduced likelihood function value.

        optimal_par : dict
            The BLUP parameters associated to thetaOpt.

        Notes
        -----
        The 'Welch' branch temporarily mutates self (optimizer, verbose,
        theta0/thetaL/thetaU, corr) and restores them before returning.
        """
        # Initialize output
        best_optimal_theta = []
        best_optimal_rlf_value = []
        best_optimal_par = []

        if self.verbose:
            print("The chosen optimizer is: " + str(self.optimizer))
            if self.random_start > 1:
                print(str(self.random_start) + " random starts are required.")

        percent_completed = 0.

        # Force optimizer to fmin_cobyla if the model is meant to be isotropic
        if self.optimizer == 'Welch' and self.theta0.size == 1:
            self.optimizer = 'fmin_cobyla'

        if self.optimizer == 'fmin_cobyla':

            # Objective: COBYLA minimizes, so negate the reduced likelihood;
            # theta is searched in log10 space.
            def minus_reduced_likelihood_function(log10t):
                return - self.reduced_likelihood_function(
                    theta=10. ** log10t)[0]

            # Box constraints on log10(theta); ``i=i`` binds the loop
            # variable early (avoids the late-binding closure pitfall).
            constraints = []
            for i in range(self.theta0.size):
                constraints.append(lambda log10t, i=i:
                                   log10t[i] - np.log10(self.thetaL[0, i]))
                constraints.append(lambda log10t, i=i:
                                   np.log10(self.thetaU[0, i]) - log10t[i])

            for k in range(self.random_start):

                if k == 0:
                    # Use specified starting point as first guess
                    theta0 = self.theta0
                else:
                    # Generate a random starting point log10-uniformly
                    # distributed between bounds
                    log10theta0 = (np.log10(self.thetaL)
                                   + self.random_state.rand(*self.theta0.shape)
                                   * np.log10(self.thetaU / self.thetaL))
                    theta0 = 10. ** log10theta0

                # Run Cobyla
                # NOTE(review): ``iprint`` was removed from recent scipy
                # versions of fmin_cobyla -- confirm against the pinned scipy.
                try:
                    log10_optimal_theta = \
                        optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                             np.log10(theta0).ravel(), constraints,
                                             iprint=0)
                except ValueError as ve:
                    print("Optimization failed. Try increasing the ``nugget``")
                    raise ve

                optimal_theta = 10. ** log10_optimal_theta
                optimal_rlf_value, optimal_par = \
                    self.reduced_likelihood_function(theta=optimal_theta)

                # Compare the new optimizer to the best previous one
                if k > 0:
                    if optimal_rlf_value > best_optimal_rlf_value:
                        best_optimal_rlf_value = optimal_rlf_value
                        best_optimal_par = optimal_par
                        best_optimal_theta = optimal_theta
                else:
                    # First start: unconditionally becomes the incumbent
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
                if self.verbose and self.random_start > 1:
                    # Coarse 5%-step progress reporting over random starts
                    if (20 * k) / self.random_start > percent_completed:
                        percent_completed = (20 * k) / self.random_start
                        print("%s completed" % (5 * percent_completed))

            optimal_rlf_value = best_optimal_rlf_value
            optimal_par = best_optimal_par
            optimal_theta = best_optimal_theta

        elif self.optimizer == 'Welch':

            # Backup of the given attributes
            theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
            corr = self.corr
            verbose = self.verbose

            # This will iterate over fmin_cobyla optimizer
            self.optimizer = 'fmin_cobyla'
            self.verbose = False

            # Initialize under isotropy assumption (single shared theta)
            if verbose:
                print("Initialize under isotropy assumption...")
            self.theta0 = check_array(self.theta0.min())
            self.thetaL = check_array(self.thetaL.min())
            self.thetaU = check_array(self.thetaU.max())
            theta_iso, optimal_rlf_value_iso, par_iso = \
                self._arg_max_reduced_likelihood_function()
            optimal_theta = theta_iso + np.zeros(theta0.shape)

            # Iterate over all dimensions of theta allowing for anisotropy
            if verbose:
                print("Now improving allowing for anisotropy...")
            for i in self.random_state.permutation(theta0.size):
                if verbose:
                    print("Proceeding along dimension %d..." % (i + 1))
                self.theta0 = check_array(theta_iso)
                self.thetaL = check_array(thetaL[0, i])
                self.thetaU = check_array(thetaU[0, i])

                # 1-D correlation wrapper: only dimension i of theta varies,
                # the other components are frozen at the current optimum.
                def corr_cut(t, d):
                    return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                       t[0],
                                                       optimal_theta[0][(i +
                                                                         1)::]])),
                                d)

                self.corr = corr_cut
                optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                    self._arg_max_reduced_likelihood_function()

            # Restore the given attributes
            self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
            self.corr = corr
            self.optimizer = 'Welch'
            self.verbose = verbose

        else:

            raise NotImplementedError("This optimizer ('%s') is not "
                                      "implemented yet. Please contribute!"
                                      % self.optimizer)

        return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| dwettstein/pattern-recognition-2016 | mlp/gaussian_process/gaussian_process.py | Python | mit | 35,002 | [
"Gaussian"
] | 887d9e75b6555a0ed41ab2c892ea8d5e19e4bb8b5b5eb025e9c95fb45ce05e51 |
from django.test import TestCase
from django.utils import timezone
from django.contrib.auth.models import User
from txtalert.apps.therapyedge.importer import Importer, SEX_MAP
from txtalert.apps.therapyedge.xmlrpc import client
from txtalert.core.models import Patient, MSISDN, Visit, Clinic
from txtalert.apps.therapyedge.tests.utils import (PatientUpdate, ComingVisit, MissedVisit,
DoneVisit, DeletedVisit, create_instance)
from datetime import datetime, timedelta, date
import random
import logging
import iso8601
class ImporterTestCase(TestCase):
    """Testing the TherapyEdge import loop.

    NOTE(review): this suite was written for Python 2 -- ``map(...)`` results
    are iterated more than once (and passed to ``len``/``set``) and
    ``random.choice(SEX_MAP.keys())`` needs a sequence; under Python 3 these
    would need ``list(...)`` wrappers.  TODO confirm target interpreter.
    """

    fixtures = ['patients', 'clinics']

    def setUp(self):
        # A fresh importer per test; the fixtures must supply at least one
        # patient and one clinic for the assertions below to mean anything.
        self.importer = Importer()
        # make sure we're actually testing some data
        self.assertTrue(Patient.objects.count() > 0)
        self.clinic = Clinic.objects.all()[0]
        self.user = User.objects.get(username="kumbu")

    def tearDown(self):
        pass

    def test_update_local_patients(self):
        """Test the mapping of the incoming Patient objects to local copies"""
        # mock received data from TherapyEdge XML-RPC
        data = [(
            '',                               # dr_site_id
            '',                               # dr_site_name
            '%s' % idx,                       # age, as string
            random.choice(SEX_MAP.keys()),    # sex
            '2712345678%s' % idx,             # celphone
            random.choice(('true','false')),  # dr_status
            '02-7012%s' % idx                 # te_id
        ) for idx in range(0, 10)]
        updated_patients = map(PatientUpdate._make, data)
        local_patients = list(self.importer.update_local_patients(self.user, updated_patients))
        self.assertEquals(len(local_patients), 10)
        for updated_patient in updated_patients:
            local_patient = Patient.objects.get(te_id=updated_patient.te_id)
            # check for msisdn
            msisdn = MSISDN.objects.get(msisdn=updated_patient.celphone)
            self.assertTrue(msisdn in local_patient.msisdns.all())
            # check for age
            self.assertEquals(local_patient.age, int(updated_patient.age))
            # check for sex
            self.assertEquals(local_patient.sex, SEX_MAP[updated_patient.sex])
            # check for te_id
            self.assertEquals(local_patient.te_id, updated_patient.te_id)

    def test_update_local_coming_visits(self):
        """Coming visits from TherapyEdge become local scheduled Visits."""
        data = [(
            '',                               # dr_site_name
            '',                               # dr_site_id
            'false',                          # dr_status
            '2009-11-1%s 00:00:00' % idx,     # scheduled_visit_date
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        coming_visits = map(ComingVisit._make, data)
        local_visits = set(self.importer.update_local_coming_visits(
            self.user,
            self.clinic,
            coming_visits
        ))
        self.assertEquals(len(local_visits), Patient.objects.count())
        for coming_visit in coming_visits:
            # don't need to test this as Django does this for us
            local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
            self.assertEquals(
                iso8601.parse_date(coming_visit.scheduled_visit_date).date(),
                local_visit.date
            )

    def test_update_local_missed_visits(self):
        """Missed visits from TherapyEdge become local Visits dated at the
        missed date."""
        data = [(
            '',                               # dr_site_name
            '',                               # dr_site_id
            '2009-11-1%s 00:00:00' % idx,     # missed_date
            '',                               # dr_status
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        missed_visits = map(MissedVisit._make, data)
        local_visits = set(self.importer.update_local_missed_visits(
            self.user,
            self.clinic,
            missed_visits
        ))
        self.assertEquals(len(local_visits), Patient.objects.count())
        for missed_visit in missed_visits:
            local_visit = Visit.objects.get(te_visit_id=missed_visit.key_id)
            self.assertEquals(
                iso8601.parse_date(missed_visit.missed_date).date(),
                local_visit.date
            )

    def test_missed_visits(self):
        """A previously scheduled visit reported missed for today is flagged
        with status 'm'."""
        # helper methods
        def make_visit(named_tuple_klass, dictionary):
            return named_tuple_klass._make(named_tuple_klass._fields) \
                ._replace(**dictionary)
        # mock patient
        patient = Patient.objects.all()[0]
        # create a visit that's already been scheduled earlier, mock a
        # previous import
        visit = patient.visit_set.create(
            te_visit_id='02-002173383',
            date=date.today(),
            status='s',
            clinic=self.clinic
        )
        # create a missed visit
        missed_visit = make_visit(MissedVisit, {
            'dr_site_name': '',
            'dr_site_id': '',
            'dr_status': 'false',
            'missed_date': '%s 00:00:00' % date.today(),
            'key_id': '02-002173383',
            'te_id': patient.te_id
        })
        # import the data
        list(self.importer.update_local_missed_visits(self.user, self.clinic, [missed_visit]))
        # get the visit and check its status
        visit = patient.visit_set.get(te_visit_id='02-002173383')
        self.assertEquals(visit.status, 'm')

    def test_update_local_reschedules_from_missed(self):
        """missed visits in the future are reschedules"""
        future_date = date.today() + timedelta(days=7)  # one week ahead
        # first plan the scheduleds
        data = [(
            '',                               # dr_site_name
            '',                               # dr_site_id
            'false',                          # dr_status
            # scheduled_visit_date, force to start one day ahead of today
            # to make sure they're always future dates
            '%s 00:00:00' % (date.today() + timedelta(days=(idx+1))),
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        coming_visits = map(ComingVisit._make, data)
        local_visits = set(self.importer.update_local_coming_visits(
            self.user,
            self.clinic,
            coming_visits
        ))
        self.assertEquals(len(local_visits), Patient.objects.count())
        for coming_visit in coming_visits:
            # don't need to test this as Django does this for us
            local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
            self.assertEquals('s', local_visit.status)
        # now plan the future misseds, should be reschedules
        data = [(
            '',                               # dr_site_name
            '',                               # dr_site_id
            '%s 00:00:00' % future_date,      # missed_date
            '',                               # dr_status
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        rescheduled_visits = map(MissedVisit._make, data)
        local_visits = set(self.importer.update_local_missed_visits(
            self.user,
            self.clinic,
            rescheduled_visits
        ))
        self.assertEquals(len(local_visits), Patient.objects.count())
        for rescheduled_visit in rescheduled_visits:
            local_visit = Visit.objects.get(te_visit_id=rescheduled_visit.key_id)
            self.assertEquals(local_visit.status, 'r')

    def test_update_local_reschedules_from_coming(self):
        """future visits that get a new date in the future are reschedules"""
        data = [(
            '',                               # dr_site_name
            '',                               # dr_site_id
            'false',                          # dr_status
            # scheduled_visit_date, force to start one day ahead of today
            # to make sure they're always future dates
            '%s 00:00:00' % (date.today() + timedelta(days=(idx+1))),
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        coming_visits = map(ComingVisit._make, data)
        local_visits = set(self.importer.update_local_coming_visits(
            self.user,
            self.clinic,
            coming_visits
        ))
        self.assertEquals(len(local_visits), Patient.objects.count())
        for coming_visit in coming_visits:
            # don't need to test this as Django does this for us
            local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
            self.assertEquals('s', local_visit.status)
        # send in a batch of future coming visits to mimick reschedules
        future_date = date.today() + timedelta(days=7)  # one week ahead
        data = [(
            '',                               # dr_site_name
            '',                               # dr_site_id
            'false',                          # dr_status
            '%s 00:00:00' % future_date,      # scheduled_visit_date
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        coming_visits = map(ComingVisit._make, data)
        set(self.importer.update_local_coming_visits(self.user, self.clinic, coming_visits))
        for coming_visit in coming_visits:
            local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
            self.assertEquals('r', local_visit.status)

    def test_update_local_done_visits(self):
        """Done visits are stored at the done date with status 'a'
        (attended)."""
        data = [(
            '2009-11-1%s 00:00:00' % idx,     # done_date
            '',                               # dr_site_id
            '',                               # dr_status
            '',                               # dr_site_name
            '2009-10-1%s 00:00:00' % idx,     # scheduled_date, mocked to be a month earlier
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        done_visits = map(DoneVisit._make, data)
        local_visits = set(self.importer.update_local_done_visits(
            self.user,
            self.clinic,
            done_visits
        ))
        self.assertEquals(len(local_visits), Patient.objects.count())
        for done_visit in done_visits:
            local_visit = Visit.objects.get(te_visit_id=done_visit.key_id)
            # the visit should have the same done date
            self.assertEquals(
                iso8601.parse_date(done_visit.done_date).date(),
                local_visit.date
            )
            # the visit should have the status of a, 'attended'
            self.assertEquals(
                local_visit.status,
                'a'
            )

    def test_update_local_deleted_visits(self):
        """Deleted visits reported by TherapyEdge remove the local Visits."""
        # first create the visit events to be deleted
        data = [(
            '',                               # dr_site_name
            '',                               # dr_site_id
            'false',                          # dr_status
            '2009-11-1%s 00:00:00' % idx,     # scheduled_visit_date
            '02-00089421%s' % idx,            # key_id
            patient.te_id,                    # te_id
        ) for idx, patient in enumerate(Patient.objects.all())]
        coming_visits = map(ComingVisit._make, data)
        local_visits = set(self.importer.update_local_coming_visits(
            self.user,
            self.clinic,
            coming_visits
        ))
        self.assertEquals(len(coming_visits), len(local_visits))
        data = [(
            '02-00089421%s' % idx,            # key_id
            'false',                          # dr_status
            '',                               # dr_site_id
            patient.te_id,                    # te_id
            '',                               # dr_site_name
        ) for idx, patient in enumerate(Patient.objects.all())]
        deleted_visits = map(DeletedVisit._make, data)
        # use list comprihensions because set() dedupes the list and for some
        # reason it considers deleted the deleted django objects as dupes
        # and returns a list of one
        local_visits = [v for v in self.importer.update_local_deleted_visits(
            self.user,
            deleted_visits
        )]
        self.assertEquals(len(local_visits), Patient.objects.count())
        for deleted_visit in deleted_visits:
            self.assertEquals(
                Visit.objects.filter(te_visit_id=deleted_visit.key_id).count(),
                0
            )

    def test_for_history_duplication(self):
        """
        Test for history duplication happening after numerous imports over time

        The data for this test has been gleaned from the txtalert log being
        used in production. For some reason imports that should be 'missed'
        are set as 'rescheduled' and eventhough nothing changes in the
        appointment, a historical visit is still saved.
        """
        # create the patient for which we'll get the visits
        patient = Patient.objects.create(te_id='02-82088', age=29, sex='m',
                                         owner=self.user)
        # importer
        importer = Importer()
        # Replay six days of identical coming/missed pairs for the same
        # key_id, exactly as seen in the production log.
        # [importer] 2010-03-18 08:00:37,705 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
        coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
        # [importer] 2010-03-18 08:01:39,354 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
        missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
        # [importer] 2010-03-19 08:00:36,876 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
        coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
        # [importer] 2010-03-19 08:01:36,747 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
        missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
        # [importer] 2010-03-20 08:00:29,600 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
        coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
        # [importer] 2010-03-20 08:01:30,926 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
        missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
        # [importer] 2010-03-21 08:00:28,052 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
        coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
        # [importer] 2010-03-21 08:01:33,909 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
        missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
        # [importer] 2010-03-22 08:00:27,711 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
        coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
        # [importer] 2010-03-22 08:01:33,549 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
        missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
        # [importer] 2010-03-23 08:00:26,453 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
        coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
        # [importer] 2010-03-23 08:01:36,731 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
        missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
        # [importer] 2010-03-25 09:00:41,774 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
        coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
        # [importer] 2010-03-25 09:00:41,850 DEBUG Updating existing Visit: 37361 / ({'date': datetime.date(2010, 3, 24), 'updated_at': datetime.datetime(2010, 3, 23, 8, 1, 36)} vs {'status': u'r', 'comment': u'', 'visit_type': u'', 'deleted': 0, 'created_at': datetime.datetime(2010, 3, 18, 8, 0, 37), 'updated_at': datetime.datetime(2010, 3, 23, 8, 1, 36), 'te_visit_id': u'02-091967084', 'date': datetime.date(2010, 3, 24), 'id': 37361L})
        # [importer] 2010-03-25 09:01:40,902 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
        missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
        # Despite the repeated identical imports only ONE history record may
        # exist for the missed visit.
        visit = patient.visit_set.latest()
        self.assertEquals(visit.status, 'm')
        self.assertEquals(visit.history.count(), 1)
        done_visit = create_instance(DoneVisit, {'dr_site_name': '', 'dr_site_id': '', 'done_date': '2010-03-24 00:00:00', 'scheduled_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
        local_done_visit = importer.update_local_done_visit(self.user, self.clinic, done_visit)
        # The genuine status change (missed -> attended) adds exactly one
        # more history entry.
        visit = patient.visit_set.latest()
        self.assertEquals(visit.status, 'a')
        self.assertEquals(visit.history.count(), 2)
class PatchedClient(client.Client):
    """Stub of the TherapyEdge XML-RPC client used by the test suite.

    Instead of performing network calls it returns canned result sets,
    supplied as keyword arguments at construction time and keyed by the
    request name.
    """

    def __init__(self, **kwargs):
        # Deliberately does not call client.Client.__init__: no real
        # connection should ever be set up in tests.
        self.patches = kwargs

    def mocked_patients_data(self, request, *args, **kwargs):
        """Mocking the response we get from TherapyEdge for a patients_update
        call"""
        # Lazy %-args: the message is only formatted if DEBUG is enabled.
        logging.debug('Mocked rpc_call called with: %s, %s, %s',
                      request, args, kwargs)
        return self.patches[request]
class ImporterXmlRpcClientTestCase(TestCase):
    """Tests for the Importer's XML-RPC client helpers.

    The TherapyEdge endpoint is replaced with a PatchedClient so no network
    traffic happens; each test checks that the raw dicts returned by the
    (mocked) server are transformed into the right model instances.
    """

    # Django fixtures providing the Patient and Clinic rows the tests use.
    fixtures = ['patients', 'clinics']

    def setUp(self):
        self.importer = Importer()
        self.user = User.objects.get(username="kumbu")
        # patching the client to automatically return our specified result
        # sets without doing an XML-RPC call; one record per fixture patient
        # for each request type.
        patched_client = PatchedClient(
            patients_update=[{
                'dr_site_name': '',
                'dr_site_id': '',
                'age': '2%s' % i,
                'sex': random.choice(['Male', 'Female']),
                'celphone': '2712345678%s' % i,
                'dr_status': '',
                'te_id': patient.te_id,
            } for i, patient in enumerate(Patient.objects.all())],
            # upcoming visits, two days in the future
            comingvisits=[{
                'dr_site_name': '',
                'dr_site_id': '',
                'dr_status': '',
                'scheduled_visit_date': str(timezone.now() + timedelta(days=2)),
                'key_id': '02-1234%s' % i,
                'te_id': patient.te_id,
            } for i, patient in enumerate(Patient.objects.all())],
            # missed visits, two days in the past
            missedvisits=[{
                'dr_site_name': '',
                'dr_site_id': '',
                'missed_date': str(timezone.now() - timedelta(days=2)),
                'dr_status': '',
                'key_id': '03-1234%s' % i,
                'te_id': patient.te_id
            } for i, patient in enumerate(Patient.objects.all())],
            # attended visits, two days in the past
            donevisits=[{
                'done_date': str(timezone.now() - timedelta(days=2)),
                'dr_site_id': '',
                'dr_status': '',
                'dr_site_name': '',
                'scheduled_date': str(timezone.now() - timedelta(days=2)),
                'key_id': '04-1234%s' % i,
                'te_id': patient.te_id
            } for i, patient in enumerate(Patient.objects.all())],
            # deletions reuse the coming-visit key_ids so they match up
            deletedvisits=[{
                'key_id': '02-1234%s' % i,
                'dr_status': '',
                'dr_site_id': '',
                'te_id': patient.te_id,
                'dr_site_name': ''
            } for i, patient in enumerate(Patient.objects.all())]
        )
        # monkey patching
        self.importer.client.server.patients_data = patched_client.mocked_patients_data
        self.clinic = Clinic.objects.all()[0] # make sure we have a clinic
        self.assertTrue(Patient.objects.count()) # make sure our fixtures aren't empty

    def tearDown(self):
        pass

    def test_import_updated_patients(self):
        """The xmlrpc client is largely some boilterplate code and some little
        helpers that transform the returned Dict into class instances. We're
        testing that functionality here. Since all the stuff uses the same boiler
        plate code we're only testing it for one method call.
        """
        updated_patients = self.importer.import_updated_patients(
            user=self.user,
            clinic=self.clinic,
            since=(timezone.now() - timedelta(days=1)),
            until=timezone.now()
        )
        # the importer returns a generator; materialise it first
        updated_patients = list(updated_patients)
        # Fix: this used assertTrue(len(...), count), where the count was
        # silently treated as the failure *message* - i.e. the assertion
        # passed for any non-empty list. An equality check was intended,
        # consistent with the other tests below.
        self.assertEquals(len(updated_patients), Patient.objects.count())
        self.assertTrue(isinstance(updated_patients[0], Patient))

    def test_import_coming_visits(self):
        coming_visits = self.importer.import_coming_visits(
            user=self.user,
            clinic=self.clinic,
            since=(timezone.now() - timedelta(days=1)),
            until=timezone.now(),
            visit_type=3 # Medical Visit
        )
        coming_visits = list(coming_visits)
        self.assertEquals(len(coming_visits), Patient.objects.count())
        self.assertTrue(isinstance(coming_visits[0], Visit))

    def test_missed_visits(self):
        missed_visits = self.importer.import_missed_visits(
            user=self.user,
            clinic=self.clinic,
            since=(timezone.now() - timedelta(days=1)),
            visit_type=3 # Medical Visit
        )
        missed_visits = list(missed_visits)
        self.assertEquals(len(missed_visits), Patient.objects.count())
        self.assertTrue(isinstance(missed_visits[0], Visit))

    def test_done_visits(self):
        done_visits = self.importer.import_done_visits(
            user=self.user,
            clinic=self.clinic,
            since=(timezone.now() - timedelta(days=1)),
            until=timezone.now(),
            visit_type=3 # Medical Visit
        )
        done_visits = list(done_visits)
        self.assertEquals(len(done_visits), Patient.objects.count())
        self.assertTrue(isinstance(done_visits[0], Visit))

    def test_deleted_visits(self):
        # first have some coming visits
        coming_visits = list(self.importer.import_coming_visits(
            user=self.user,
            clinic=self.clinic,
            since=(timezone.now() - timedelta(days=1)),
            until=timezone.now(),
            visit_type=3 # Medical Visit
        ))
        # then mark them as deleted, they're matched because they
        # have the same key_id
        deleted_visits = list(self.importer.import_deleted_visits(
            user=self.user,
            clinic=self.clinic,
            since=(timezone.now() - timedelta(days=1)),
            until=timezone.now(),
            visit_type=3 # Medical Visit
        ))
        self.assertEquals(len(deleted_visits), Patient.objects.count())
        self.assertTrue(isinstance(deleted_visits[0], Visit))
| praekelt/txtalert | txtalert/apps/therapyedge/tests/importer.py | Python | gpl-3.0 | 28,383 | [
"VisIt"
] | 86ba841b701e16a6d38ce1e95a1d5b259f5e6fbbcece2eeda398f891670ab86b |
#!/usr/bin/env python
"""
MHC Epitope analysis
Created September 2013
Copyright (C) Damien Farrell
"""
import sys, os, shutil, string, types
import csv, glob, pickle, itertools
import re
import time, random
from collections import OrderedDict
from operator import itemgetter
#import matplotlib
#matplotlib.use('agg')
import pylab as plt
import numpy as np
import pandas as pd
import subprocess
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import base, sequtils, tepitope, utilities
home = os.path.expanduser("~")
#fix paths!
# Directory holding the genbank genome files (<name>.gb).
genomespath = os.path.join(home, 'epitopedata')
# Directory where prediction output is stored/read.
datadir = os.path.join(home, 'testpredictions')
def plotheatmap(df, ax=None, cmap='Blues'):
    """Plot the numeric columns of a dataframe as a heatmap.

    Args:
        df: dataframe; only its numeric columns are drawn.
        ax: optional matplotlib axes to draw into; a new figure is
            created when omitted.
        cmap: matplotlib colormap name.
    """
    # Fix: compare to None with 'is', not '==' (PEP 8 identity check).
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    else:
        # kept for the (currently disabled) colorbar call below
        fig = ax.get_figure()
    df = df._get_numeric_data()
    hm = ax.pcolor(df, cmap=cmap)
    #fig.colorbar(hm, ax=ax)
    # centre the tick labels on each cell
    ax.set_xticks(np.arange(0.5, len(df.columns)))
    ax.set_yticks(np.arange(0.5, len(df.index)))
    ax.set_xticklabels(df.columns, minor=False, fontsize=10, rotation=45)
    ax.set_yticklabels(df.index, minor=False, fontsize=8)
    ax.set_ylim(0, len(df.index))
    # clamp colours to the [0, 1] score range
    hm.set_clim(0, 1)
    plt.tight_layout()
    return
def getAllBinders(path, method='tepitope', n=3, cutoff=0.95, promiscuous=True):
    """Get all promiscuous binders from a set of proteins in path.

    Args:
        path: directory containing per-protein prediction files (*.mpk).
        method: prediction method name, used to pick the predictor.
        n: minimum number of alleles for a promiscuous binder.
        cutoff: global score quantile used to derive per-allele cutoffs.
        promiscuous: if False, return all binders instead.

    Returns:
        A dataframe of binders with added start/end columns, or None when
        the method is 'bcell' or no prediction files are present.
    """
    # print() with a single argument behaves the same on Python 2 and 3
    print('getting binders..')
    binders = []
    m = method
    if m == 'bcell':
        return  # not applicable
    P = base.getPredictor(m)
    files = glob.glob(os.path.join(path, '*.mpk'))
    # get allele specific cutoffs
    P.allelecutoffs = getCutoffs(path, method, cutoff, overwrite=True)
    for f in files:
        df = pd.read_msgpack(f)
        if promiscuous:
            b = P.getPromiscuousBinders(data=df, n=n)
        else:
            b = P.getBinders(data=df)
        binders.append(b)
    if not binders:
        # no prediction files found; avoid pd.concat([]) raising ValueError
        return None
    result = pd.concat(binders)
    result['start'] = result.pos
    result['end'] = result.pos + result.peptide.str.len()
    return result
def getCutoffs(path, method, q=0.98, overwrite=False):
    """Return per-allele score cutoffs at quantile ``q`` for predictions in ``path``.

    The quantile table is cached in ``quantiles.csv`` inside ``path`` and is
    regenerated when missing or when ``overwrite`` is requested.
    """
    table_path = os.path.join(path, 'quantiles.csv')
    if overwrite or not os.path.exists(table_path):
        base.getScoreDistributions(method, path)
    quantile_table = pd.read_csv(table_path, index_col=0)
    return dict(quantile_table.ix[q])
def getNmer(df, n=20, key='translation'):
    """Return an n-mer (default 20-mer) peptide for each start/end window in df.

    Fix: the inner helper previously re-bound ``n = 20`` and sliced with a
    hard-coded ``[:20]``, so the ``n`` argument was silently ignored. It now
    honours ``n`` (default behaviour is unchanged).

    Args:
        df: dataframe with ``start``/``end`` columns and a sequence column.
        n: target peptide length used when the window needs padding.
        key: name of the column holding the full sequence.

    Returns:
        A Series of peptide strings, one per row of df.
    """
    def getseq(x):
        size = len(x[key])
        if size < n:
            # sequence shorter than n: widen the window symmetrically,
            # then clip the result back to at most n characters
            o = int((n - size) / 2.0) + 1
            s = x[key][x.start - o:x.end + o][:n]
        else:
            s = x[key][x.start:x.end]
        return s
    return df.apply(getseq, 1)
def getOverlappingBinders(binders1, binders2, label='overlap'):
    """Overlap for binders with any set of peptides with start/end cols.

    For every row in ``binders1`` (requires ``name``, ``start``, ``end``),
    counts how many rows of ``binders2`` (requires ``name``, ``pos``) fall
    strictly inside its start/end window, and stores the count in a new
    ``label`` column.

    Returns:
        A copy of ``binders1`` (rows grouped by name) with the added column.
    """
    new = []
    def overlap(x, b):
        # positions strictly inside the (start, end) window
        f = b[(b.pos > x.start) & (b.pos < x.end)]
        return len(f)
    for n, df in binders1.groupby('name'):
        b = binders2[binders2.name == n]
        df[label] = df.apply(lambda r: overlap(r, b), axis=1)
        new.append(df)
    result = pd.concat(new)
    # print() with one argument is valid Python 2 and 3
    print('%s with overlapping binders' % len(result[result[label] > 0]))
    return result
def getOrthologs(seq, expect=10, hitlist_size=400, equery=None):
    """Fetch orthologous sequences using blast and return the records
    as a dataframe.

    Args:
        seq: query protein sequence.
        expect: blast e-value threshold.
        hitlist_size: maximum number of hits to request.
        equery: optional Entrez query to restrict the search.

    Returns:
        Dataframe of filtered blast hits, or None if the blast call failed.
    """
    from Bio.Blast import NCBIXML,NCBIWWW
    from Bio import Entrez, SeqIO
    Entrez.email = "anon.user@ucd.ie"
    #entrez_query = "mycobacterium[orgn]"
    #db = '/local/blast/nr'
    #SeqIO.write(SeqRecord(Seq(seq)), 'tempseq.faa', "fasta")
    #sequtils.doLocalBlast(db, 'tempseq.faa', output='my_blast.xml', maxseqs=100, evalue=expect)
    try:
        print('running blast..')
        # Fix: honour the hitlist_size argument (was hard-coded to 500,
        # silently ignoring the parameter and its documented default of 400).
        result_handle = NCBIWWW.qblast("blastp", "nr", seq, expect=expect,
                                       hitlist_size=hitlist_size,
                                       entrez_query=equery)
        time.sleep(2)
    except Exception:
        # Fix: narrowed from a bare except so e.g. KeyboardInterrupt
        # is no longer swallowed.
        print('blast timeout')
        return
    # Fix: use context managers so the file handles are always closed.
    with open("my_blast.xml", "w") as savefile:
        savefile.write(result_handle.read())
    with open("my_blast.xml") as result_handle:
        df = sequtils.getBlastResults(result_handle)
    # pipe-delimited subject line: field 3 = accession, field 4 = description
    df['accession'] = df.subj.apply(lambda x: x.split('|')[3])
    df['definition'] = df.subj.apply(lambda x: x.split('|')[4])
    df = df.drop(['subj','positive','query_length','score'],1)
    print(len(df))
    df.drop_duplicates(subset=['definition'], inplace=True)
    # drop exact self-hits
    df = df[df['perc_ident']!=100]
    print(len(df))
    #df = getAlignedBlastResults(df)
    return df
def getAlignedBlastResults(df,aln=None,idkey='accession',productkey='definition'):
    """Get gapped alignment from blast results.

    Writes the hits to fasta, aligns them with muscle and merges the gapped
    sequences back onto the blast dataframe (sorted by identity, descending).
    """
    sequtils.dataframe2Fasta(df, idkey=idkey, seqkey='sequence',
                             productkey=productkey, outfile='blast_found.faa')
    aln = sequtils.muscleAlignment("blast_found.faa")
    alnrows = [[a.id,str(a.seq)] for a in aln]
    alndf = pd.DataFrame(alnrows,columns=['accession','seq'])
    #res = df.merge(alndf, left_index=True, right_index=True)
    res = df.merge(alndf, on=['accession'])
    # the ungapped sequence is superseded by the aligned one
    res = res.drop('sequence',1)
    #get rid of duplicate hits
    #res.drop_duplicates(subset=['definition','seq'], inplace=True)
    res = res.sort('identity',ascending=False)
    # print() with one argument is valid Python 2 and 3
    print('%s hits, %s filtered' % (len(df), len(res)))
    return res
def setBlastLink(df):
    """Wrap each accession in an NCBI protein-database hyperlink (in place)."""
    template = '<a href=http://www.ncbi.nlm.nih.gov/protein/%s> %s </a>'
    df['accession'] = df.accession.apply(lambda acc: template % (acc, acc))
    return df
def alignment2Dataframe(aln):
    """Convert a blast alignment into a two-column (name, seq) dataframe
    suitable for building tables."""
    rows = [[record.id, str(record.seq)] for record in aln]
    return pd.DataFrame(rows, columns=['name', 'seq'])
def findClusters(binders, method, dist=None, minsize=3,
                 genome=None):
    """Get clusters of binders for all predictions.

    Args:
        binders: dataframe of binders with ``name``, ``pos`` and ``peptide``.
        method: method name, recorded in the result.
        dist: dbscan distance threshold; defaults to peptide length + 1.
        minsize: minimum cluster size.
        genome: optional genome dataframe used to attach a peptide per cluster.

    Returns:
        Dataframe of clusters sorted by binder count and density
        (empty dataframe when nothing clusters).
    """
    C = []
    grps = list(binders.groupby('name'))
    print('%s proteins with binders' % len(grps))
    length = len(binders.head(1).peptide.max())
    # Fix: identity comparison with None instead of '=='
    if dist is None:
        dist = length + 1
        print('using dist for clusters: %s' % dist)
    for n, b in grps:
        if len(b) == 0:
            continue
        clusts = base.dbscan(b, dist=dist, minsize=minsize)
        if len(clusts) == 0:
            continue
        for c in clusts:
            # (removed a dead 'gaps' computation that was never used)
            C.append([n, min(c), max(c) + length, len(c)])
    if len(C) == 0:
        print('no clusters')
        return pd.DataFrame()
    x = pd.DataFrame(C, columns=['name','start','end','binders'])
    x['clustersize'] = (x.end - x.start)
    x['density'] = x.binders / (x.end - x.start)
    x['method'] = method
    if genome is not None:
        temp = x.merge(genome[['locus_tag','gene','translation']],
                       left_on='name', right_on='locus_tag')
        x['peptide'] = getNmer(temp)
    x = x.sort(['binders','density'], ascending=False)
    print('%s clusters found in %s proteins' % (len(x), len(x.groupby('name'))))
    print('')
    return x
def genomeAnalysis(datadir,label,gname,method):
    """Summarise binder predictions across a whole genome.

    NOTE (original): this method should be made independent of web app
    paths etc.

    Args:
        datadir: root directory of stored predictions.
        label: run label (first path component under datadir).
        gname: genome name; ``<gname>.gb`` must exist in genomespath.
        method: prediction method used for the stored results.

    Returns:
        Per-protein summary dataframe with binder stats merged with genome
        annotation (and cluster density where clusters were found).
    """
    path = os.path.join(datadir, '%s/%s/%s' %(label,gname,method))
    #path='test'
    gfile = os.path.join(genomespath,'%s.gb' %gname)
    g = sequtils.genbank2Dataframe(gfile, cds=True)
    # promiscuous binders across all proteins of the genome
    b = getAllBinders(path, method=method, n=5)
    P = base.getPredictor(method)
    # per-protein mean/count/max of the predictor's score column
    res = b.groupby('name').agg({P.scorekey:[np.mean,np.size,np.max]}).sort()
    res.columns = res.columns.get_level_values(1)
    res = res.merge(g[['locus_tag','length','gene','product','order']],
                    left_index=True,right_on='locus_tag')
    # binder count as a percentage of protein length
    res['perc'] = res['size']/res.length*100
    res = res.sort('perc',ascending=False)
    # top peptides ranked by score (computed but not returned here)
    top = b.groupby('peptide').agg({P.scorekey:np.mean,'allele':np.max,
                                    'name': lambda x: x}).reset_index()
    top = top.sort(P.scorekey,ascending=P.rankascending)
    cl = findClusters(b, method, dist=9, minsize=3)
    if cl is not None:
        # attach the densest cluster per protein
        gc = cl.groupby('name').agg({'density':np.max})
        res = res.merge(gc,left_on='locus_tag',right_index=True)
    #print res[:10]
    return res
def testFeatures():
    """Test genbank feature handling: look up one locus and its neighbours."""
    fname = os.path.join(datadir,'MTB-H37Rv.gb')
    df = sequtils.genbank2Dataframe(fname, cds=True)
    df = df.set_index('locus_tag')
    keys = df.index
    name = 'Rv0011c'
    row = df.ix[name]
    seq = row.translation
    prod = row['product']
    rec = SeqRecord(Seq(seq), id=name, description=prod)
    fastafmt = rec.format("fasta")
    print(fastafmt)
    print(row.to_dict())
    # neighbouring locus tags in genome order
    ind = keys.get_loc(name)
    previous = keys[ind-1]
    if ind < len(keys)-1:
        # Fix: renamed from 'next' to avoid shadowing the builtin
        next_key = keys[ind+1]
    else:
        next_key = None
    return
def testrun(gname):
    """Smoke test: run tepitope predictions for one protein of a genome and
    derive the score distributions."""
    method = 'tepitope'  # alternatives: 'iedbmhc1', 'netmhciipan'
    path = 'test'
    genbank_file = os.path.join(genomespath, '%s.gb' % gname)
    proteins = sequtils.genbank2Dataframe(genbank_file, cds=True)
    #names = list(df.locus_tag[:1])
    names = ['VP24']
    alleles1 = ["HLA-A*02:02", "HLA-A*11:01", "HLA-A*32:07", "HLA-B*15:17", "HLA-B*51:01",
                "HLA-C*04:01", "HLA-E*01:03"]
    alleles2 = ["HLA-DRB1*0101", "HLA-DRB1*0305", "HLA-DRB1*0812", "HLA-DRB1*1196", "HLA-DRB1*1346",
                "HLA-DRB1*1455", "HLA-DRB1*1457", "HLA-DRB1*1612", "HLA-DRB4*0107", "HLA-DRB5*0203"]
    predictor = base.getPredictor(method)
    predictor.iedbmethod = 'IEDB_recommended'  # or 'netmhcpan'
    predictor.predictProteins(proteins, length=11, alleles=alleles2,
                              names=names, save=True, path=path)
    result_file = os.path.join('test', names[0] + '.mpk')
    predictor.data = pd.read_msgpack(result_file)
    #b = P.getBinders(data=df)
    #print b[:20]
    base.getScoreDistributions(method, path)
    return
def testBcell(gname):
    """Smoke test for the b-cell epitope predictor on one protein."""
    path = 'test'
    gfile = os.path.join(genomespath,'%s.gb' %gname)
    df = sequtils.genbank2Dataframe(gfile, cds=True)
    names = ['VP24']
    P = base.getPredictor('bcell')
    P.iedbmethod = 'Chou-Fasman'
    P.predictProteins(df, names=names, save=True, path=path)
    # print() with one argument is valid Python 2 and 3
    print(P.data)
    return
def testconservation(label,gname):
    """Conservation analysis: build a distance tree from cached blast hits
    for one protein (VP24)."""
    tag = 'VP24'
    pd.set_option('max_colwidth', 800)
    gfile = os.path.join(genomespath,'%s.gb' %gname)
    g = sequtils.genbank2Dataframe(gfile, cds=True)
    res = g[g['locus_tag']==tag]
    seq = res.translation.head(1).squeeze()
    print(seq)
    #alnrows = getOrthologs(seq)
    #alnrows.to_csv('blast_%s.csv' %tag)
    # read previously cached blast results instead of re-running blast
    alnrows = pd.read_csv('blast_%s.csv' %tag,index_col=0)
    alnrows.drop_duplicates(subset=['accession'], inplace=True)
    # keep reasonably close homologues only
    alnrows = alnrows[alnrows['perc_ident']>=60]
    seqs = [SeqRecord(Seq(a.sequence),a.accession) for i,a in alnrows.iterrows()]
    print(seqs[:2])
    sequtils.distanceTree(seqs=seqs)#,ref=seqs[0])
    #sequtils.ETETree(seqs, ref, metric)
    #df = sequtils.getFastaProteins("blast_found.faa",idindex=3)
    '''method='tepitope'
    P = base.getPredictor(method)
    P.predictSequences(df,seqkey='sequence')
    b = P.getBinders()'''
    return
def getLocalOrthologs(seq, db):
    """Get alignment for a protein using local blast db.

    Writes the query to a temp fasta file, blasts it against ``db`` and
    returns the parsed hits as a dataframe.
    """
    SeqIO.write(SeqRecord(Seq(seq)), 'tempseq.faa', "fasta")
    sequtils.doLocalBlast(db, 'tempseq.faa', output='my_blast.xml', maxseqs=30)
    # Fix: close the results file when done instead of leaking the handle
    with open("my_blast.xml") as result_handle:
        df = sequtils.getBlastResults(result_handle)
    return df
def findConservedPeptide(peptide, recs):
    """Find sequences where a peptide is conserved.

    Args:
        peptide: peptide string to search for.
        recs: dataframe with ``accession`` and (gapped) ``sequence`` columns.

    Returns:
        Dataframe indexed by accession with a ``found`` column giving the
        0-based position of the peptide, or NaN where it is absent.
    """
    positions = []
    for i, a in recs.iterrows():
        # strip alignment gaps before searching
        seq = a.sequence.replace('-', '')
        positions.append(seq.find(peptide))
    s = pd.DataFrame(positions, columns=['found'], index=recs.accession)
    # str.find returns -1 for "not present"; map that to NaN
    s = s.replace(-1, np.nan)
    # (removed dead code: an unused 's.count()' result)
    return s
def getPredictions(path,tag,method='tepitope',q=0.96):
    """Get predictions from file system.

    Loads stored predictions for one protein and attaches per-allele score
    cutoffs at quantile ``q``.

    Returns:
        A configured predictor, or None when no stored data exists.
    """
    q = round(q, 2)
    filename = os.path.join(path, tag + '.mpk')
    if not os.path.exists(filename):
        return
    df = pd.read_msgpack(filename)
    pred = base.getPredictor(name=method, data=df)
    # Removed dead code: an unused 'cutoffs = {}' that was immediately
    # overwritten, and a no-op 'pred = pred' assignment.
    pred.allelecutoffs = getCutoffs(path, method, q)
    return pred
def test():
    """Ad-hoc driver: run whichever test routines are enabled below."""
    gname = 'ebolavirus'
    # label is only used by the commented-out routines below
    label = 'test'
    testrun(gname)
    #testBcell(gname)
    #testgenomeanalysis(label,gname,method)
    #testconservation(label,gname)
    #testFeatures()
    return
if __name__ == '__main__':
    # widen pandas console output before running the ad-hoc test driver
    pd.set_option('display.width', 600)
    test()
| dmnfarrell/epitopemap | modules/mhcpredict/analysis.py | Python | apache-2.0 | 12,607 | [
"BLAST"
] | 62080254679aadb7f394d02578c1bb5ff1341feea01e0e9220be8b75bed57361 |
from __future__ import absolute_import
import cython
cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
Options=object, UtilNodes=object, LetNode=object,
LetRefNode=object, TreeFragment=object, EncodedString=object,
error=object, warning=object, copy=object, _unicode=object)
import copy
from . import PyrexTypes
from . import Naming
from . import ExprNodes
from . import Nodes
from . import Options
from . import Builtin
from .Visitor import VisitorTransform, TreeVisitor
from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
from .UtilNodes import LetNode, LetRefNode
from .TreeFragment import TreeFragment
from .StringEncoding import EncodedString, _unicode
from .Errors import error, warning, CompileError, InternalError
from .Code import UtilityCode
class NameNodeCollector(TreeVisitor):
    """Collect all NameNodes of a (sub-)tree in the ``name_nodes``
    attribute.
    """
    def __init__(self):
        super(NameNodeCollector, self).__init__()
        # NameNodes found so far, in visiting order
        self.name_nodes = []

    def visit_NameNode(self, node):
        # record the node; NameNodes have no children worth descending into
        self.name_nodes.append(node)

    def visit_Node(self, node):
        # generic fallback: just recurse into children
        self._visitchildren(node, None)
class SkipDeclarations(object):
    """
    Variable and function declarations can often have a deep tree structure,
    and yet most transformations don't need to descend to this depth.

    Declaration nodes are removed after AnalyseDeclarationsTransform, so there
    is no need to use this for transformations after that point.

    Mixin: each visit method returns the node unchanged *without* visiting
    its children, pruning the traversal at declaration boundaries.
    """
    def visit_CTypeDefNode(self, node):
        return node

    def visit_CVarDefNode(self, node):
        return node

    def visit_CDeclaratorNode(self, node):
        return node

    def visit_CBaseTypeNode(self, node):
        return node

    def visit_CEnumDefNode(self, node):
        return node

    def visit_CStructOrUnionDefNode(self, node):
        return node
class NormalizeTree(CythonTransform):
    """Post-parse normalisation pass.

    After parsing, a block containing a single statement is represented by
    that statement rather than by a StatListNode, which makes later
    transforms awkward (statements cannot be removed or inserted
    uniformly).  This pass wraps every such lone statement in a
    StatListNode.  It also eliminates PassStatNodes: once parsed, ``pass``
    is equivalent to an empty StatListNode, which leaves subsequent
    transforms with a single "empty block" representation to check for.
    """

    def __init__(self, context):
        super(NormalizeTree, self).__init__(context)
        self.is_in_statlist = False
        self.is_in_expr = False

    def visit_ExprNode(self, node):
        outer_expr_flag = self.is_in_expr
        self.is_in_expr = True
        self.visitchildren(node)
        self.is_in_expr = outer_expr_flag
        return node

    def visit_StatNode(self, node, is_listcontainer=False):
        outer_list_flag = self.is_in_statlist
        self.is_in_statlist = is_listcontainer
        self.visitchildren(node)
        self.is_in_statlist = outer_list_flag
        if self.is_in_statlist or self.is_in_expr:
            return node
        # lone statement: wrap it in a one-element statement list
        return Nodes.StatListNode(pos=node.pos, stats=[node])

    def visit_StatListNode(self, node):
        self.is_in_statlist = True
        self.visitchildren(node)
        self.is_in_statlist = False
        return node

    def visit_ParallelAssignmentNode(self, node):
        return self.visit_StatNode(node, True)

    def visit_CEnumDefNode(self, node):
        return self.visit_StatNode(node, True)

    def visit_CStructOrUnionDefNode(self, node):
        return self.visit_StatNode(node, True)

    def visit_PassStatNode(self, node):
        """Eliminate PassStatNode"""
        if self.is_in_statlist:
            # drop it from the surrounding list entirely
            return []
        # replace a lone 'pass' by an empty block
        return Nodes.StatListNode(pos=node.pos, stats=[])

    def visit_ExprStatNode(self, node):
        """Eliminate useless string literals"""
        if node.expr.is_string_literal:
            # a bare string statement is a no-op, treat like 'pass'
            return self.visit_PassStatNode(node)
        return self.visit_StatNode(node)

    def visit_CDeclaratorNode(self, node):
        return node
# Raised for (recoverable) errors found during post-parse processing.
class PostParseError(CompileError): pass

# error strings checked by unit tests, so define them
ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions'
ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)'
ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared'
class PostParse(ScopeTrackingTransform):
    """
    Basic interpretation of the parse tree, as well as validity
    checking that can be done on a very basic level on the parse
    tree (while still not being a problem with the basic syntax,
    as such).

    Specifically:
    - Default values to cdef assignments are turned into single
      assignments following the declaration (everywhere but in class
      bodies, where they raise a compile error)

    - Interpret some node structures into Python runtime values.
      Some nodes take compile-time arguments (currently:
      TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
      which should be interpreted. This happens in a general way
      and other steps should be taken to ensure validity.

      Type arguments cannot be interpreted in this way.

    - For __cythonbufferdefaults__ the arguments are checked for
      validity.

      TemplatedTypeNode has its directives interpreted:
      Any first positional argument goes into the "dtype" attribute,
      any "ndim" keyword argument goes into the "ndim" attribute and
      so on. Also it is checked that the directive combination is valid.
    - __cythonbufferdefaults__ attributes are parsed and put into the
      type information.

    Note: Currently Parsing.py does a lot of interpretation and
    reorganization that can be refactored into this transform
    if a more pure Abstract Syntax Tree is wanted.
    """
    def __init__(self, context):
        super(PostParse, self).__init__(context)
        # maps special attribute names to handlers invoked for their
        # default-value declarations inside cdef classes
        self.specialattribute_handlers = {
            '__cythonbufferdefaults__' : self.handle_bufferdefaults
        }

    def visit_LambdaNode(self, node):
        # unpack a lambda expression into the corresponding DefNode
        collector = YieldNodeCollector()
        collector.visitchildren(node.result_expr)
        if collector.yields or collector.awaits or isinstance(node.result_expr, ExprNodes.YieldExprNode):
            # generator lambda: the result expression is a statement
            body = Nodes.ExprStatNode(
                node.result_expr.pos, expr=node.result_expr)
        else:
            # plain lambda: the result expression becomes the return value
            body = Nodes.ReturnStatNode(
                node.result_expr.pos, value=node.result_expr)
        node.def_node = Nodes.DefNode(
            node.pos, name=node.name,
            args=node.args, star_arg=node.star_arg,
            starstar_arg=node.starstar_arg,
            body=body, doc=None)
        self.visitchildren(node)
        return node

    def visit_GeneratorExpressionNode(self, node):
        # unpack a generator expression into the corresponding DefNode
        node.def_node = Nodes.DefNode(node.pos, name=node.name,
                                      doc=None,
                                      args=[], star_arg=None,
                                      starstar_arg=None,
                                      body=node.loop)
        self.visitchildren(node)
        return node

    # cdef variables
    def handle_bufferdefaults(self, decl):
        # __cythonbufferdefaults__ must be assigned a dict literal
        if not isinstance(decl.default, ExprNodes.DictNode):
            raise PostParseError(decl.pos, ERR_BUF_DEFAULTS)
        self.scope_node.buffer_defaults_node = decl.default
        self.scope_node.buffer_defaults_pos = decl.pos

    def visit_CVarDefNode(self, node):
        # This assumes only plain names and pointers are assignable on
        # declaration. Also, it makes use of the fact that a cdef decl
        # must appear before the first use, so we don't have to deal with
        # "i = 3; cdef int i = i" and can simply move the nodes around.
        try:
            self.visitchildren(node)
            stats = [node]
            newdecls = []
            for decl in node.declarators:
                # unwrap pointer declarators down to the underlying name
                declbase = decl
                while isinstance(declbase, Nodes.CPtrDeclaratorNode):
                    declbase = declbase.base
                if isinstance(declbase, Nodes.CNameDeclaratorNode):
                    if declbase.default is not None:
                        if self.scope_type in ('cclass', 'pyclass', 'struct'):
                            if isinstance(self.scope_node, Nodes.CClassDefNode):
                                handler = self.specialattribute_handlers.get(decl.name)
                                if handler:
                                    # special attributes must be plain names
                                    if decl is not declbase:
                                        raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE)
                                    handler(decl)
                                    continue # Remove declaration
                            # default values are not allowed on class fields
                            raise PostParseError(decl.pos, ERR_CDEF_INCLASS)
                        # convert the default into a following assignment
                        first_assignment = self.scope_type != 'module'
                        stats.append(Nodes.SingleAssignmentNode(node.pos,
                            lhs=ExprNodes.NameNode(node.pos, name=declbase.name),
                            rhs=declbase.default, first=first_assignment))
                        declbase.default = None
                newdecls.append(decl)
            node.declarators = newdecls
            return stats
        except PostParseError as e:
            # An error in a cdef clause is ok, simply remove the declaration
            # and try to move on to report more errors
            self.context.nonfatal_error(e)
            return None

    # Split parallel assignments (a,b = b,a) into separate partial
    # assignments that are executed rhs-first using temps. This
    # restructuring must be applied before type analysis so that known
    # types on rhs and lhs can be matched directly. It is required in
    # the case that the types cannot be coerced to a Python type in
    # order to assign from a tuple.

    def visit_SingleAssignmentNode(self, node):
        self.visitchildren(node)
        return self._visit_assignment_node(node, [node.lhs, node.rhs])

    def visit_CascadedAssignmentNode(self, node):
        self.visitchildren(node)
        return self._visit_assignment_node(node, node.lhs_list + [node.rhs])

    def _visit_assignment_node(self, node, expr_list):
        """Flatten parallel assignments into separate single
        assignments or cascaded assignments.
        """
        if sum([ 1 for expr in expr_list
                 if expr.is_sequence_constructor or expr.is_string_literal ]) < 2:
            # no parallel assignments => nothing to do
            return node
        expr_list_list = []
        flatten_parallel_assignments(expr_list, expr_list_list)
        temp_refs = []
        eliminate_rhs_duplicates(expr_list_list, temp_refs)
        nodes = []
        for expr_list in expr_list_list:
            # each flattened item is [lhs, ..., rhs]
            lhs_list = expr_list[:-1]
            rhs = expr_list[-1]
            if len(lhs_list) == 1:
                node = Nodes.SingleAssignmentNode(rhs.pos,
                    lhs = lhs_list[0], rhs = rhs)
            else:
                node = Nodes.CascadedAssignmentNode(rhs.pos,
                    lhs_list = lhs_list, rhs = rhs)
            nodes.append(node)
        if len(nodes) == 1:
            assign_node = nodes[0]
        else:
            assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes)
        if temp_refs:
            # wrap the assignment in LetNodes that evaluate the shared
            # subexpressions first (innermost wrapper evaluated first)
            duplicates_and_temps = [ (temp.expression, temp)
                                     for temp in temp_refs ]
            sort_common_subsequences(duplicates_and_temps)
            for _, temp_ref in duplicates_and_temps[::-1]:
                assign_node = LetNode(temp_ref, assign_node)
        return assign_node

    def _flatten_sequence(self, seq, result):
        # recursively collect non-sequence args of nested constructors
        for arg in seq.args:
            if arg.is_sequence_constructor:
                self._flatten_sequence(arg, result)
            else:
                result.append(arg)
        return result

    def visit_DelStatNode(self, node):
        # 'del (a, (b, c))' deletes a, b and c individually
        self.visitchildren(node)
        node.args = self._flatten_sequence(node, [])
        return node

    def visit_ExceptClauseNode(self, node):
        if node.is_except_as:
            # except-as must delete NameNode target at the end
            del_target = Nodes.DelStatNode(
                node.pos,
                args=[ExprNodes.NameNode(
                    node.target.pos, name=node.target.name)],
                ignore_nonexisting=True)
            node.body = Nodes.StatListNode(
                node.pos,
                stats=[Nodes.TryFinallyStatNode(
                    node.pos,
                    body=node.body,
                    finally_clause=Nodes.StatListNode(
                        node.pos,
                        stats=[del_target]))])
        self.visitchildren(node)
        return node
def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence):
    """Replace rhs items by LetRefNodes if they appear more than once.

    Creates a sequence of LetRefNodes that set up the required temps
    and appends them to ref_node_sequence. The input list is modified
    in-place.
    """
    seen_nodes = set()
    ref_nodes = {}
    def find_duplicates(node):
        if node.is_literal or node.is_name:
            # no need to replace those; can't include attributes here
            # as their access is not necessarily side-effect free
            return
        if node in seen_nodes:
            # second occurrence: introduce a temp for it (only once per node)
            if node not in ref_nodes:
                ref_node = LetRefNode(node)
                ref_nodes[node] = ref_node
                ref_node_sequence.append(ref_node)
        else:
            seen_nodes.add(node)
            if node.is_sequence_constructor:
                for item in node.args:
                    find_duplicates(item)

    for expr_list in expr_list_list:
        rhs = expr_list[-1]
        find_duplicates(rhs)
    if not ref_nodes:
        return

    def substitute_nodes(node):
        if node in ref_nodes:
            return ref_nodes[node]
        elif node.is_sequence_constructor:
            node.args = list(map(substitute_nodes, node.args))
        return node

    # replace nodes inside of the common subexpressions
    for node in ref_nodes:
        if node.is_sequence_constructor:
            node.args = list(map(substitute_nodes, node.args))

    # replace common subexpressions on all rhs items
    for expr_list in expr_list_list:
        expr_list[-1] = substitute_nodes(expr_list[-1])
def sort_common_subsequences(items):
    """Sort items/subsequences so that all items and subsequences that
    an item contains appear before the item itself. This is needed
    because each rhs item must only be evaluated once, so its value
    must be evaluated first and then reused when packing sequences
    that contain it.

    This implies a partial order, and the sort must be stable to
    preserve the original order as much as possible, so we use a
    simple insertion sort (which is very fast for short sequences, the
    normal case in practice).
    """
    def contains(seq, x):
        # True if x occurs (by identity) anywhere inside seq, recursively
        for item in seq:
            if item is x:
                return True
            elif item.is_sequence_constructor and contains(item.args, x):
                return True
        return False
    def lower_than(a,b):
        # a must be evaluated before b if b is a sequence containing a
        return b.is_sequence_constructor and contains(b.args, a)

    for pos, item in enumerate(items):
        key = item[1] # the ResultRefNode which has already been injected into the sequences
        new_pos = pos
        # scan left for the outermost item that must come after 'key'
        for i in range(pos-1, -1, -1):
            if lower_than(key, items[i][0]):
                new_pos = i
        if new_pos != pos:
            # shift the intervening items right and insert 'item'
            for i in range(pos, new_pos, -1):
                items[i] = items[i-1]
            items[new_pos] = item
def unpack_string_to_character_literals(literal):
    """Split a string literal node into a list of single-character literal
    nodes of the same node class, all sharing the original's position."""
    node_class = literal.__class__
    value = literal.value
    # preserve the concrete string type (e.g. EncodedString) per character
    return [node_class(literal.pos, value=char_value, constant_result=char_value)
            for char_value in map(value.__class__, value)]
def flatten_parallel_assignments(input, output):
    # The input is a list of expression nodes, representing the LHSs
    # and RHS of one (possibly cascaded) assignment statement.  For
    # sequence constructors, rearranges the matching parts of both
    # sides into a list of equivalent assignments between the
    # individual elements.  This transformation is applied
    # recursively, so that nested structures get matched as well.
    rhs = input[-1]
    if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode))
            or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])):
        # nothing to unpack on either side: keep the assignment as-is
        output.append(input)
        return
    complete_assignments = []

    if rhs.is_sequence_constructor:
        rhs_args = rhs.args
    elif rhs.is_string_literal:
        # unpacking a string assigns its individual characters
        rhs_args = unpack_string_to_character_literals(rhs)

    rhs_size = len(rhs_args)
    # one target list per rhs element
    lhs_targets = [[] for _ in range(rhs_size)]
    starred_assignments = []
    for lhs in input[:-1]:
        if not lhs.is_sequence_constructor:
            if lhs.is_starred:
                error(lhs.pos, "starred assignment target must be in a list or tuple")
            # non-sequence lhs gets the whole rhs, unmodified
            complete_assignments.append(lhs)
            continue
        lhs_size = len(lhs.args)
        starred_targets = sum([1 for expr in lhs.args if expr.is_starred])
        if starred_targets > 1:
            error(lhs.pos, "more than 1 starred expression in assignment")
            output.append([lhs,rhs])
            continue
        elif lhs_size - starred_targets > rhs_size:
            error(lhs.pos, "need more than %d value%s to unpack"
                  % (rhs_size, (rhs_size != 1) and 's' or ''))
            output.append([lhs,rhs])
            continue
        elif starred_targets:
            map_starred_assignment(lhs_targets, starred_assignments,
                                   lhs.args, rhs_args)
        elif lhs_size < rhs_size:
            error(lhs.pos, "too many values to unpack (expected %d, got %d)"
                  % (lhs_size, rhs_size))
            output.append([lhs,rhs])
            continue
        else:
            # 1:1 match: distribute lhs elements over the rhs targets
            for targets, expr in zip(lhs_targets, lhs.args):
                targets.append(expr)

    if complete_assignments:
        complete_assignments.append(rhs)
        output.append(complete_assignments)

    # recursively flatten partial assignments
    for cascade, rhs in zip(lhs_targets, rhs_args):
        if cascade:
            cascade.append(rhs)
            flatten_parallel_assignments(cascade, output)

    # recursively flatten starred assignments
    for cascade in starred_assignments:
        if cascade[0].is_sequence_constructor:
            flatten_parallel_assignments(cascade, output)
        else:
            output.append(cascade)
def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args):
    # Appends the fixed-position LHS targets to the target list that
    # appear left and right of the starred argument.
    #
    # The starred_assignments list receives a new tuple
    # (lhs_target, rhs_values_list) that maps the remaining arguments
    # (those that match the starred target) to a list.

    # left side of the starred target
    for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)):
        if expr.is_starred:
            starred = i
            # number of fixed targets after the starred one
            lhs_remaining = len(lhs_args) - i - 1
            break
        targets.append(expr)
    else:
        # the caller guarantees a starred arg is present
        raise InternalError("no starred arg found when splitting starred assignment")

    # right side of the starred target
    for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:],
                                            lhs_args[starred + 1:])):
        targets.append(expr)

    # the starred target itself, must be assigned a (potentially empty) list
    target = lhs_args[starred].target # unpack starred node
    starred_rhs = rhs_args[starred:]
    if lhs_remaining:
        starred_rhs = starred_rhs[:-lhs_remaining]
    if starred_rhs:
        pos = starred_rhs[0].pos
    else:
        pos = target.pos
    starred_assignments.append([
        target, ExprNodes.ListNode(pos=pos, args=starred_rhs)])
class PxdPostParse(CythonTransform, SkipDeclarations):
    """
    Basic interpretation/validity checking that should only be
    done on pxd trees.

    Most checking happens in the parser; this transform enforces:
    - "def" functions are let through only if they fill the
      getbuffer/releasebuffer slots
    - cdef functions are let through only if they are on the
      top level and are declared "inline"
    """
    ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'"
    ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'"

    def __call__(self, node):
        # Track where we are: module level of a pxd file to begin with.
        self.scope_type = 'pxd'
        return super(PxdPostParse, self).__call__(node)

    def visit_CClassDefNode(self, node):
        # Enter the cdef-class scope for the duration of the subtree visit.
        enclosing = self.scope_type
        self.scope_type = 'cclass'
        self.visitchildren(node)
        self.scope_type = enclosing
        return node

    def visit_FuncDefNode(self, node):
        """Reject function definitions that are not legal in a pxd file.

        Returns the node if acceptable, otherwise reports a nonfatal
        error and returns None to drop it from the tree.
        (FuncDefNode always comes with an implementation; without one
        they would be CVarDefNodes instead.)
        """
        problem = self.ERR_INLINE_ONLY

        # The buffer protocol slots may be implemented as plain "def"
        # methods of a cdef class even inside a pxd file.
        is_buffer_slot = (
            isinstance(node, Nodes.DefNode)
            and self.scope_type == 'cclass'
            and node.name in ('__getbuffer__', '__releasebuffer__'))
        if is_buffer_slot:
            problem = None

        if isinstance(node, Nodes.CFuncDefNode):
            declared_inline = (
                u'inline' in node.modifiers
                and self.scope_type in ('pxd', 'cclass'))
            if declared_inline:
                node.inline_in_pxd = True
                if node.visibility != 'private':
                    problem = self.ERR_NOGO_WITH_INLINE % node.visibility
                elif node.api:
                    problem = self.ERR_NOGO_WITH_INLINE % 'api'
                else:
                    problem = None  # allow inline function
            else:
                problem = self.ERR_INLINE_ONLY

        if problem:
            self.context.nonfatal_error(PostParseError(node.pos, problem))
            return None
        return node
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
    """
    After parsing, directives can be stored in a number of places:
    - #cython-comments at the top of the file (stored in ModuleNode)
    - Command-line arguments overriding these
    - @cython.directivename decorators
    - with cython.directivename: statements

    This transform is responsible for interpreting these various sources
    and store the directive in two ways:
    - Set the directives attribute of the ModuleNode for global directives.
    - Use a CompilerDirectivesNode to override directives for a subtree.
    (The first one is primarily to not have to modify with the tree
    structure, so that ModuleNode stay on top.)

    The directives are stored in dictionaries from name to value in effect.
    Each such dictionary is always filled in for all possible directives,
    using default values where no value is given by the user.

    The available directives are controlled in Options.py.

    Note that we have to run this prior to analysis, and so some minor
    duplication of functionality has to occur: We manually track cimports
    and which names the "cython" module may have been imported to.
    """
    # Maps unary "cython.*" helper names to the expression node that implements them.
    unop_method_nodes = {
        'typeof': ExprNodes.TypeofNode,
        'operator.address': ExprNodes.AmpersandNode,
        'operator.dereference': ExprNodes.DereferenceNode,
        'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
        'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
        'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
        'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
        # For backwards compatibility.
        'address': ExprNodes.AmpersandNode,
    }

    # Maps binary "cython.*" helper names to node constructors.
    binop_method_nodes = {
        'operator.comma' : ExprNodes.c_binop_constructor(','),
    }

    # Names in the "cython" module that the compiler treats specially.
    special_methods = set(['declare', 'union', 'struct', 'typedef',
                           'sizeof', 'cast', 'pointer', 'compiled',
                           'NULL', 'fused_type', 'parallel'])
    special_methods.update(unop_method_nodes)

    # Directives usable below "cython.parallel".
    valid_parallel_directives = set([
        "parallel",
        "prange",
        "threadid",
        #"threadsavailable",
    ])

    def __init__(self, context, compilation_directive_defaults):
        super(InterpretCompilerDirectives, self).__init__(context)
        self.cython_module_names = set()
        self.directive_names = {'staticmethod': 'staticmethod'}
        self.parallel_directives = {}
        # Start from the global defaults and layer the per-compilation
        # overrides on top (deep-copied so later mutation stays local).
        directives = copy.deepcopy(Options.directive_defaults)
        for key, value in compilation_directive_defaults.items():
            directives[_unicode(key)] = copy.deepcopy(value)
        self.directives = directives

    def check_directive_scope(self, pos, directive, scope):
        """Report a nonfatal error if 'directive' may not appear in 'scope'.

        Returns False when the directive is known but disallowed in this
        scope; True otherwise (an unknown directive is reported as an
        error but still returns True).
        """
        legal_scopes = Options.directive_scopes.get(directive, None)
        if legal_scopes and scope not in legal_scopes:
            self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive '
                                        'is not allowed in %s scope' % (directive, scope)))
            return False
        else:
            if (directive not in Options.directive_defaults
                    and directive not in Options.directive_types):
                error(pos, "Invalid directive: '%s'." % (directive,))
            return True

    # Set up processing and handle the cython: comments.
    def visit_ModuleNode(self, node):
        for key in sorted(node.directive_comments):
            if not self.check_directive_scope(node.pos, key, 'module'):
                # check_directive_scope() has already reported the scope
                # violation via nonfatal_error(); just drop the directive.
                # (BUG FIX: this branch used to call self.wrong_scope_error(),
                # a method that does not exist, raising AttributeError.)
                del node.directive_comments[key]
        self.module_scope = node.scope
        self.directives.update(node.directive_comments)
        node.directives = self.directives
        node.parallel_directives = self.parallel_directives
        self.visitchildren(node)
        node.cython_module_names = self.cython_module_names
        return node

    # The following four functions track imports and cimports that
    # begin with "cython"
    def is_cython_directive(self, name):
        """Return true-ish if 'name' is a compiler directive, a special
        cython helper, or a basic C type name."""
        return (name in Options.directive_types or
                name in self.special_methods or
                PyrexTypes.parse_basic_type(name))

    def is_parallel_directive(self, full_name, pos):
        """
        Checks to see if fullname (e.g. cython.parallel.prange) is a valid
        parallel directive. If it is a star import it also updates the
        parallel_directives.
        """
        result = (full_name + ".").startswith("cython.parallel.")
        if result:
            directive = full_name.split('.')
            if full_name == u"cython.parallel":
                self.parallel_directives[u"parallel"] = u"cython.parallel"
            elif full_name == u"cython.parallel.*":
                for name in self.valid_parallel_directives:
                    self.parallel_directives[name] = u"cython.parallel.%s" % name
            elif (len(directive) != 3 or
                  directive[-1] not in self.valid_parallel_directives):
                error(pos, "No such directive: %s" % full_name)
            # Any use of cython.parallel requires thread initialisation.
            self.module_scope.use_utility_code(
                UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
        return result

    def visit_CImportStatNode(self, node):
        """Track 'cimport cython[...]' statements and strip directive cimports."""
        if node.module_name == u"cython":
            self.cython_module_names.add(node.as_name or u"cython")
        elif node.module_name.startswith(u"cython."):
            if node.module_name.startswith(u"cython.parallel."):
                error(node.pos, node.module_name + " is not a module")
            if node.module_name == u"cython.parallel":
                if node.as_name and node.as_name != u"cython":
                    self.parallel_directives[node.as_name] = node.module_name
                else:
                    self.cython_module_names.add(u"cython")
                    self.parallel_directives[
                        u"cython.parallel"] = node.module_name
                self.module_scope.use_utility_code(
                    UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
            elif node.as_name:
                self.directive_names[node.as_name] = node.module_name[7:]
            else:
                self.cython_module_names.add(u"cython")
            # if this cimport was a compiler directive, we don't
            # want to leave the cimport node sitting in the tree
            return None
        return node

    def visit_FromCImportStatNode(self, node):
        """Track 'from cython[...] cimport ...' and filter out directive names."""
        if not node.relative_level and (
                node.module_name == u"cython" or node.module_name.startswith(u"cython.")):
            submodule = (node.module_name + u".")[7:]
            newimp = []
            for pos, name, as_name, kind in node.imported_names:
                full_name = submodule + name
                qualified_name = u"cython." + full_name
                if self.is_parallel_directive(qualified_name, node.pos):
                    # from cython cimport parallel, or
                    # from cython.parallel cimport parallel, prange, ...
                    self.parallel_directives[as_name or name] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[as_name or name] = full_name
                    if kind is not None:
                        self.context.nonfatal_error(PostParseError(pos,
                            "Compiler directive imports must be plain imports"))
                else:
                    newimp.append((pos, name, as_name, kind))
            if not newimp:
                return None
            node.imported_names = newimp
        return node

    def visit_FromImportStatNode(self, node):
        """Track 'from cython[...] import ...' and filter out directive names."""
        if (node.module.module_name.value == u"cython") or \
               node.module.module_name.value.startswith(u"cython."):
            submodule = (node.module.module_name.value + u".")[7:]
            newimp = []
            for name, name_node in node.items:
                full_name = submodule + name
                qualified_name = u"cython." + full_name
                if self.is_parallel_directive(qualified_name, node.pos):
                    self.parallel_directives[name_node.name] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[name_node.name] = full_name
                else:
                    newimp.append((name, name_node))
            if not newimp:
                return None
            node.items = newimp
        return node

    def visit_SingleAssignmentNode(self, node):
        """Rewrite 'x = __import__("cython...")' into a cimport statement."""
        if isinstance(node.rhs, ExprNodes.ImportNode):
            module_name = node.rhs.module_name.value
            is_parallel = (module_name + u".").startswith(u"cython.parallel.")
            if module_name != u"cython" and not is_parallel:
                return node
            as_name = node.lhs.name
            node = Nodes.CImportStatNode(node.pos,
                                         module_name = module_name,
                                         as_name = as_name)
            node = self.visit_CImportStatNode(node)
        else:
            self.visitchildren(node)
        return node

    def visit_NameNode(self, node):
        """Mark names that refer to the cython module or to a directive."""
        if node.name in self.cython_module_names:
            node.is_cython_module = True
        else:
            node.cython_attribute = self.directive_names.get(node.name)
        return node

    def try_to_parse_directives(self, node):
        """If 'node' is the contents of a directive (in a with statement or
        decorator), return a list of (directivename, value) pairs;
        otherwise return None."""
        if isinstance(node, ExprNodes.CallNode):
            self.visit(node.function)
            optname = node.function.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if directivetype:
                    args, kwds = node.explicit_args_kwds()
                    directives = []
                    key_value_pairs = []
                    if kwds is not None and directivetype is not dict:
                        # Keyword arguments may themselves be sub-directives
                        # (e.g. cython.warn(undeclared=True)); split them off.
                        for keyvalue in kwds.key_value_pairs:
                            key, value = keyvalue
                            sub_optname = "%s.%s" % (optname, key.value)
                            if Options.directive_types.get(sub_optname):
                                directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
                            else:
                                key_value_pairs.append(keyvalue)
                        if not key_value_pairs:
                            kwds = None
                        else:
                            kwds.key_value_pairs = key_value_pairs
                        if directives and not kwds and not args:
                            return directives
                    directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
                    return directives
        elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
            self.visit(node)
            optname = node.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if directivetype is bool:
                    # Bare mention of a boolean directive means True.
                    return [(optname, True)]
                elif directivetype is None:
                    return [(optname, None)]
                else:
                    raise PostParseError(
                        node.pos, "The '%s' directive should be used as a function call." % optname)
        return None

    def try_to_parse_directive(self, optname, args, kwds, pos):
        """Parse a single directive call into an (optname, value) pair.

        Raises PostParseError when the arguments do not match the
        directive's declared type.
        """
        directivetype = Options.directive_types.get(optname)
        if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
            # 'directive(None)' resets the directive to its default value.
            return optname, Options.directive_defaults[optname]
        elif directivetype is bool:
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time boolean argument' % optname)
            return (optname, args[0].value)
        elif directivetype is int:
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time integer argument' % optname)
            return (optname, int(args[0].value))
        elif directivetype is str:
            if kwds is not None or len(args) != 1 or not isinstance(
                    args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time string argument' % optname)
            return (optname, str(args[0].value))
        elif directivetype is type:
            if kwds is not None or len(args) != 1:
                raise PostParseError(pos,
                    'The %s directive takes one type argument' % optname)
            return (optname, args[0])
        elif directivetype is dict:
            if len(args) != 0:
                raise PostParseError(pos,
                    'The %s directive takes no prepositional arguments' % optname)
            return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs])
        elif directivetype is list:
            if kwds and len(kwds) != 0:
                raise PostParseError(pos,
                    'The %s directive takes no keyword arguments' % optname)
            return optname, [ str(arg.value) for arg in args ]
        elif callable(directivetype):
            # Custom parser function declared in Options.directive_types.
            if kwds is not None or len(args) != 1 or not isinstance(
                    args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time string argument' % optname)
            return (optname, directivetype(optname, str(args[0].value)))
        else:
            assert False

    def visit_with_directives(self, body, directives):
        """Visit 'body' with 'directives' layered on top of the currently
        active set, and wrap the result in a CompilerDirectivesNode."""
        olddirectives = self.directives
        newdirectives = copy.copy(olddirectives)
        newdirectives.update(directives)
        self.directives = newdirectives
        assert isinstance(body, Nodes.StatListNode), body
        retbody = self.visit_Node(body)
        directive = Nodes.CompilerDirectivesNode(pos=retbody.pos, body=retbody,
                                                 directives=newdirectives)
        self.directives = olddirectives
        return directive

    # Handle decorators
    def visit_FuncDefNode(self, node):
        directives = self._extract_directives(node, 'function')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)

    def visit_CVarDefNode(self, node):
        directives = self._extract_directives(node, 'function')
        if not directives:
            return node
        for name, value in directives.items():
            if name == 'locals':
                node.directive_locals = value
            elif name not in ('final', 'staticmethod'):
                self.context.nonfatal_error(PostParseError(
                    node.pos,
                    "Cdef functions can only take cython.locals(), "
                    "staticmethod, or final decorators, got %s." % name))
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)

    def visit_CClassDefNode(self, node):
        directives = self._extract_directives(node, 'cclass')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)

    def visit_CppClassNode(self, node):
        directives = self._extract_directives(node, 'cppclass')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)

    def visit_PyClassDefNode(self, node):
        directives = self._extract_directives(node, 'class')
        if not directives:
            return self.visit_Node(node)
        body = Nodes.StatListNode(node.pos, stats=[node])
        return self.visit_with_directives(body, directives)

    def _extract_directives(self, node, scope_name):
        """Split node.decorators into compiler directives and real decorators.

        Returns a dict of the extracted directives; node.decorators is
        updated to keep only the real (and dual-use) decorators.
        """
        if not node.decorators:
            return {}
        # Split the decorators into two lists -- real decorators and directives
        directives = []
        realdecs = []
        both = []
        for dec in node.decorators:
            new_directives = self.try_to_parse_directives(dec.decorator)
            if new_directives is not None:
                for directive in new_directives:
                    if self.check_directive_scope(node.pos, directive[0], scope_name):
                        name, value = directive
                        # only record a directive if it changes the active value
                        if self.directives.get(name, object()) != value:
                            directives.append(directive)
                        if directive[0] == 'staticmethod':
                            # staticmethod is both a directive and a real decorator
                            both.append(dec)
            else:
                realdecs.append(dec)
        if realdecs and isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)):
            raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
        else:
            node.decorators = realdecs + both
        # merge or override repeated directives
        optdict = {}
        directives.reverse() # Decorators coming first take precedence
        for directive in directives:
            name, value = directive
            if name in optdict:
                old_value = optdict[name]
                # keywords and arg lists can be merged, everything
                # else overrides completely
                if isinstance(old_value, dict):
                    old_value.update(value)
                elif isinstance(old_value, list):
                    old_value.extend(value)
                else:
                    optdict[name] = value
            else:
                optdict[name] = value
        return optdict

    # Handle with statements
    def visit_WithStatNode(self, node):
        directive_dict = {}
        for directive in self.try_to_parse_directives(node.manager) or []:
            if directive is not None:
                if node.target is not None:
                    self.context.nonfatal_error(
                        PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
                else:
                    name, value = directive
                    if name in ('nogil', 'gil'):
                        # special case: in pure mode, "with nogil" spells "with cython.nogil"
                        node = Nodes.GILStatNode(node.pos, state = name, body = node.body)
                        return self.visit_Node(node)
                    if self.check_directive_scope(node.pos, name, 'with statement'):
                        directive_dict[name] = value
        if directive_dict:
            return self.visit_with_directives(node.body, directive_dict)
        return self.visit_Node(node)
class ParallelRangeTransform(CythonTransform, SkipDeclarations):
    """
    Transform cython.parallel stuff. The parallel_directives come from the
    module node, set there by InterpretCompilerDirectives.
    x = cython.parallel.threadavailable() -> ParallelThreadAvailableNode
    with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode
    print cython.parallel.threadid() -> ParallelThreadIdNode
    for i in cython.parallel.prange(...): -> ParallelRangeNode
    ...
    """
    # a list of names, maps 'cython.parallel.prange' in the code to
    # ['cython', 'parallel', 'prange']; accumulated across NameNode +
    # AttributeNode visits and reset by get_directive_class_node()
    parallel_directive = None
    # Indicates whether a namenode in an expression is the cython module
    namenode_is_cython_module = False
    # Keep track of whether we are the context manager of a 'with' statement
    in_context_manager_section = False
    # One of 'prange' or 'with parallel'. This is used to disallow closely
    # nested 'with parallel:' blocks
    state = None
    # Maps a fully qualified directive name to the AST node class it becomes.
    directive_to_node = {
        u"cython.parallel.parallel": Nodes.ParallelWithBlockNode,
        # u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode,
        u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode,
        u"cython.parallel.prange": Nodes.ParallelRangeNode,
    }
    def node_is_parallel_directive(self, node):
        """Return whether a NameNode starts a (possibly aliased) parallel directive."""
        return node.name in self.parallel_directives or node.is_cython_module
    def get_directive_class_node(self, node):
        """
        Figure out which parallel directive was used and return the associated
        Node class.
        E.g. for a cython.parallel.prange() call we return ParallelRangeNode
        """
        if self.namenode_is_cython_module:
            # name path starts at the cython module itself, e.g. cython.parallel.prange
            directive = '.'.join(self.parallel_directive)
        else:
            # name path starts at an imported/aliased directive; resolve the alias
            directive = self.parallel_directives[self.parallel_directive[0]]
            directive = '%s.%s' % (directive,
                                   '.'.join(self.parallel_directive[1:]))
            directive = directive.rstrip('.')
        cls = self.directive_to_node.get(directive)
        if cls is None and not (self.namenode_is_cython_module and
                                self.parallel_directive[0] != 'parallel'):
            error(node.pos, "Invalid directive: %s" % directive)
        # reset accumulation state for the next expression
        self.namenode_is_cython_module = False
        self.parallel_directive = None
        return cls
    def visit_ModuleNode(self, node):
        """
        If any parallel directives were imported, copy them over and visit
        the AST
        """
        if node.parallel_directives:
            self.parallel_directives = node.parallel_directives
            return self.visit_Node(node)
        # No parallel directives were imported, so they can't be used :)
        return node
    def visit_NameNode(self, node):
        # Start accumulating a dotted directive path if this name begins one.
        if self.node_is_parallel_directive(node):
            self.parallel_directive = [node.name]
            self.namenode_is_cython_module = node.is_cython_module
        return node
    def visit_AttributeNode(self, node):
        # Extend the accumulated dotted path (e.g. append 'prange' to ['cython', 'parallel']).
        self.visitchildren(node)
        if self.parallel_directive:
            self.parallel_directive.append(node.attribute)
        return node
    def visit_CallNode(self, node):
        self.visit(node.function)
        if not self.parallel_directive:
            return node
        # We are a parallel directive, replace this node with the
        # corresponding ParallelSomethingSomething node
        if isinstance(node, ExprNodes.GeneralCallNode):
            args = node.positional_args.args
            kwargs = node.keyword_args
        else:
            args = node.args
            kwargs = {}
        parallel_directive_class = self.get_directive_class_node(node)
        if parallel_directive_class:
            # Note: in case of a parallel() the body is set by
            # visit_WithStatNode
            node = parallel_directive_class(node.pos, args=args, kwargs=kwargs)
        return node
    def visit_WithStatNode(self, node):
        "Rewrite with cython.parallel.parallel() blocks"
        newnode = self.visit(node.manager)
        if isinstance(newnode, Nodes.ParallelWithBlockNode):
            if self.state == 'parallel with':
                error(node.manager.pos,
                      "Nested parallel with blocks are disallowed")
            self.state = 'parallel with'
            body = self.visit(node.body)
            self.state = None
            newnode.body = body
            return newnode
        elif self.parallel_directive:
            parallel_directive_class = self.get_directive_class_node(node)
            if not parallel_directive_class:
                # There was an error, stop here and now
                return None
            if parallel_directive_class is Nodes.ParallelWithBlockNode:
                error(node.pos, "The parallel directive must be called")
                return None
        node.body = self.visit(node.body)
        return node
    def visit_ForInStatNode(self, node):
        "Rewrite 'for i in cython.parallel.prange(...):'"
        self.visit(node.iterator)
        self.visit(node.target)
        in_prange = isinstance(node.iterator.sequence,
                               Nodes.ParallelRangeNode)
        previous_state = self.state
        if in_prange:
            # This will replace the entire ForInStatNode, so copy the
            # attributes
            parallel_range_node = node.iterator.sequence
            parallel_range_node.target = node.target
            parallel_range_node.body = node.body
            parallel_range_node.else_clause = node.else_clause
            node = parallel_range_node
            if not isinstance(node.target, ExprNodes.NameNode):
                error(node.target.pos,
                      "Can only iterate over an iteration variable")
            self.state = 'prange'
        # body/else are visited with the (possibly updated) state active
        self.visit(node.body)
        self.state = previous_state
        self.visit(node.else_clause)
        return node
    def visit(self, node):
        "Visit a node that may be None"
        # Returns None for a None node instead of delegating to the visitor.
        if node is not None:
            return super(ParallelRangeTransform, self).visit(node)
class WithTransform(CythonTransform, SkipDeclarations):
    """Rewrite 'with' (and 'async with') statements into the equivalent
    try/except/finally structure with explicit __enter__/__exit__
    (or __aenter__/__aexit__) special method calls.
    """

    def visit_WithStatNode(self, node):
        """Expand a WithStatNode in place and return it.

        The body is wrapped in try/except/finally; the except clause calls
        __exit__ with the exception info and re-raises when it returns a
        false value, the finally clause calls __exit__ with (None, None, None)
        if it has not run yet.
        """
        self.visitchildren(node, 'body')
        pos = node.pos
        is_async = node.is_async
        body, target, manager = node.body, node.target, node.manager
        # manager.__enter__()  /  await manager.__aenter__()
        node.enter_call = ExprNodes.SimpleCallNode(
            pos, function=ExprNodes.AttributeNode(
                pos, obj=ExprNodes.CloneNode(manager),
                attribute=EncodedString('__aenter__' if is_async else '__enter__'),
                is_special_lookup=True),
            args=[],
            is_temp=True)
        if is_async:
            node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call)
        if target is not None:
            # 'with m as t:' assigns the __enter__ result to t before the body
            body = Nodes.StatListNode(
                pos, stats=[
                    Nodes.WithTargetAssignmentStatNode(
                        pos, lhs=target, with_node=node),
                    body])
        excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[
            ExprNodes.ExcValueNode(pos) for _ in range(3)])
        # BUG FIX: 'await' became a reserved keyword in Python 3.7, so the
        # former literal 'await=...' keyword argument spelling is a
        # SyntaxError there; pass the same keyword via ** dict unpacking.
        except_clause = Nodes.ExceptClauseNode(
            pos, body=Nodes.IfStatNode(
                pos, if_clauses=[
                    Nodes.IfClauseNode(
                        pos, condition=ExprNodes.NotNode(
                            pos, operand=ExprNodes.WithExitCallNode(
                                pos, with_stat=node,
                                test_if_run=False,
                                args=excinfo_target,
                                **{'await': ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None})),
                        body=Nodes.ReraiseStatNode(pos),
                        ),
                    ],
                else_clause=None),
            pattern=None,
            target=None,
            excinfo_target=excinfo_target,
            )
        node.body = Nodes.TryFinallyStatNode(
            pos, body=Nodes.TryExceptStatNode(
                pos, body=body,
                except_clauses=[except_clause],
                else_clause=None,
                ),
            finally_clause=Nodes.ExprStatNode(
                pos, expr=ExprNodes.WithExitCallNode(
                    pos, with_stat=node,
                    test_if_run=True,
                    args=ExprNodes.TupleNode(
                        pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
                    **{'await': ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None})),
            handle_error_case=False,
            )
        return node

    def visit_ExprNode(self, node):
        # With statements are never inside expressions.
        return node
class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
    """Originally, this was the only place where decorators were
    transformed into the corresponding calling code. Now, this is
    done directly in DefNode and PyClassDefNode to avoid reassignments
    to the function/class name - except for cdef class methods. For
    those, the reassignment is required as methods are originally
    defined in the PyMethodDef struct.
    The IndirectionNode allows DefNode to override the decorator.
    """

    def visit_DefNode(self, func_node):
        # Capture the scope type before visiting: the child visit may
        # change self.scope_type.
        enclosing_scope = self.scope_type
        func_node = self.visit_FuncDefNode(func_node)
        if enclosing_scope != 'cclass' or not func_node.decorators:
            return func_node
        return self.handle_decorators(
            func_node, func_node.decorators, func_node.name)

    def handle_decorators(self, node, decorators, name):
        """Synthesize 'name = deco1(deco2(...(name)))' right after 'node'."""
        call_chain = ExprNodes.NameNode(node.pos, name=name)
        # Decorators apply bottom-up, so build the call chain innermost-first.
        for dec in reversed(decorators):
            call_chain = ExprNodes.SimpleCallNode(
                dec.pos,
                function=dec.decorator,
                args=[call_chain])
        assignment = Nodes.SingleAssignmentNode(
            node.pos,
            lhs=ExprNodes.NameNode(node.pos, name=name),
            rhs=call_chain)
        indirection = Nodes.IndirectionNode([assignment])
        node.decorator_indirection = indirection
        return [node, indirection]
class CnameDirectivesTransform(CythonTransform, SkipDeclarations):
    """
    Only part of the CythonUtilityCode pipeline. Must be run before
    DecoratorTransform in case this is a decorator for a cdef class.
    It filters out @cname('my_cname') decorators and rewrites them to
    CnameDecoratorNodes.
    """

    def handle_function(self, node):
        """Strip the first @cname(...) decorator, if any, and wrap the
        declaration in a CnameDecoratorNode carrying the given cname."""
        if not getattr(node, 'decorators', None):
            return self.visit_Node(node)

        for index, dec_node in enumerate(node.decorators):
            call = dec_node.decorator
            is_cname_call = (isinstance(call, ExprNodes.CallNode)
                             and call.function.is_name
                             and call.function.name == 'cname')
            if not is_cname_call:
                continue

            args, kwargs = call.explicit_args_kwds()
            if kwargs:
                raise AssertionError(
                        "cname decorator does not take keyword arguments")
            if len(args) != 1:
                raise AssertionError(
                        "cname decorator takes exactly one argument")
            if not (args[0].is_literal and
                    args[0].type == Builtin.str_type):
                raise AssertionError(
                        "argument to cname decorator must be a string literal")

            cname = args[0].compile_time_value(None)
            del node.decorators[index]
            node = Nodes.CnameDecoratorNode(pos=node.pos, node=node,
                                            cname=cname)
            break

        return self.visit_Node(node)

    visit_FuncDefNode = handle_function
    visit_CClassDefNode = handle_function
    visit_CEnumDefNode = handle_function
    visit_CStructOrUnionDefNode = handle_function
class ForwardDeclareTypes(CythonTransform):
    """Declare enum, struct/union and extension-class types in the module
    scope up front, so that later declarations can refer to them."""

    def visit_CompilerDirectivesNode(self, node):
        # Temporarily switch the module scope to this subtree's directives.
        env = self.module_scope
        saved_directives = env.directives
        env.directives = node.directives
        self.visitchildren(node)
        env.directives = saved_directives
        return node

    def visit_ModuleNode(self, node):
        scope = node.scope
        self.module_scope = scope
        scope.directives = node.directives
        self.visitchildren(node)
        return node

    def visit_CDefExternNode(self, node):
        # Mark declarations inside 'cdef extern' blocks as coming from a
        # C include; restore the previous flag afterwards.
        scope = self.module_scope
        previous_flag = scope.in_cinclude
        scope.in_cinclude = 1
        self.visitchildren(node)
        scope.in_cinclude = previous_flag
        return node

    def visit_CEnumDefNode(self, node):
        node.declare(self.module_scope)
        return node

    def visit_CStructOrUnionDefNode(self, node):
        # Avoid re-declaring an already known struct/union.
        if node.name not in self.module_scope.entries:
            node.declare(self.module_scope)
        return node

    def visit_CClassDefNode(self, node):
        # Avoid re-declaring an already known extension class.
        if node.class_name not in self.module_scope.entries:
            node.declare(self.module_scope)
        return node
class AnalyseDeclarationsTransform(EnvTransform):
basic_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
""", level='c_class', pipeline=[NormalizeTree(None)])
basic_pyobject_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
def __del__(self):
ATTR = None
""", level='c_class', pipeline=[NormalizeTree(None)])
basic_property_ro = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
""", level='c_class', pipeline=[NormalizeTree(None)])
struct_or_union_wrapper = TreeFragment(u"""
cdef class NAME:
cdef TYPE value
def __init__(self, MEMBER=None):
cdef int count
count = 0
INIT_ASSIGNMENTS
if IS_UNION and count > 1:
raise ValueError, "At most one union member should be specified."
def __str__(self):
return STR_FORMAT % MEMBER_TUPLE
def __repr__(self):
return REPR_FORMAT % MEMBER_TUPLE
""", pipeline=[NormalizeTree(None)])
init_assignment = TreeFragment(u"""
if VALUE is not None:
ATTR = VALUE
count += 1
""", pipeline=[NormalizeTree(None)])
fused_function = None
in_lambda = 0
def __call__(self, root):
# needed to determine if a cdef var is declared after it's used.
self.seen_vars_stack = []
self.fused_error_funcs = set()
super_class = super(AnalyseDeclarationsTransform, self)
self._super_visit_FuncDefNode = super_class.visit_FuncDefNode
return super_class.__call__(root)
def visit_NameNode(self, node):
self.seen_vars_stack[-1].add(node.name)
return node
def visit_ModuleNode(self, node):
self.seen_vars_stack.append(set())
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.seen_vars_stack.pop()
return node
def visit_LambdaNode(self, node):
self.in_lambda += 1
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.in_lambda -= 1
return node
def visit_CClassDefNode(self, node):
node = self.visit_ClassDefNode(node)
if node.scope and node.scope.implemented and node.body:
stats = []
for entry in node.scope.var_entries:
if entry.needs_property:
property = self.create_Property(entry)
property.analyse_declarations(node.scope)
self.visit(property)
stats.append(property)
if stats:
node.body.stats += stats
return node
def _handle_fused_def_decorators(self, old_decorators, env, node):
"""
Create function calls to the decorators and reassignments to
the function.
"""
# Delete staticmethod and classmethod decorators, this is
# handled directly by the fused function object.
decorators = []
for decorator in old_decorators:
func = decorator.decorator
if (not func.is_name or
func.name not in ('staticmethod', 'classmethod') or
env.lookup_here(func.name)):
# not a static or classmethod
decorators.append(decorator)
if decorators:
transform = DecoratorTransform(self.context)
def_node = node.node
_, reassignments = transform.handle_decorators(
def_node, decorators, def_node.name)
reassignments.analyse_declarations(env)
node = [node, reassignments]
return node
def _handle_def(self, decorators, env, node):
"Handle def or cpdef fused functions"
# Create PyCFunction nodes for each specialization
node.stats.insert(0, node.py_func)
node.py_func = self.visit(node.py_func)
node.update_fused_defnode_entry(env)
pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func,
True)
pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env))
node.resulting_fused_function = pycfunc
# Create assignment node for our def function
node.fused_func_assignment = self._create_assignment(
node.py_func, ExprNodes.CloneNode(pycfunc), env)
if decorators:
node = self._handle_fused_def_decorators(decorators, env, node)
return node
def _create_fused_function(self, env, node):
"Create a fused function for a DefNode with fused arguments"
from . import FusedNode
if self.fused_function or self.in_lambda:
if self.fused_function not in self.fused_error_funcs:
if self.in_lambda:
error(node.pos, "Fused lambdas not allowed")
else:
error(node.pos, "Cannot nest fused functions")
self.fused_error_funcs.add(self.fused_function)
node.body = Nodes.PassStatNode(node.pos)
for arg in node.args:
if arg.type.is_fused:
arg.type = arg.type.get_fused_types()[0]
return node
decorators = getattr(node, 'decorators', None)
node = FusedNode.FusedCFuncDefNode(node, env)
self.fused_function = node
self.visitchildren(node)
self.fused_function = None
if node.py_func:
node = self._handle_def(decorators, env, node)
return node
def _handle_nogil_cleanup(self, lenv, node):
"Handle cleanup for 'with gil' blocks in nogil functions."
if lenv.nogil and lenv.has_with_gil_block:
# Acquire the GIL for cleanup in 'nogil' functions, by wrapping
# the entire function body in try/finally.
# The corresponding release will be taken care of by
# Nodes.FuncDefNode.generate_function_definitions()
node.body = Nodes.NogilTryFinallyStatNode(
node.body.pos,
body=node.body,
finally_clause=Nodes.EnsureGILNode(node.body.pos),
finally_except_clause=Nodes.EnsureGILNode(node.body.pos))
def _handle_fused(self, node):
if node.is_generator and node.has_fused_arguments:
node.has_fused_arguments = False
error(node.pos, "Fused generators not supported")
node.gbody = Nodes.StatListNode(node.pos,
stats=[],
body=Nodes.PassStatNode(node.pos))
return node.has_fused_arguments
    def visit_FuncDefNode(self, node):
        """
        Analyse a function and its body, as that hasn't happened yet. Also
        analyse the directive_locals set by @cython.locals().
        Then, if we are a function with fused arguments, replace the function
        (after it has declared itself in the symbol table!) with a
        FusedCFuncDefNode, and analyse its children (which are in turn normal
        functions). If we're a normal function, just analyse the body of the
        function.
        """
        env = self.current_env()
        # Fresh name-tracking set for this function's scope (used by
        # visit_CNameDeclaratorNode to warn about use-before-declaration).
        self.seen_vars_stack.append(set())
        lenv = node.local_scope
        node.declare_arguments(lenv)
        # @cython.locals(...)
        for var, type_node in node.directive_locals.items():
            if not lenv.lookup_here(var): # don't redeclare args
                type = type_node.analyse_as_type(lenv)
                if type:
                    lenv.declare_var(var, type, type_node.pos)
                else:
                    error(type_node.pos, "Not a type")
        if self._handle_fused(node):
            node = self._create_fused_function(env, node)
        else:
            node.body.analyse_declarations(lenv)
            self._handle_nogil_cleanup(lenv, node)
            self._super_visit_FuncDefNode(node)
        self.seen_vars_stack.pop()
        return node
    def visit_DefNode(self, node):
        # Analyse like any function, then (for plain def functions that
        # need it) synthesize the 'name = <function object>' assignment
        # that binds the function in its enclosing namespace.
        node = self.visit_FuncDefNode(node)
        env = self.current_env()
        if (not isinstance(node, Nodes.DefNode) or
                node.fused_py_func or node.is_generator_body or
                not node.needs_assignment_synthesis(env)):
            return node
        # Returning a list splices both statements in place of the def.
        return [node, self._synthesize_assignment(node, env)]
def visit_GeneratorBodyDefNode(self, node):
return self.visit_FuncDefNode(node)
    def _synthesize_assignment(self, node, env):
        # Synthesize assignment node and put it right after defnode
        # Find the generating scope: class scopes do not own the function
        # object, their enclosing (module/closure) scope does.
        genv = env
        while genv.is_py_class_scope or genv.is_c_class_scope:
            genv = genv.outer_scope
        if genv.is_closure_scope:
            # Inside a closure: the function object is created from the
            # enclosing scope object at runtime.
            rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode(
                node.pos, def_node=node,
                pymethdef_cname=node.entry.pymethdef_cname,
                code_object=ExprNodes.CodeObjectNode(node))
        else:
            binding = self.current_directives.get('binding')
            rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
            node.code_object = rhs.code_object
        if env.is_py_class_scope:
            # Methods of Python classes always need binding behaviour.
            rhs.binding = True
        node.is_cyfunction = rhs.binding
        return self._create_assignment(node, rhs, env)
    def _create_assignment(self, def_node, rhs, env):
        # Build 'name = decorator_n(...decorator_1(rhs))' for the given
        # function node, consuming its decorators in the process.
        if def_node.decorators:
            # Apply decorators innermost-first (bottom decorator first).
            for decorator in def_node.decorators[::-1]:
                rhs = ExprNodes.SimpleCallNode(
                    decorator.pos,
                    function = decorator.decorator,
                    args = [rhs])
            def_node.decorators = None
        assmt = Nodes.SingleAssignmentNode(
            def_node.pos,
            lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name),
            rhs=rhs)
        assmt.analyse_declarations(env)
        return assmt
    def visit_ScopedExprNode(self, node):
        # Scoped expressions (e.g. generator expressions) may or may not
        # carry their own scope; handle declaration analysis accordingly.
        env = self.current_env()
        node.analyse_declarations(env)
        # the node may or may not have a local scope
        if node.has_local_scope:
            # Inherit the already-seen names so use-before-declaration
            # warnings still work inside the inner scope.
            self.seen_vars_stack.append(set(self.seen_vars_stack[-1]))
            self.enter_scope(node, node.expr_scope)
            node.analyse_scoped_declarations(node.expr_scope)
            self.visitchildren(node)
            self.exit_scope()
            self.seen_vars_stack.pop()
        else:
            node.analyse_scoped_declarations(env)
            self.visitchildren(node)
        return node
    def visit_TempResultFromStatNode(self, node):
        # Visit children first, then analyse the node's own declarations
        # in the current environment.
        self.visitchildren(node)
        node.analyse_declarations(self.current_env())
        return node
def visit_CppClassNode(self, node):
if node.visibility == 'extern':
return None
else:
return self.visit_ClassDefNode(node)
    def visit_CStructOrUnionDefNode(self, node):
        # Create a wrapper node if needed.
        # We want to use the struct type information (so it can't happen
        # before this phase) but also create new objects to be declared
        # (so it can't happen later).
        # Note that we don't return the original node, as it is
        # never used after this phase.
        # NOTE(review): the guard below is always true, so the wrapper-class
        # generation that follows is currently dead code (struct wrappers
        # are disabled / private by default). It is kept for reference.
        if True: # private (default)
            return None
        # --- unreachable below: builds a Python wrapper class exposing the
        # --- struct/union members as properties.
        self_value = ExprNodes.AttributeNode(
            pos = node.pos,
            obj = ExprNodes.NameNode(pos=node.pos, name=u"self"),
            attribute = EncodedString(u"value"))
        var_entries = node.entry.type.scope.var_entries
        attributes = []
        for entry in var_entries:
            attributes.append(ExprNodes.AttributeNode(pos = entry.pos,
                                                      obj = self_value,
                                                      attribute = entry.name))
        # __init__ assignments
        init_assignments = []
        for entry, attr in zip(var_entries, attributes):
            # TODO: branch on visibility
            init_assignments.append(self.init_assignment.substitute({
                u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name),
                u"ATTR": attr,
            }, pos = entry.pos))
        # create the class
        str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2])
        wrapper_class = self.struct_or_union_wrapper.substitute({
            u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments),
            u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct),
            u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes),
            u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)),
            u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))),
        }, pos = node.pos).stats[0]
        wrapper_class.class_name = node.name
        wrapper_class.shadow = True
        class_body = wrapper_class.body.stats
        # fix value type
        assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode)
        class_body[0].base_type.name = node.name
        # fix __init__ arguments
        init_method = class_body[1]
        assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__'
        arg_template = init_method.args[1]
        if not node.entry.type.is_struct:
            # union: members are mutually exclusive, so force keyword args
            arg_template.kw_only = True
        del init_method.args[1]
        for entry, attr in zip(var_entries, attributes):
            arg = copy.deepcopy(arg_template)
            arg.declarator.name = entry.name
            init_method.args.append(arg)
        # setters/getters
        for entry, attr in zip(var_entries, attributes):
            # TODO: branch on visibility
            if entry.type.is_pyobject:
                template = self.basic_pyobject_property
            else:
                template = self.basic_property
            property = template.substitute({
                u"ATTR": attr,
            }, pos = entry.pos).stats[0]
            property.name = entry.name
            wrapper_class.body.stats.append(property)
        wrapper_class.analyse_declarations(self.current_env())
        return self.visit_CClassDefNode(wrapper_class)
    # Some nodes are no longer needed after declaration
    # analysis and can be dropped. The analysis was performed
    # on these nodes in a separate recursive process from the
    # enclosing function or module, so we can simply drop them.
    def visit_CDeclaratorNode(self, node):
        # necessary to ensure that all CNameDeclaratorNodes are visited.
        self.visitchildren(node)
        return node
    def visit_CTypeDefNode(self, node):
        # Typedef nodes are kept unchanged after declaration analysis.
        return node
    def visit_CBaseTypeNode(self, node):
        # Base type nodes are only needed during analysis; drop them.
        return None
def visit_CEnumDefNode(self, node):
if node.visibility == 'public':
return node
else:
return None
    def visit_CNameDeclaratorNode(self, node):
        # Warn if a cdef variable is declared after its name was already
        # used in this scope.  Note the operator precedence: the condition
        # reads "entry is None, OR (not extern AND not a C class member)".
        if node.name in self.seen_vars_stack[-1]:
            entry = self.current_env().lookup(node.name)
            if (entry is None or entry.visibility != 'extern'
                    and not entry.scope.is_c_class_scope):
                warning(node.pos, "cdef variable '%s' declared after it is used" % node.name, 2)
        self.visitchildren(node)
        return node
def visit_CVarDefNode(self, node):
# to ensure all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return None
    def visit_CnameDecoratorNode(self, node):
        # Visit the wrapped node; it may disappear, be replaced, or be
        # expanded into [def_node, synthesized_assignment].
        child_node = self.visit(node.node)
        if not child_node:
            return None
        if type(child_node) is list: # Assignment synthesized
            # Keep the decorator around the def node only; the synthesized
            # assignment statement(s) follow it unchanged.
            node.child_node = child_node[0]
            return [node] + child_node[1:]
        node.node = child_node
        return node
    def create_Property(self, entry):
        # Build a Python property wrapper for a cdef class attribute.
        # NOTE(review): 'template' is only assigned for 'public' and
        # 'readonly' visibility — callers are presumably restricted to
        # those; any other visibility would raise NameError here. Verify
        # against call sites.
        if entry.visibility == 'public':
            if entry.type.is_pyobject:
                template = self.basic_pyobject_property
            else:
                template = self.basic_property
        elif entry.visibility == 'readonly':
            template = self.basic_property_ro
        property = template.substitute({
            u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
                                             obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
                                             attribute=entry.name),
        }, pos=entry.pos).stats[0]
        property.name = entry.name
        property.doc = entry.doc
        return property
class CalculateQualifiedNamesTransform(EnvTransform):
    """
    Calculate and store the '__qualname__' and the global
    module name on some nodes.

    Maintains 'self.qualified_name' as a list of path components while
    walking the tree; function scopes add '<locals>' between their own
    name and any names nested inside them (PEP 3155 semantics).
    """
    def visit_ModuleNode(self, node):
        self.module_name = self.global_scope().qualified_name
        self.qualified_name = []
        # Cache the unbound superclass visitors so the overridden methods
        # below can delegate without re-resolving super() each time.
        _super = super(CalculateQualifiedNamesTransform, self)
        self._super_visit_FuncDefNode = _super.visit_FuncDefNode
        self._super_visit_ClassDefNode = _super.visit_ClassDefNode
        self.visitchildren(node)
        return node
    def _set_qualname(self, node, name=None):
        # Store the dotted qualname (optionally extended by 'name') and
        # the module name on the node.
        if name:
            qualname = self.qualified_name[:]
            qualname.append(name)
        else:
            qualname = self.qualified_name
        node.qualname = EncodedString('.'.join(qualname))
        node.module_name = self.module_name
    def _append_entry(self, entry):
        # Module-level names restart the path; nested names extend it.
        if entry.is_pyglobal and not entry.is_pyclass_attr:
            self.qualified_name = [entry.name]
        else:
            self.qualified_name.append(entry.name)
    def visit_ClassNode(self, node):
        self._set_qualname(node, node.name)
        self.visitchildren(node)
        return node
    def visit_PyClassNamespaceNode(self, node):
        # class name was already added by parent node
        self._set_qualname(node)
        self.visitchildren(node)
        return node
    def visit_PyCFunctionNode(self, node):
        self._set_qualname(node, node.def_node.name)
        self.visitchildren(node)
        return node
    def visit_DefNode(self, node):
        self._set_qualname(node, node.name)
        return self.visit_FuncDefNode(node)
    def visit_FuncDefNode(self, node):
        # Save/restore the path around the function body; names defined
        # inside get a '<locals>' component after the function name.
        orig_qualified_name = self.qualified_name[:]
        if getattr(node, 'name', None) == '<lambda>':
            self.qualified_name.append('<lambda>')
        else:
            self._append_entry(node.entry)
        self.qualified_name.append('<locals>')
        self._super_visit_FuncDefNode(node)
        self.qualified_name = orig_qualified_name
        return node
    def visit_ClassDefNode(self, node):
        orig_qualified_name = self.qualified_name[:]
        entry = (getattr(node, 'entry', None) or # PyClass
                 self.current_env().lookup_here(node.name)) # CClass
        self._append_entry(entry)
        self._super_visit_ClassDefNode(node)
        self.qualified_name = orig_qualified_name
        return node
class AnalyseExpressionsTransform(CythonTransform):
    """Run type inference and expression analysis over every scope.

    Each scope infers types first, then its body is replaced by the
    result of analyse_expressions() on that scope.
    """
    def visit_ModuleNode(self, node):
        node.scope.infer_types()
        node.body = node.body.analyse_expressions(node.scope)
        self.visitchildren(node)
        return node
    def visit_FuncDefNode(self, node):
        node.local_scope.infer_types()
        node.body = node.body.analyse_expressions(node.local_scope)
        self.visitchildren(node)
        return node
    def visit_ScopedExprNode(self, node):
        if node.has_local_scope:
            node.expr_scope.infer_types()
            node = node.analyse_scoped_expressions(node.expr_scope)
        self.visitchildren(node)
        return node
    def visit_IndexNode(self, node):
        """
        Replace index nodes used to specialize cdef functions with fused
        argument types with the Attribute- or NameNode referring to the
        function. We then need to copy over the specialization properties to
        the attribute or name node.
        Because the indexing might be a Python indexing operation on a fused
        function, or (usually) a Cython indexing operation, we need to
        re-analyse the types.
        """
        self.visit_Node(node)
        if node.is_fused_index and not node.type.is_error:
            node = node.base
        return node
class FindInvalidUseOfFusedTypes(CythonTransform):
    """Report expressions or return types that were left with an
    unspecialized fused type after analysis."""
    def visit_FuncDefNode(self, node):
        # Errors related to use in functions with fused args will already
        # have been detected
        if not node.has_fused_arguments:
            if not node.is_generator_body and node.return_type.is_fused:
                error(node.pos, "Return type is not specified as argument type")
            else:
                self.visitchildren(node)
        return node
    def visit_ExprNode(self, node):
        if node.type and node.type.is_fused:
            error(node.pos, "Invalid use of fused types, type cannot be specialized")
        else:
            self.visitchildren(node)
        return node
class ExpandInplaceOperators(EnvTransform):
    """Expand 'lhs op= rhs' into 'lhs = lhs op rhs' while evaluating any
    side-effecting subexpressions of the lhs exactly once (via LetRefNode
    temporaries)."""
    def visit_InPlaceAssignmentNode(self, node):
        lhs = node.lhs
        rhs = node.rhs
        if lhs.type.is_cpp_class:
            # No getting around this exact operator here.
            return node
        if isinstance(lhs, ExprNodes.BufferIndexNode):
            # There is code to handle this case in InPlaceAssignmentNode
            return node
        env = self.current_env()
        def side_effect_free_reference(node, setting=False):
            # Return (node, temps): a reference to the same lvalue whose
            # evaluation has no side effects, plus the LetRefNode temps
            # created along the way.
            if node.is_name:
                return node, []
            elif node.type.is_pyobject and not setting:
                node = LetRefNode(node)
                return node, [node]
            elif node.is_subscript:
                base, temps = side_effect_free_reference(node.base)
                index = LetRefNode(node.index)
                return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
            elif node.is_attribute:
                obj, temps = side_effect_free_reference(node.obj)
                return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps
            elif isinstance(node, ExprNodes.BufferIndexNode):
                raise ValueError("Don't allow things like attributes of buffer indexing operations")
            else:
                node = LetRefNode(node)
                return node, [node]
        try:
            lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True)
        except ValueError:
            # Could not build a side-effect-free reference; leave the
            # in-place node for its own code path.
            return node
        # Shallow clone of the lhs to use as the read operand.
        dup = lhs.__class__(**lhs.__dict__)
        binop = ExprNodes.binop_node(node.pos,
                                     operator = node.operator,
                                     operand1 = dup,
                                     operand2 = rhs,
                                     inplace=True)
        # Manually analyse types for new node.
        lhs.analyse_target_types(env)
        dup.analyse_types(env)
        binop.analyse_operation(env)
        node = Nodes.SingleAssignmentNode(
            node.pos,
            lhs = lhs,
            rhs=binop.coerce_to(lhs.type, env))
        # Use LetRefNode to avoid side effects.
        let_ref_nodes.reverse()
        for t in let_ref_nodes:
            node = LetNode(t, node)
        return node
    def visit_ExprNode(self, node):
        # In-place assignments can't happen within an expression.
        return node
class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
    """
    Adjust function and class definitions by the decorator directives:

    @cython.cfunc
    @cython.cclass
    @cython.ccall
    @cython.inline
    """
    def visit_ModuleNode(self, node):
        self.directives = node.directives
        self.in_py_class = False
        self.visitchildren(node)
        return node
    def visit_CompilerDirectivesNode(self, node):
        # Directives are scoped: save/restore around the subtree.
        old_directives = self.directives
        self.directives = node.directives
        self.visitchildren(node)
        self.directives = old_directives
        return node
    def visit_DefNode(self, node):
        # Convert a 'def' into a cpdef/cdef function if directed, and
        # re-visit the converted node.
        modifiers = []
        if 'inline' in self.directives:
            modifiers.append('inline')
        if 'ccall' in self.directives:
            node = node.as_cfunction(
                overridable=True, returns=self.directives.get('returns'), modifiers=modifiers)
            return self.visit(node)
        if 'cfunc' in self.directives:
            if self.in_py_class:
                error(node.pos, "cfunc directive is not allowed here")
            else:
                node = node.as_cfunction(
                    overridable=False, returns=self.directives.get('returns'), modifiers=modifiers)
                return self.visit(node)
        if 'inline' in modifiers:
            error(node.pos, "Python functions cannot be declared 'inline'")
        self.visitchildren(node)
        return node
    def visit_PyClassDefNode(self, node):
        if 'cclass' in self.directives:
            node = node.as_cclass()
            return self.visit(node)
        else:
            old_in_pyclass = self.in_py_class
            self.in_py_class = True
            self.visitchildren(node)
            self.in_py_class = old_in_pyclass
            return node
    def visit_CClassDefNode(self, node):
        old_in_pyclass = self.in_py_class
        self.in_py_class = False
        self.visitchildren(node)
        self.in_py_class = old_in_pyclass
        return node
class AlignFunctionDefinitions(CythonTransform):
    """
    This class takes the signatures from a .pxd file and applies them to
    the def methods in a .py file.
    """
    def visit_ModuleNode(self, node):
        self.scope = node.scope
        self.directives = node.directives
        self.imported_names = set() # hack, see visit_FromImportStatNode()
        self.visitchildren(node)
        return node
    def visit_PyClassDefNode(self, node):
        pxd_def = self.scope.lookup(node.name)
        if pxd_def:
            if pxd_def.is_cclass:
                # The .pxd declares this as a cdef class: convert.
                return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
            elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
                error(node.pos, "'%s' redeclared" % node.name)
                if pxd_def.pos:
                    error(pxd_def.pos, "previous declaration here")
                return None
        return node
    def visit_CClassDefNode(self, node, pxd_def=None):
        # Temporarily descend into the class scope from the .pxd so that
        # method defs are matched against their pxd signatures.
        if pxd_def is None:
            pxd_def = self.scope.lookup(node.class_name)
        if pxd_def:
            outer_scope = self.scope
            self.scope = pxd_def.type.scope
        self.visitchildren(node)
        if pxd_def:
            self.scope = outer_scope
        return node
    def visit_DefNode(self, node):
        pxd_def = self.scope.lookup(node.name)
        if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
            if not pxd_def.is_cfunction:
                error(node.pos, "'%s' redeclared" % node.name)
                if pxd_def.pos:
                    error(pxd_def.pos, "previous declaration here")
                return None
            node = node.as_cfunction(pxd_def)
        elif (self.scope.is_module_scope and self.directives['auto_cpdef']
              and not node.name in self.imported_names
              and node.is_cdef_func_compatible()):
            # FIXME: cpdef-ing should be done in analyse_declarations()
            node = node.as_cfunction(scope=self.scope)
        # Enable this when nested cdef functions are allowed.
        # self.visitchildren(node)
        return node
    def visit_FromImportStatNode(self, node):
        # hack to prevent conditional import fallback functions from
        # being cpdef-ed (global Python variables currently conflict
        # with imports)
        if self.scope.is_module_scope:
            for name, _ in node.items:
                self.imported_names.add(name)
        return node
    def visit_ExprNode(self, node):
        # ignore lambdas and everything else that appears in expressions
        return node
class RemoveUnreachableCode(CythonTransform):
    """Drop statements that follow a terminating statement (return,
    raise, ...) and propagate the 'is_terminator' flag upwards."""
    def visit_StatListNode(self, node):
        if not self.current_directives['remove_unreachable']:
            return node
        self.visitchildren(node)
        for idx, stat in enumerate(node.stats):
            idx += 1
            if stat.is_terminator:
                # Everything after the first terminator is unreachable.
                if idx < len(node.stats):
                    if self.current_directives['warn.unreachable']:
                        warning(node.stats[idx].pos, "Unreachable code", 2)
                    node.stats = node.stats[:idx]
                node.is_terminator = True
                break
        return node
    def visit_IfClauseNode(self, node):
        self.visitchildren(node)
        if node.body.is_terminator:
            node.is_terminator = True
        return node
    def visit_IfStatNode(self, node):
        self.visitchildren(node)
        # An if statement terminates only if its else clause and every
        # if/elif clause terminate.
        if node.else_clause and node.else_clause.is_terminator:
            for clause in node.if_clauses:
                if not clause.is_terminator:
                    break
            else:
                node.is_terminator = True
        return node
    def visit_TryExceptStatNode(self, node):
        self.visitchildren(node)
        if node.body.is_terminator and node.else_clause:
            # The else clause can never run if the body always terminates.
            if self.current_directives['warn.unreachable']:
                warning(node.else_clause.pos, "Unreachable code", 2)
            node.else_clause = None
        return node
class YieldNodeCollector(TreeVisitor):
    """Collect yield/await/return nodes of a single function body,
    without descending into nested functions, classes or lambdas."""
    def __init__(self):
        super(YieldNodeCollector, self).__init__()
        self.yields = []
        self.awaits = []
        self.returns = []
        self.has_return_value = False
    def visit_Node(self, node):
        self.visitchildren(node)
    def visit_YieldExprNode(self, node):
        self.yields.append(node)
        self.visitchildren(node)
    def visit_AwaitExprNode(self, node):
        self.awaits.append(node)
        self.visitchildren(node)
    def visit_ReturnStatNode(self, node):
        self.visitchildren(node)
        if node.value:
            self.has_return_value = True
        self.returns.append(node)
    # Stop at nested scopes: their yields belong to them, not to us.
    def visit_ClassDefNode(self, node):
        pass
    def visit_FuncDefNode(self, node):
        pass
    def visit_LambdaNode(self, node):
        pass
    def visit_GeneratorExpressionNode(self, node):
        pass
    def visit_CArgDeclNode(self, node):
        # do not look into annotations
        # FIXME: support (yield) in default arguments (currently crashes)
        pass
class MarkClosureVisitor(CythonTransform):
    """Mark functions that need a closure and rewrite generators and
    coroutines into GeneratorDefNode/AsyncDefNode wrappers."""
    def visit_ModuleNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        return node
    def visit_FuncDefNode(self, node):
        # 'needs_closure' is set by inner nodes while visiting children;
        # it applies to THIS function, then is reset to True so the
        # enclosing function knows it contains a nested scope.
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        self.needs_closure = True
        collector = YieldNodeCollector()
        collector.visitchildren(node)
        if node.is_async_def:
            if collector.yields:
                error(collector.yields[0].pos, "'yield' not allowed in async coroutines (use 'await')")
            yields = collector.awaits
        elif collector.yields:
            if collector.awaits:
                error(collector.yields[0].pos, "'await' not allowed in generators (use 'yield')")
            yields = collector.yields
        else:
            # Plain function: no generator rewriting needed.
            return node
        # Number the suspension points for the generator state machine.
        for i, yield_expr in enumerate(yields, 1):
            yield_expr.label_num = i
        for retnode in collector.returns:
            retnode.in_generator = True
        gbody = Nodes.GeneratorBodyDefNode(
            pos=node.pos, name=node.name, body=node.body)
        coroutine = (Nodes.AsyncDefNode if node.is_async_def else Nodes.GeneratorDefNode)(
            pos=node.pos, name=node.name, args=node.args,
            star_arg=node.star_arg, starstar_arg=node.starstar_arg,
            doc=node.doc, decorators=node.decorators,
            gbody=gbody, lambda_name=node.lambda_name)
        return coroutine
    def visit_CFuncDefNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        self.needs_closure = True
        if node.needs_closure and node.overridable:
            error(node.pos, "closures inside cpdef functions not yet supported")
        return node
    def visit_LambdaNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        self.needs_closure = True
        return node
    def visit_ClassDefNode(self, node):
        self.visitchildren(node)
        self.needs_closure = True
        return node
class CreateClosureClasses(CythonTransform):
    # Output closure classes in module scope for all functions
    # that really need it.
    def __init__(self, context):
        super(CreateClosureClasses, self).__init__(context)
        self.path = []
        self.in_lambda = False
    def visit_ModuleNode(self, node):
        self.module_scope = node.scope
        self.visitchildren(node)
        return node
    def find_entries_used_in_closures(self, node):
        # Split the function's entries into those captured FROM an outer
        # closure and those captured BY inner closures.
        from_closure = []
        in_closure = []
        for name, entry in node.local_scope.entries.items():
            if entry.from_closure:
                from_closure.append((name, entry))
            elif entry.in_closure:
                in_closure.append((name, entry))
        return from_closure, in_closure
    def create_class_from_scope(self, node, target_module_scope, inner_node=None):
        # move local variables into closure
        if node.is_generator:
            # Generators keep ALL locals in the closure so they survive
            # across yields.
            for entry in node.local_scope.entries.values():
                if not entry.from_closure:
                    entry.in_closure = True
        from_closure, in_closure = self.find_entries_used_in_closures(node)
        in_closure.sort()
        # Now from the beginning
        node.needs_closure = False
        node.needs_outer_scope = False
        func_scope = node.local_scope
        cscope = node.entry.scope
        while cscope.is_py_class_scope or cscope.is_c_class_scope:
            cscope = cscope.outer_scope
        if not from_closure and (self.path or inner_node):
            if not inner_node:
                if not node.py_cfunc_node:
                    raise InternalError("DefNode does not have assignment node")
                inner_node = node.py_cfunc_node
            inner_node.needs_self_code = False
            node.needs_outer_scope = False
        if node.is_generator:
            pass
        elif not in_closure and not from_closure:
            # Nothing captured in either direction: no closure class.
            return
        elif not in_closure:
            # Only reads from an outer closure: pass it through.
            func_scope.is_passthrough = True
            func_scope.scope_class = cscope.scope_class
            node.needs_outer_scope = True
            return
        # Declare the closure class in module scope.
        as_name = '%s_%s' % (
            target_module_scope.next_id(Naming.closure_class_prefix),
            node.entry.cname)
        entry = target_module_scope.declare_c_class(
            name=as_name, pos=node.pos, defining=True,
            implementing=True)
        entry.type.is_final_type = True
        func_scope.scope_class = entry
        class_scope = entry.type.scope
        class_scope.is_internal = True
        if Options.closure_freelist_size:
            class_scope.directives['freelist'] = Options.closure_freelist_size
        if from_closure:
            assert cscope.is_closure_scope
            class_scope.declare_var(pos=node.pos,
                                    name=Naming.outer_scope_cname,
                                    cname=Naming.outer_scope_cname,
                                    type=cscope.scope_class.type,
                                    is_cdef=True)
            node.needs_outer_scope = True
        for name, entry in in_closure:
            closure_entry = class_scope.declare_var(pos=entry.pos,
                                                    name=entry.name,
                                                    cname=entry.cname,
                                                    type=entry.type,
                                                    is_cdef=True)
            if entry.is_declared_generic:
                closure_entry.is_declared_generic = 1
        node.needs_closure = True
        # Do it here because other classes are already checked
        target_module_scope.check_c_class(func_scope.scope_class)
    def visit_LambdaNode(self, node):
        if not isinstance(node.def_node, Nodes.DefNode):
            # fused function, an error has been previously issued
            return node
        was_in_lambda = self.in_lambda
        self.in_lambda = True
        self.create_class_from_scope(node.def_node, self.module_scope, node)
        self.visitchildren(node)
        self.in_lambda = was_in_lambda
        return node
    def visit_FuncDefNode(self, node):
        if self.in_lambda:
            self.visitchildren(node)
            return node
        if node.needs_closure or self.path:
            self.create_class_from_scope(node, self.module_scope)
        self.path.append(node)
        self.visitchildren(node)
        self.path.pop()
        return node
    def visit_GeneratorBodyDefNode(self, node):
        self.visitchildren(node)
        return node
    def visit_CFuncDefNode(self, node):
        if not node.overridable:
            return self.visit_FuncDefNode(node)
        else:
            self.visitchildren(node)
            return node
class GilCheck(VisitorTransform):
    """
    Call `node.gil_check(env)` on each node to make sure we hold the
    GIL when we need it. Raise an error when on Python operations
    inside a `nogil` environment.

    Additionally, raise exceptions for closely nested with gil or with nogil
    statements. The latter would abort Python.
    """
    def __call__(self, root):
        self.env_stack = [root.scope]
        self.nogil = False
        # True for 'cdef func() nogil:' functions, as the GIL may be held while
        # calling this function (thus contained 'nogil' blocks may be valid).
        self.nogil_declarator_only = False
        return super(GilCheck, self).__call__(root)
    def visit_FuncDefNode(self, node):
        self.env_stack.append(node.local_scope)
        was_nogil = self.nogil
        self.nogil = node.local_scope.nogil
        if self.nogil:
            self.nogil_declarator_only = True
        if self.nogil and node.nogil_check:
            node.nogil_check(node.local_scope)
        self.visitchildren(node)
        # This cannot be nested, so it doesn't need backup/restore
        self.nogil_declarator_only = False
        self.env_stack.pop()
        self.nogil = was_nogil
        return node
    def visit_GILStatNode(self, node):
        if self.nogil and node.nogil_check:
            node.nogil_check()
        was_nogil = self.nogil
        self.nogil = (node.state == 'nogil')
        # Redundant 'with gil' / 'with nogil' nesting is an error (unless
        # we are inside a nogil-declared function, where the actual GIL
        # state at call time is unknown).
        if was_nogil == self.nogil and not self.nogil_declarator_only:
            if not was_nogil:
                error(node.pos, "Trying to acquire the GIL while it is "
                                "already held.")
            else:
                error(node.pos, "Trying to release the GIL while it was "
                                "previously released.")
        if isinstance(node.finally_clause, Nodes.StatListNode):
            # The finally clause of the GILStatNode is a GILExitNode,
            # which is wrapped in a StatListNode. Just unpack that.
            node.finally_clause, = node.finally_clause.stats
        self.visitchildren(node)
        self.nogil = was_nogil
        return node
    def visit_ParallelRangeNode(self, node):
        if node.nogil:
            # Rewrite 'prange(..., nogil=True)' as a 'with nogil' block
            # around the prange.
            node.nogil = False
            node = Nodes.GILStatNode(node.pos, state='nogil', body=node)
            return self.visit_GILStatNode(node)
        if not self.nogil:
            error(node.pos, "prange() can only be used without the GIL")
            # Forget about any GIL-related errors that may occur in the body
            return None
        node.nogil_check(self.env_stack[-1])
        self.visitchildren(node)
        return node
    def visit_ParallelWithBlockNode(self, node):
        if not self.nogil:
            error(node.pos, "The parallel section may only be used without "
                            "the GIL")
            return None
        if node.nogil_check:
            # It does not currently implement this, but test for it anyway to
            # avoid potential future surprises
            node.nogil_check(self.env_stack[-1])
        self.visitchildren(node)
        return node
    def visit_TryFinallyStatNode(self, node):
        """
        Take care of try/finally statements in nogil code sections.
        """
        if not self.nogil or isinstance(node, Nodes.GILStatNode):
            return self.visit_Node(node)
        node.nogil_check = None
        node.is_try_finally_in_nogil = True
        self.visitchildren(node)
        return node
    def visit_Node(self, node):
        if self.env_stack and self.nogil and node.nogil_check:
            node.nogil_check(self.env_stack[-1])
        self.visitchildren(node)
        node.in_nogil_context = self.nogil
        return node
class TransformBuiltinMethods(EnvTransform):
    """
    Replace Cython's own cython.* builtins by the corresponding tree nodes.

    Also injects the implicit globals/locals arguments into exec/eval and
    rewrites locals()/vars()/dir()/super() calls.
    """
    def visit_SingleAssignmentNode(self, node):
        # Declaration-only assignments leave no runtime statement behind.
        if node.declaration_only:
            return None
        else:
            self.visitchildren(node)
            return node
    def visit_AttributeNode(self, node):
        self.visitchildren(node)
        return self.visit_cython_attribute(node)
    def visit_NameNode(self, node):
        return self.visit_cython_attribute(node)
    def visit_cython_attribute(self, node):
        """Replace a reference to a cython.* attribute (cython.compiled,
        cython.NULL, ...) by the corresponding constant or name node."""
        attribute = node.as_cython_attribute()
        if attribute:
            if attribute == u'compiled':
                node = ExprNodes.BoolNode(node.pos, value=True)
            elif attribute == u'__version__':
                from .. import __version__ as version
                node = ExprNodes.StringNode(node.pos, value=EncodedString(version))
            elif attribute == u'NULL':
                node = ExprNodes.NullNode(node.pos)
            elif attribute in (u'set', u'frozenset', u'staticmethod'):
                node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute),
                                          entry=self.current_env().builtin_scope().lookup_here(attribute))
            elif PyrexTypes.parse_basic_type(attribute):
                # e.g. cython.int used as a type: handled elsewhere.
                pass
            elif self.context.cython_scope.lookup_qualified_name(attribute):
                pass
            else:
                error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute)
        return node
    def visit_ExecStatNode(self, node):
        # Add the implicit globals (and locals, in function scope)
        # arguments to a bare 'exec' statement.
        lenv = self.current_env()
        self.visitchildren(node)
        if len(node.args) == 1:
            node.args.append(ExprNodes.GlobalsExprNode(node.pos))
            if not lenv.is_module_scope:
                node.args.append(
                    ExprNodes.LocalsExprNode(
                        node.pos, self.current_scope_node(), lenv))
        return node
    def _inject_locals(self, node, func_name):
        # locals()/dir()/vars() builtins
        lenv = self.current_env()
        entry = lenv.lookup_here(func_name)
        if entry:
            # not the builtin
            return node
        pos = node.pos
        if func_name in ('locals', 'vars'):
            if func_name == 'locals' and len(node.args) > 0:
                # BUG FIX: was error(self.pos, ...) — the transform has no
                # 'pos' attribute, which raised AttributeError instead of
                # reporting the compile error. Use the call node's position.
                error(node.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d"
                      % len(node.args))
                return node
            elif func_name == 'vars':
                if len(node.args) > 1:
                    # BUG FIX: was error(self.pos, ...) — see above.
                    error(node.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d"
                          % len(node.args))
                if len(node.args) > 0:
                    return node # nothing to do
            return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv)
        else: # dir()
            if len(node.args) > 1:
                # BUG FIX: was error(self.pos, ...) — see above.
                error(node.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d"
                      % len(node.args))
            if len(node.args) > 0:
                # optimised in Builtin.py
                return node
            if lenv.is_py_class_scope or lenv.is_module_scope:
                if lenv.is_py_class_scope:
                    pyclass = self.current_scope_node()
                    locals_dict = ExprNodes.CloneNode(pyclass.dict)
                else:
                    locals_dict = ExprNodes.GlobalsExprNode(pos)
                return ExprNodes.SortedDictKeysNode(locals_dict)
            local_names = sorted(var.name for var in lenv.entries.values() if var.name)
            items = [ExprNodes.IdentifierStringNode(pos, value=var)
                     for var in local_names]
            return ExprNodes.ListNode(pos, args=items)
    def visit_PrimaryCmpNode(self, node):
        # special case: for in/not-in test, we do not need to sort locals()
        self.visitchildren(node)
        # ROBUSTNESS: explicit tuple membership instead of the fragile
        # substring test "node.operator in 'not_in'" (same result for the
        # real operator values 'in' and 'not_in', but no accidental match
        # for other operators that happen to be substrings).
        if node.operator in ('in', 'not_in'):
            if isinstance(node.operand2, ExprNodes.SortedDictKeysNode):
                arg = node.operand2.arg
                if isinstance(arg, ExprNodes.NoneCheckNode):
                    arg = arg.arg
                node.operand2 = arg
        return node
    def visit_CascadedCmpNode(self, node):
        return self.visit_PrimaryCmpNode(node)
    def _inject_eval(self, node, func_name):
        # Add the implicit globals/locals arguments to a bare eval() call.
        lenv = self.current_env()
        entry = lenv.lookup_here(func_name)
        if entry or len(node.args) != 1:
            return node
        # Inject globals and locals
        node.args.append(ExprNodes.GlobalsExprNode(node.pos))
        if not lenv.is_module_scope:
            node.args.append(
                ExprNodes.LocalsExprNode(
                    node.pos, self.current_scope_node(), lenv))
        return node
    def _inject_super(self, node, func_name):
        # Turn a no-argument super() call into the explicit two-argument
        # form, using the enclosing class and the first method argument.
        lenv = self.current_env()
        entry = lenv.lookup_here(func_name)
        if entry or node.args:
            return node
        # Inject no-args super
        def_node = self.current_scope_node()
        if (not isinstance(def_node, Nodes.DefNode) or not def_node.args or
                len(self.env_stack) < 2):
            return node
        class_node, class_scope = self.env_stack[-2]
        if class_scope.is_py_class_scope:
            def_node.requires_classobj = True
            class_node.class_cell.is_active = True
            node.args = [
                ExprNodes.ClassCellNode(
                    node.pos, is_generator=def_node.is_generator),
                ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
                ]
        elif class_scope.is_c_class_scope:
            node.args = [
                ExprNodes.NameNode(
                    node.pos, name=class_node.scope.name,
                    entry=class_node.entry),
                ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
                ]
        return node
    def visit_SimpleCallNode(self, node):
        # cython.foo
        function = node.function.as_cython_attribute()
        if function:
            if function in InterpretCompilerDirectives.unop_method_nodes:
                if len(node.args) != 1:
                    error(node.function.pos, u"%s() takes exactly one argument" % function)
                else:
                    node = InterpretCompilerDirectives.unop_method_nodes[function](
                        node.function.pos, operand=node.args[0])
            elif function in InterpretCompilerDirectives.binop_method_nodes:
                if len(node.args) != 2:
                    error(node.function.pos, u"%s() takes exactly two arguments" % function)
                else:
                    node = InterpretCompilerDirectives.binop_method_nodes[function](
                        node.function.pos, operand1=node.args[0], operand2=node.args[1])
            elif function == u'cast':
                if len(node.args) != 2:
                    error(node.function.pos, u"cast() takes exactly two arguments")
                else:
                    type = node.args[0].analyse_as_type(self.current_env())
                    if type:
                        node = ExprNodes.TypecastNode(node.function.pos, type=type, operand=node.args[1])
                    else:
                        error(node.args[0].pos, "Not a type")
            elif function == u'sizeof':
                if len(node.args) != 1:
                    error(node.function.pos, u"sizeof() takes exactly one argument")
                else:
                    type = node.args[0].analyse_as_type(self.current_env())
                    if type:
                        node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type)
                    else:
                        node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0])
            elif function == 'cmod':
                if len(node.args) != 2:
                    error(node.function.pos, u"cmod() takes exactly two arguments")
                else:
                    node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1])
                    node.cdivision = True
            elif function == 'cdiv':
                if len(node.args) != 2:
                    error(node.function.pos, u"cdiv() takes exactly two arguments")
                else:
                    node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1])
                    node.cdivision = True
            elif function == u'set':
                node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
            elif function == u'staticmethod':
                node.function = ExprNodes.NameNode(node.pos, name=EncodedString('staticmethod'))
            elif self.context.cython_scope.lookup_qualified_name(function):
                pass
            else:
                error(node.function.pos,
                      u"'%s' not a valid cython language construct" % function)
        self.visitchildren(node)
        if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name:
            func_name = node.function.name
            if func_name in ('dir', 'locals', 'vars'):
                return self._inject_locals(node, func_name)
            if func_name == 'eval':
                return self._inject_eval(node, func_name)
            if func_name == 'super':
                return self._inject_super(node, func_name)
        return node
class ReplaceFusedTypeChecks(VisitorTransform):
    """
    This is not a transform in the pipeline. It is invoked on the specific
    versions of a cdef function with fused argument types. It filters out any
    type branches that don't match. e.g.

        if fused_t is mytype:
            ...
        elif fused_t in other_fused_type:
            ...
    """

    def __init__(self, local_scope):
        super(ReplaceFusedTypeChecks, self).__init__()
        self.local_scope = local_scope
        # defer the import until now to avoid circular import time dependencies
        from .Optimize import ConstantFolding
        self.transform = ConstantFolding(reevaluate=True)

    def visit_IfStatNode(self, node):
        """
        Filters out any if clauses with false compile time type check
        expression.
        """
        # Children first: type comparisons below are folded into constant
        # BoolNodes, after which ConstantFolding can prune dead branches.
        self.visitchildren(node)
        return self.transform(node)

    def visit_PrimaryCmpNode(self, node):
        # Replace compile-time type comparisons by constant True/False
        # nodes when both operands can be analysed as types.
        type1 = node.operand1.analyse_as_type(self.local_scope)
        type2 = node.operand2.analyse_as_type(self.local_scope)
        if type1 and type2:
            false_node = ExprNodes.BoolNode(node.pos, value=False)
            true_node = ExprNodes.BoolNode(node.pos, value=True)
            # Specialize the (possibly fused) left-hand type for this
            # particular specialization of the function.
            type1 = self.specialize_type(type1, node.operand1.pos)
            op = node.operator
            if op in ('is', 'is_not', '==', '!='):
                type2 = self.specialize_type(type2, node.operand2.pos)
                is_same = type1.same_as(type2)
                eq = op in ('is', '==')
                if (is_same and eq) or (not is_same and not eq):
                    return true_node
            elif op in ('in', 'not_in'):
                # We have to do an instance check directly, as operand2
                # needs to be a fused type and not a type with a subtype
                # that is fused. First unpack the typedef
                if isinstance(type2, PyrexTypes.CTypedefType):
                    type2 = type2.typedef_base_type
                if type1.is_fused:
                    error(node.operand1.pos, "Type is fused")
                elif not type2.is_fused:
                    error(node.operand2.pos,
                          "Can only use 'in' or 'not in' on a fused type")
                else:
                    # Membership: true iff type1 matches one of the
                    # specializations of the fused type2.
                    types = PyrexTypes.get_specialized_types(type2)
                    for specialized_type in types:
                        if type1.same_as(specialized_type):
                            if op == 'in':
                                return true_node
                            else:
                                return false_node
                    if op == 'not_in':
                        return true_node
            # All non-matching comparisons fold to False.
            return false_node
        return node

    def specialize_type(self, type, pos):
        # Map a fused type to the concrete type of this specialization;
        # report an error (and return the input) if no mapping exists.
        try:
            return type.specialize(self.local_scope.fused_to_specific)
        except KeyError:
            error(pos, "Type is not specific")
            return type

    def visit_Node(self, node):
        # Default: recurse into children unchanged.
        self.visitchildren(node)
        return node
class DebugTransform(CythonTransform):
    """
    Write debug information for this Cython module.

    Serializes modules, functions, arguments and (local/global) variables
    through self.tb, the gdb debug output writer
    (see Cython.Debugger.debug_output.CythonDebugWriter).
    """

    def __init__(self, context, options, result):
        super(DebugTransform, self).__init__(context)
        # qualified names of functions that were already serialized
        self.visited = set()
        # our treebuilder and debug output writer
        # (see Cython.Debugger.debug_output.CythonDebugWriter)
        self.tb = self.context.gdb_debug_outputwriter
        #self.c_output_file = options.output_file
        self.c_output_file = result.c_file

        # Closure support, basically treat nested functions as if the AST were
        # never nested
        self.nested_funcdefs = []

        # tells visit_NameNode whether it should register step-into functions
        self.register_stepinto = False

    def visit_ModuleNode(self, node):
        # Emit the top-level 'Module' element with its functions and
        # module-level globals.
        self.tb.module_name = node.full_module_name
        attrs = dict(
            module_name=node.full_module_name,
            filename=node.pos[0].filename,
            c_filename=self.c_output_file)

        self.tb.start('Module', attrs)

        # serialize functions
        self.tb.start('Functions')
        # First, serialize functions normally...
        self.visitchildren(node)

        # ... then, serialize nested functions
        for nested_funcdef in self.nested_funcdefs:
            self.visit_FuncDefNode(nested_funcdef)

        self.register_stepinto = True
        self.serialize_modulenode_as_function(node)
        self.register_stepinto = False
        self.tb.end('Functions')

        # 2.3 compatibility. Serialize global variables
        self.tb.start('Globals')
        entries = {}

        # Skip compiler-internal names, C functions and extension types.
        for k, v in node.scope.entries.items():
            if (v.qualified_name not in self.visited and not
                    v.name.startswith('__pyx_') and not
                    v.type.is_cfunction and not
                    v.type.is_extension_type):
                entries[k]= v

        self.serialize_local_variables(entries)
        self.tb.end('Globals')
        # self.tb.end('Module') # end Module after the line number mapping in
        # Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
        return node

    def visit_FuncDefNode(self, node):
        self.visited.add(node.local_scope.qualified_name)

        if getattr(node, 'is_wrapper', False):
            # wrapper functions get no debug entry of their own
            return node

        if self.register_stepinto:
            # Nested function: defer serialization so the output stays
            # flat (see self.nested_funcdefs in __init__).
            self.nested_funcdefs.append(node)
            return node

        # node.entry.visibility = 'extern'
        if node.py_func is None:
            pf_cname = ''
        else:
            pf_cname = node.py_func.entry.func_cname

        attrs = dict(
            name=node.entry.name or getattr(node, 'name', '<unknown>'),
            cname=node.entry.func_cname,
            pf_cname=pf_cname,
            qualified_name=node.local_scope.qualified_name,
            lineno=str(node.pos[1]))

        self.tb.start('Function', attrs=attrs)

        self.tb.start('Locals')
        self.serialize_local_variables(node.local_scope.entries)
        self.tb.end('Locals')

        self.tb.start('Arguments')
        for arg in node.local_scope.arg_entries:
            self.tb.start(arg.name)
            self.tb.end(arg.name)
        self.tb.end('Arguments')

        # While visiting the body, visit_NameNode records called C
        # functions as step-into targets.
        self.tb.start('StepIntoFunctions')
        self.register_stepinto = True
        self.visitchildren(node)
        self.register_stepinto = False
        self.tb.end('StepIntoFunctions')

        self.tb.end('Function')
        return node

    def visit_NameNode(self, node):
        if (self.register_stepinto and
                node.type is not None and
                node.type.is_cfunction and
                getattr(node, 'is_called', False) and
                node.entry.func_cname is not None):
            # don't check node.entry.in_cinclude, as 'cdef extern: ...'
            # declared functions are not 'in_cinclude'.
            # This means we will list called 'cdef' functions as
            # "step into functions", but this is not an issue as they will be
            # recognized as Cython functions anyway.
            attrs = dict(name=node.entry.func_cname)
            self.tb.start('StepIntoFunction', attrs=attrs)
            self.tb.end('StepIntoFunction')

        self.visitchildren(node)
        return node

    def serialize_modulenode_as_function(self, node):
        """
        Serialize the module-level code as a function so the debugger will know
        it's a "relevant frame" and it will know where to set the breakpoint
        for 'break modulename'.
        """
        name = node.full_module_name.rpartition('.')[-1]

        # Module init entry points for Python 2 and Python 3.
        cname_py2 = 'init' + name
        cname_py3 = 'PyInit_' + name

        py2_attrs = dict(
            name=name,
            cname=cname_py2,
            pf_cname='',
            # Ignore the qualified_name, breakpoints should be set using
            # `cy break modulename:lineno` for module-level breakpoints.
            qualified_name='',
            lineno='1',
            is_initmodule_function="True",
        )

        py3_attrs = dict(py2_attrs, cname=cname_py3)

        self._serialize_modulenode_as_function(node, py2_attrs)
        self._serialize_modulenode_as_function(node, py3_attrs)

    def _serialize_modulenode_as_function(self, node, attrs):
        # Emit one 'Function' element for the module init entry point
        # described by 'attrs'.
        self.tb.start('Function', attrs=attrs)

        self.tb.start('Locals')
        self.serialize_local_variables(node.scope.entries)
        self.tb.end('Locals')

        # Module "functions" take no arguments.
        self.tb.start('Arguments')
        self.tb.end('Arguments')

        self.tb.start('StepIntoFunctions')
        self.register_stepinto = True
        self.visitchildren(node)
        self.register_stepinto = False
        self.tb.end('StepIntoFunctions')

        self.tb.end('Function')

    def serialize_local_variables(self, entries):
        # Emit one 'LocalVar' element per entry that has a C name.
        for entry in entries.values():
            if not entry.cname:
                # not a local variable
                continue
            if entry.type.is_pyobject:
                vartype = 'PythonObject'
            else:
                vartype = 'CObject'

            if entry.from_closure:
                # We're dealing with a closure where a variable from an outer
                # scope is accessed, get it from the scope object.
                cname = '%s->%s' % (Naming.cur_scope_cname,
                                    entry.outer_entry.cname)

                qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name,
                                      entry.scope.name,
                                      entry.name)
            elif entry.in_closure:
                cname = '%s->%s' % (Naming.cur_scope_cname,
                                    entry.cname)
                qname = entry.qualified_name
            else:
                cname = entry.cname
                qname = entry.qualified_name

            if not entry.pos:
                # this happens for variables that are not in the user's code,
                # e.g. for the global __builtins__, __doc__, etc. We can just
                # set the lineno to 0 for those.
                lineno = '0'
            else:
                lineno = str(entry.pos[1])

            attrs = dict(
                name=entry.name,
                cname=cname,
                qualified_name=qname,
                type=vartype,
                lineno=lineno)

            self.tb.start('LocalVar', attrs)
            self.tb.end('LocalVar')
| hhsprings/cython | Cython/Compiler/ParseTreeTransforms.py | Python | apache-2.0 | 117,668 | [
"VisIt"
] | dab751da703a425f9c2127dcfe92e0f1389cebb41f4bc822262729526a25f2c1 |
'''
Created on Oct 12, 2015
@author: Aaron Klein
'''
import logging
import george
import numpy as np
from scipy import optimize
from copy import deepcopy
from robo.models.base_model import BaseModel
logger = logging.getLogger(__name__)
class GaussianProcess(BaseModel):

    def __init__(self, kernel, prior=None,
                 noise=1e-3, use_gradients=False,
                 basis_func=None, dim=None, normalize_output=False,
                 *args, **kwargs):
        """
        Interface to the george GP library. The GP hyperparameter are obtained
        by optimizing the marginal loglikelihood.

        Parameters
        ----------
        kernel : george kernel object
            Specifies the kernel that is used for all Gaussian Process
        prior : prior object
            Defines a prior for the hyperparameters of the GP. Make sure that
            it implements the Prior interface.
        noise : float
            Noise term that is added to the diagonal of the covariance matrix
            for the cholesky decomposition.
        use_gradients : bool
            Use gradient information to optimize the negative log likelihood
        basis_func : callable, optional
            Basis function applied to column `dim` of the inputs before
            training/prediction (used by Fabolas to map s to (1 - s)^2).
        dim : int, optional
            Index of the input column `basis_func` is applied to.
        normalize_output : bool
            If True, the targets are standardized (zero mean, unit std)
            before fitting.
        """
        self.kernel = kernel
        self.model = None
        self.prior = prior
        self.noise = noise
        self.use_gradients = use_gradients
        self.basis_func = basis_func
        self.dim = dim
        self.normalize_output = normalize_output
        self.X = None
        self.Y = None

    def scale(self, x, new_min, new_max, old_min, old_max):
        """Linearly map `x` from [old_min, old_max] to [new_min, new_max]."""
        return ((new_max - new_min) *
                (x - old_min) / (old_max - old_min)) + new_min

    def train(self, X, Y, do_optimize=True):
        """
        Computes the cholesky decomposition of the covariance of X and
        estimates the GP hyperparameter by optimizing the marginal
        loglikelihood. The prior mean of the GP is set to the empirical
        mean of the X.

        Parameters
        ----------
        X: np.ndarray (N, D)
            Input data points. The dimensionality of X is (N, D),
            with N as the number of points and D is the number of features.
        Y: np.ndarray (N, 1)
            The corresponding target values.
        do_optimize: boolean
            If set to true the hyperparameters are optimized.
        """
        self.X = X

        # For Fabolas we transform s to (1 - s)^2
        if self.basis_func is not None:
            self.X = deepcopy(X)
            self.X[:, self.dim] = self.basis_func(self.X[:, self.dim])

        self.Y = Y
        if self.normalize_output:
            # Standardize the targets; predictions stay in the scaled space.
            self.Y_mean = np.mean(Y)
            self.Y_std = np.std(Y)
            self.Y = (Y - self.Y_mean) / self.Y_std

        # Use the empirical mean of the data as mean for the GP
        self.mean = np.mean(self.Y, axis=0)
        self.model = george.GP(self.kernel, mean=self.mean)

        if do_optimize:
            self.hypers = self.optimize()
            # Last hyperparameter is always the (log) noise.
            self.model.kernel[:] = self.hypers[:-1]
            self.noise = np.exp(self.hypers[-1])  # sigma^2
        else:
            self.hypers = self.model.kernel[:]
            self.hypers = np.append(self.hypers, np.log(self.noise))

        logger.info("HYPERS: " + str(self.hypers))
        self.model.compute(self.X, yerr=np.sqrt(self.noise))

    def get_noise(self):
        """Return the current noise variance sigma^2."""
        return self.noise

    def nll(self, theta):
        """
        Returns the negative marginal log likelihood (+ the prior) for
        a hyperparameter configuration theta.
        (negative because we use scipy minimize for optimization)

        Parameters
        ----------
        theta : np.ndarray(H)
            Hyperparameter vector. Note that all hyperparameter are
            on a log scale.

        Returns
        ----------
        float
            lnlikelihood + prior
        """
        # Specify bounds to keep things sane
        if np.any((-20 > theta) + (theta > 20)):
            return 1e25

        # The last entry of theta is always the noise
        self.model.kernel[:] = theta[:-1]
        noise = np.exp(theta[-1])  # sigma^2

        self.model.compute(self.X, yerr=np.sqrt(noise))
        ll = self.model.lnlikelihood(self.Y[:, 0], quiet=True)

        # Add prior
        if self.prior is not None:
            ll += self.prior.lnprob(theta)

        # We add a minus here because scipy is minimizing
        return -ll if np.isfinite(ll) else 1e25

    def grad_nll(self, theta):
        """Gradient of `nll` w.r.t. theta (kernel hypers + log noise)."""
        self.model.kernel[:] = theta[:-1]
        noise = np.exp(theta[-1])

        self.model.compute(self.X, yerr=np.sqrt(noise))
        self.model._compute_alpha(self.Y[:, 0])
        K_inv = self.model.solver.apply_inverse(np.eye(self.model._alpha.size),
                                                in_place=True)

        # The gradients of the Gram matrix, for the noise this is just
        # the identiy matrix
        Kg = self.model.kernel.gradient(self.model._x)
        Kg = np.concatenate((Kg, np.eye(Kg.shape[0])[:, :, None]), axis=2)

        # Calculate the gradient.
        A = np.outer(self.model._alpha, self.model._alpha) - K_inv
        g = 0.5 * np.einsum('ijk,ij', Kg, A)

        if self.prior is not None:
            g += self.prior.gradient(theta)
        return -g

    def optimize(self):
        """Minimize the negative log likelihood; return the best theta."""
        # Start optimization from the previous hyperparameter configuration
        p0 = self.model.kernel.vector
        p0 = np.append(p0, np.log(self.noise))

        if self.use_gradients:
            bounds = [(-10, 10)] * (len(self.kernel) + 1)
            theta, _, _ = optimize.fmin_l_bfgs_b(self.nll, p0,
                                                 fprime=self.grad_nll,
                                                 bounds=bounds)
        else:
            results = optimize.minimize(self.nll, p0)
            theta = results.x

        return theta

    def predict_variance(self, X1, X2):
        r"""
        Predicts the variance between two test points X1, X2 by
           math: \sigma(X_1, X_2) = k_{X_1,X_2} - k_{X_1,X} * (K_{X,X}
                       + \sigma^2*\mathds{I})^-1 * k_{X,X_2})

        Parameters
        ----------
        X1: np.ndarray (N, D)
            First test point
        X2: np.ndarray (N, D)
            Second test point
        Returns
        ----------
        np.array(N,1)
            predictive variance
        """
        # Predict jointly over the concatenated points and read off the
        # cross-covariance column.
        x_ = np.concatenate((X1, X2))
        _, var = self.predict(x_)
        var = var[:-1, -1, np.newaxis]
        return var

    def predict(self, X, **kwargs):
        r"""
        Returns the predictive mean and variance of the objective function at
        the specified test point.

        Parameters
        ----------
        X: np.ndarray (N, D)
            Input test points

        Returns
        ----------
        np.array(N,1)
            predictive mean
        np.array(N,1)
            predictive variance
        """
        # For Fabolas we transform s to (1 - s)^2
        if self.basis_func is not None:
            X_test = deepcopy(X)
            X_test[:, self.dim] = self.basis_func(X_test[:, self.dim])
        else:
            X_test = X

        if self.model is None:
            logger.error("The model has to be trained first!")
            raise ValueError

        mu, var = self.model.predict(self.Y[:, 0], X_test)

        # Clip negative variances and set them to the smallest
        # positive float values
        if var.shape[0] == 1:
            var = np.clip(var, np.finfo(var.dtype).eps, np.inf)
        else:
            var[np.diag_indices(var.shape[0])] = np.clip(
                var[np.diag_indices(var.shape[0])],
                np.finfo(var.dtype).eps, np.inf)
            var[np.where((var < np.finfo(var.dtype).eps) & (var > -np.finfo(var.dtype).eps))] = 0

        return mu[:, np.newaxis], var

    def sample_functions(self, X_test, n_funcs=1):
        """
        Samples F function values from the current posterior at the N
        specified test point.

        Parameters
        ----------
        X_test: np.ndarray (N, D)
            Input test points
        n_funcs: int
            Number of function values that are drawn at each test point.

        Returns
        ----------
        np.array(F,N)
            The F function values drawn at the N test points.
        """
        return self.model.sample_conditional(self.Y[:, 0], X_test, n_funcs)

    def predictive_gradients(self, X_test):
        """
        Gradients of the predictive mean and variance at the test points.

        BUG fix: the original referenced ``self.m``, an attribute that is
        never assigned anywhere in this class, so every call raised
        AttributeError. ``self.model`` is the george GP used by all other
        methods. NOTE(review): confirm that the installed george version
        actually provides ``predictive_gradients``.
        """
        dmdx, dvdx = self.model.predictive_gradients(X_test)
        return dmdx[:, 0, :], dvdx
"Gaussian"
] | 6483c580b2a15b373cafd06a7468866d972542d716628fa802ae080aefd4f81a |
#!/usr/bin/python3
#-*- coding:utf-8 -*-
'''
main part
'''
import asyncio
import sys
from itertools import zip_longest
from collections import namedtuple
import traceback
import aiohttp
from .taskqueue import TaskQueue, makeTask
from .log import logging
DEFAULT_HEADER = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
}
_Request = namedtuple(
"Request", ["method", "url", "header", "data", "callback"])
def Request(method, url, header=DEFAULT_HEADER, data=None, callback=None):
return _Request(method, url, header, data, callback)
class Spider:
'''
spider class
'''
# Log levels,
CRITICAL = logging.CRITICAL
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
default_config = {
# How many requests can be run in parallel
"concurrent": 5,
# How many download requests can be run in parallel
"download_concurrent": 5,
# How long to wait after each request
"delay": 0,
# A stream to where internal logs are sent, optional
"logs": sys.stdout,
# Re - visit visited URLs, false by default
"allowDuplicates": False,
#
"chunk_size": 1024,
}
def __init__(self, **kwargs):
'''
config default
'''
self.config = Spider.default_config.copy()
self.config.update(kwargs.get("config", {}))
self.loop = kwargs.get("loop", None) # if no loop, get one
if self.loop is None or not isinstance(self.loop, asyncio.BaseEventLoop):
self.loop = asyncio.get_event_loop()
'''
if no session , new one.
Providing a session is convenient when you spider some that you need to login, you can just pass a logged-in session.
Of course, you can provide a function which will be call before all spider requests to log in.
'''
self.session = kwargs.get("session", None)
if self.session is None or not isinstance(self.session, aiohttp.ClientSession):
self.session = aiohttp.ClientSession(loop=self.loop)
'''
The methods contained here will be called before any requests.
For example,if spider need to login, you may need provide login method.
The variable `will_continue` stands for whether this spider continue or not after all `before_start_funcs` called.
'''
self.before_start_funcs = []
self.will_continue = True
'''
The methods contained here will be called after all requests.
For example,if spider need to logout, you may need provide logout method.
'''
self.after_crawl_funcs = []
'''
spider's logger
'''
self.logger = logging.getLogger(self.__class__.__name__)
'''
The reasons that only sipder's download_pending uses TaskQueue are:
1. TaskQueue is still not stable.
2. When there are too many request waited to send, it has to keep many contexts for each waiting request
including the method request_with_callback. So the request queue still use asyncio.Queue.
'''
self.pending = asyncio.Queue()
# downloading concurrent should not be too large.
self.download_pending = TaskQueue(
maxsize=self.config["download_concurrent"])
self.visited = set()
# you cannot call method `start` twice.
self.running = False
# active tasks
self.active = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._cancel()
if not self.session.closed:
self.loop.run_until_complete(self.session.close())
if not self.loop.is_closed():
self.loop.stop()
self.loop.run_forever()
self.loop.close()
def _cancel(self):
for task in self.active:
task.cancel()
def log(self, lvl, msg):
self.logger.log(lvl, msg)
def add_request(self, url, callback, method="GET", **kwargs):
'''
Add request wo queue.
:param url: request's url
:param callback: which will be called after request finished.
:param method: request's method
:param kwargs: additional parameters for request
:return: None
'''
if url in self.visited:
return
if not self.config["allowDuplicates"]:
self.visited.add(url)
request = Request(method, url, callback=callback)
self.pending.put_nowait(request)
self.log(logging.INFO, "Add url: {} to queue.".format(url))
def add_requests(self, urls, callbacks):
'''
add many targets and callback once.
if targets are more than callbacks, None will be used to fillup.
if targets are less than callbacks, callbacks will be cut.
'''
if isinstance(urls, (list, tuple)) and isinstance(callbacks, (list, tuple)):
if len(urls) >= len(callbacks):
pass
else:
callbacks = callbacks[:len(urls)]
for url, callback in zip_longest(urls, callbacks):
self.add_request(url, callback)
elif isinstance(urls, str):
self.add_request(urls, callbacks)
def before_start(self, func):
'''
add function called before start
:param func:
:return:
'''
self.before_start_funcs.append(func)
return func
def after_spider(self, func):
self.after_crawl_funcs.append(func)
return func
async def try_trigger_before_start_functions(self):
for func in self.before_start_funcs:
if callable(func):
if asyncio.iscoroutinefunction(func):
await func(self)
else:
func(self)
async def try_trigger_after_crawl_functions(self):
for func in self.after_crawl_funcs:
if callable(func):
if asyncio.iscoroutinefunction(func):
await func(self)
else:
func(self)
async def load(self):
'''
pop request from queue to send
:return:
'''
try:
while True:
request = await self.pending.get()
self.log(logging.INFO,
"Loading url: {} from queue.".format(request.url))
await self.request_with_callback(request, request.callback)
self.pending.task_done()
except asyncio.CancelledError:
pass
async def request_with_callback(self, request: _Request, callback=None):
if not callback:
callback = request.callback
if callable(callback):
try:
async with self.session.request(request.method, request.url) as resp:
'''
if callback is a coroutine-function, the await is necessary.
if not, call_soon_threadsafe is better.
But why not coroutine ?
'''
if asyncio.iscoroutinefunction(callback):
await callback(resp)
else:
self.loop.call_soon_threadsafe(callback, resp)
self.log(logging.INFO, "Request [{method}] `{url}` finishend.(There are still {num})".format(
method=request.method, url=request.url, num=self.pending.qsize()))
except Exception as e:
self.log(logging.ERROR, "Error happened in request [{method}] `{url}`, Request is ignored.\n{error}".format(
error=traceback.format_exc(), url=request.url, method=request.method))
else:
self.log(logging.WARNING, "Callback for request [{method}] `{url}` is not callable. Request is ignored.".format(
url=request.url, method=request.method))
async def download(self, src, dst):
'''
async def save(resp, dst=dst):
with open(dst, "wb") as fd:
while True:
chunk = await resp.content.read()
if not chunk:
break
fd.write(chunk)
self.log(logging.INFO, "Target `{src}` download to {dst}".format(src=resp.url, dst=dst))
'''
#self.log(logging.INFO, "Add download task : {src}".format(src=src))
# await self.download_pending.put(makeTask(self.request_with_callback,
# Request("GET", src, callback=save)))
"""
Unfortunately, TaskQueue doesn't work well when there too many download tasks.
So raw request may be better.
"""
self.add_download(src, dst)
# await self.request_with_callback(Request("GET", src, callback=save))
def add_download(self, src, dst):
'''
add download task in a synchronous way.
'''
async def save(resp, dst=dst):
with open(dst, "wb") as fd:
while True:
chunk = await resp.content.read(self.config["chunk_size"])
if not chunk:
break
fd.write(chunk)
self.log(logging.INFO, "Target `{src}` download to {dst}".format(
src=resp.url, dst=dst))
self.log(logging.INFO, "Add download task : {src}".format(src=src))
self.download_pending.add_task(
makeTask(self.request_with_callback, Request("GET", src, callback=save)))
async def __start(self):
for _ in range(self.config["concurrent"]):
self.active.append(asyncio.ensure_future(
self.load(), loop=self.loop))
self.log(
logging.INFO, "Spider has been started. Waiting for all requests and download tasks to finish.")
await self.pending.join()
self.log(logging.INFO, "Requests have finished. Waiting for download task.")
await self.download_pending.join()
def start(self, urls, callbacks):
if self.running:
self.log("Warning", "Spider is running now.")
return
self.running = True
self.add_requests(urls, callbacks)
self.loop.run_until_complete(self.try_trigger_before_start_functions())
# before_start_functions can change will_continue vaule.
if self.will_continue:
self.log(logging.INFO, "Spider Start.")
self.loop.run_until_complete(self.__start())
else:
self.log(logging.WARN,
"Spider canceled by the last `before_start_function`.")
self.running = False
self.log(logging.INFO, "All tasks done.Spider starts to shutdown.")
self.loop.run_until_complete(self.try_trigger_after_crawl_functions())
self.log(logging.INFO, "Spider shutdown.")
| HeartUnchange/aiospider | aiospider/spider.py | Python | mit | 11,168 | [
"VisIt"
] | cf42b0f0ca544ac2fee28bf5f9394ca3b929bec18fec70f38850917d6a0b39d2 |
# RoboControl.py
# Contains class for connecting to a scenery robot network and send data as a thread-based class
# Version - 0.1
# Author - Brian Nguyen
# Requires Python 2.7
# Requires XInputJoystick library
import sys
import time
import threading
from collections import deque
from xinput import XInputJoystick
from operator import itemgetter, attrgetter
class RoboControl(threading.Thread):
    """Thread that polls an XInput gamepad and exposes real-time joystick
    axis values, connection state and a START-button-toggled recording
    flag to a parent application."""

    def __init__(self, DEBUG=False):
        threading.Thread.__init__(self)
        # Set debugging flag
        self.debug = DEBUG
        # Setup variables to maintain real time left and right axis values
        self.left_value = 0
        self.right_value = 0
        # Setup connection flag as initially False
        self.connected = False
        # Maps internal button integer number to human-readable syntax
        self.button_map = {
            "1": "DUP", "2": "DDOWN", "3": "DLEFT", "4": "DRIGHT",
            "5": "START", "6": "BACK", "7": "LJOYTOGGLE", "8": "RJOYTOGGLE",
            "9": "LB", "10": "RB", "13": "A", "14": "B", "15": "X", "16": "Y",
        }
        # Maps human-readable syntax to internal button number
        self.inv_button_map = {v: k for k, v in self.button_map.items()}
        self.joysticks = XInputJoystick.enumerate_devices()
        self.j = None
        # Flag for running thread to exit
        self.exit = False
        # Flag for parent app to keep track of recording
        self.recording = False
        # Use start button as recording button. When first pressed, recording is true, when pressed again recording is false
        self.start_last_state = False
        self.start_curr_state = False
        # Queue of commands
        self.commands = deque()
        # Set status string that can be passed to parent GUI
        self.status = 'Controller Connection: Disconnected'

    def is_connected(self):
        """
        Returns True if controller is connected and false if disconnected
        """
        return self.connected

    def is_recording(self):
        """
        Returns the current recording flag (toggled by the START button).
        """
        return self.recording

    def connect(self):
        """
        Attempt connection to a joystick
        Returns True if connection succeeded
        Returns False if connection fails
        """
        # Grabs 1st available gamepad, logging changes to the screen
        self.joysticks = XInputJoystick.enumerate_devices()
        self.device_numbers = list(map(attrgetter('device_number'), self.joysticks))
        if self.debug:
            print('found %d controller devices: %s' % (len(self.joysticks), self.device_numbers))
        # Attempt to connect to first joystick
        if not self.joysticks:
            if self.debug:
                # NOTE(review): self.status is only updated inside the
                # debug branch here; presumably the GUI should see
                # "No controller found" in normal mode too - confirm.
                self.status = 'Controller Connection: No controller found'
                print("No joysticks found, exiting")
            self.connected = False
            return False
        else:
            self.j = self.joysticks[0]
            self.connected = True
            self.status = 'Controller Connection: Connected'

            """ Define event handlers for axis and buttons """
            @self.j.event
            def on_button(button, pressed):
                #self.exit = (button == self.get_button_num('BACK') and pressed)
                if button == self.get_button_num('START'):
                    self.start_curr_state = pressed
                    # Toggle recording on each fresh press of START.
                    if self.start_curr_state != self.start_last_state and pressed:
                        self.recording = not self.recording
                    self.start_last_state = self.start_curr_state

            @self.j.event
            def on_axis(axis, value):
                # NOTE(review): left_speed/right_speed are assigned but
                # never used in this handler.
                left_speed = 0
                right_speed = 0

                if axis == 'l_thumb_y':
                    # Maps analog values of -0.5 to 0.5 to -127 to 127 for motor control
                    value_convert = int(round(sensitivity_scale(value, 0.5, -0.5, 0.5, -127, 127)))
                    # Account for noisy deadzone in middle of joysticks
                    if (abs(value_convert) <= 10):
                        value_convert = 0
                    self.left_value = value_convert
                elif axis == 'r_thumb_y':
                    # Maps analog values of -0.5 to 0.5 to -127 to 127 for motor control
                    value_convert = int(round(sensitivity_scale(value, 0.5, -0.5, 0.5, -127, 127)))
                    # Account for noisy deadzone in middle of joysticks
                    if (abs(value_convert) <= 10):
                        value_convert = 0
                    self.right_value = value_convert

            if self.debug:
                print('Using device %d' % self.j.device_number)
                print('Press back button on controller to quit.')
            return True

    def disconnect(self):
        """
        Disconnects controller
        """
        if self.debug:
            print("Disconnecting controller")
        self.connected = False

    def get_button_num(self, name):
        """
        Returns internal button number based on name
        Ex. "START" -> 5
        """
        return int(self.inv_button_map[name])

    def run(self):
        """
        Continually runs thread as long as exit flag is False
        """
        while not self.exit:
            # If controller connected, dispatch events, otherwise just loop
            if self.connected:
                try:
                    # Register any controller events
                    self.j.dispatch_events()
                except RuntimeError:
                    # Controller was unplugged mid-run.
                    print("Controller is not connected!")
                    self.status = 'Controller Connection: Controller Disconnected'
                    self.connected = False
            time.sleep(0.01)
        print("Control Thread Ending")

    def close(self):
        """
        Sets the exit flag to end the main loop
        """
        self.connected = False
        self.exit = True
        return True
def sensitivity_scale(x_in, sensitivity, original_min, original_max, desired_min, desired_max):
    """
    Map ``x_in`` from [original_min, original_max] onto
    [desired_min, desired_max] through a blended cubic "sensitivity" curve.

    ``sensitivity`` = 0 gives a purely linear mapping; 1 gives a purely
    cubic response (soft near the centre, steep at the ends); values in
    between blend the two.
    """
    # Normalize the input so the original range spans [-1, 1].
    scale_to_1 = 2 / (original_max - original_min)
    # Blend the linear and cubic responses, then undo the normalization.
    # (The original also computed an unused 'linear_scale' value, which
    # has been removed - dead code.)
    x_shaped = sensitivity * (x_in * scale_to_1) ** 3 + (1 - sensitivity) * (x_in * scale_to_1)
    x_shaped /= scale_to_1
    # Map the shaped value onto the desired output range.
    return ((desired_max - desired_min) * (x_shaped - original_min)) / (original_max - original_min) + desired_min
| b-nguyen/scenery-robot | RoboControl.py | Python | mit | 5,566 | [
"Brian"
] | 2de7d58528f4699839a8b806964dcae6968846c03fcf16a6a56d269b0cf84f87 |
# -*- coding: utf-8 -*-
'''
Pupil Player Third Party Plugins by cpicanco
Copyright (C) 2016 Rafael Picanço.
The present file is distributed under the terms of the GNU General Public License (GPL v3.0).
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cv2
from pyglui import ui
from plugin import Plugin
blue, green, red = 0, 1, 2  # channel indices into frame.img (OpenCV BGR order)
class Filter_Opencv_Adaptative_Threshold(Plugin):
    """
    Apply cv2.adaptativeThreshold in each channel of the (world) frame.img
    """
    uniqueness = "not_unique"

    def __init__(self, g_pool, threshold=255, thresh_mode="BINARY",
                 adaptive_method="GAUSSIAN", block_size=5, constant=-1, blur=3):
        super(Filter_Opencv_Adaptative_Threshold, self).__init__(g_pool)
        # run before all plugins
        # self.order = .1

        # run after all plugins
        self.order = .99

        # initialize empty menu
        self.menu = None

        # filter properties (mirrored by the GUI sliders/selectors below)
        self.threshold = threshold
        self.thresh_mode = thresh_mode
        self.adaptive_method = adaptive_method
        self.block_size = block_size
        self.constant = constant
        self.blur = blur

    def update(self, frame, events):
        # Threshold each BGR channel of the current frame in place.
        # thresh_mode
        if self.thresh_mode == "NONE":
            # Filter disabled: leave the frame untouched.
            return
        if self.thresh_mode == "BINARY":
            cv2_thresh_mode = cv2.THRESH_BINARY
        if self.thresh_mode == "BINARY_INV":
            cv2_thresh_mode = cv2.THRESH_BINARY_INV

        if self.adaptive_method == "MEAN":
            cv2_adaptive_method = cv2.ADAPTIVE_THRESH_MEAN_C
        if self.adaptive_method == "GAUSSIAN":
            cv2_adaptive_method = cv2.ADAPTIVE_THRESH_GAUSSIAN_C

        # apply the threshold to each channel
        for i, channel in enumerate((frame.img[:, :, blue], frame.img[:, :, green], frame.img[:, :, red])):
            if self.blur > 1:
                # Optional pre-smoothing; blur is kept odd by the slider step.
                channel = cv2.GaussianBlur(channel, (self.blur, self.blur), 0)
            edg = cv2.adaptiveThreshold(channel,
                                        maxValue=self.threshold,
                                        adaptiveMethod=cv2_adaptive_method,
                                        thresholdType=cv2_thresh_mode,
                                        blockSize=self.block_size,
                                        C=self.constant)
            frame.img[:, :, i] = edg

    def init_gui(self):
        # initialize the menu
        self.menu = ui.Scrolling_Menu('Adaptative Threshold')
        # add menu to the window
        self.g_pool.gui.append(self.menu)

        # append elements to the menu
        self.menu.append(ui.Button('remove', self.unset_alive))
        self.menu.append(ui.Info_Text('Filter Properties'))
        self.menu.append(ui.Selector('thresh_mode', self, label='Thresh Mode', selection=["NONE", "BINARY", "BINARY_INV"]))
        self.menu.append(ui.Selector('adaptive_method', self, label='Adaptive Method', selection=["GAUSSIAN", "MEAN"]))
        self.menu.append(ui.Slider('threshold', self, min=0, step=1, max=255, label='Threshold'))
        self.menu.append(ui.Slider('block_size', self, min=3, step=2, max=55, label='Block Size'))
        self.menu.append(ui.Slider('constant', self, min=-30, step=1, max=30, label='Constant'))
        self.menu.append(ui.Slider('blur', self, min=1, step=2, max=55, label='Blur'))

    def deinit_gui(self):
        # Remove the menu from the window, if present.
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu = None

    def unset_alive(self):
        # Mark the plugin for removal by the plugin framework.
        self.alive = False

    def get_init_dict(self):
        # persistent properties throughout sessions
        return {'threshold': self.threshold,
                'thresh_mode': self.thresh_mode,
                'adaptive_method': self.adaptive_method,
                'block_size': self.block_size,
                'constant': self.constant,
                'blur': self.blur}

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """
        self.deinit_gui()
"Gaussian"
] | d083063ef78fff283a775d25d075a9ad4bd5794a6b74cbc930fc673dc89d0ce6 |
# Copyright (C) 2013 Ben Morris (ben@bendmorris.com)
# based on code by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes corresponding to NeXML trees.
See classes in `Bio.Nexus`: Trees.Tree, Trees.NodeData, and Nodes.Chain.
"""
__docformat__ = "restructuredtext en"
from Bio.Phylo import BaseTree
class Tree(BaseTree.Tree):
    """NeXML Tree object."""

    def __init__(self, root=None, rooted=False, id=None, name=None, weight=1.0):
        """Initialize a NeXML tree; ``weight`` is stored as a tree attribute."""
        BaseTree.Tree.__init__(self, root=root or Clade(),
                               rooted=rooted, id=id, name=name)
        self.weight = weight
class Clade(BaseTree.Clade):
    """NeXML Clade (sub-tree) object."""

    def __init__(self, branch_length=1.0, name=None, clades=None,
                 confidence=None, comment=None, **kwargs):
        """Initialize a clade; extra keyword arguments become attributes."""
        BaseTree.Clade.__init__(self, branch_length=branch_length,
                                name=name, clades=clades, confidence=confidence)
        self.comment = comment
        # NeXML may attach arbitrary per-clade metadata; store each extra
        # keyword argument directly on the instance.
        for key, value in kwargs.items():
            setattr(self, key, value)
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Phylo/NeXML.py | Python | gpl-2.0 | 1,219 | [
"Biopython"
] | 7d5bd08dabd23685fc4ac941eca3bf4ee675d248cc764384acb7f52d39d86fbf |
#!/usr/bin/env python
"""
Rotate shape
"""
from __future__ import print_function
import argparse
import time
import sys
import re
from icqsol.shapes.icqShapeManager import ShapeManager
from icqsol import util
# time stamp
tid = re.sub(r'\.', '', str(time.time()))
parser = argparse.ArgumentParser(description='Translate shape.')
parser.add_argument('--input', dest='input', default='',
help='List of input files (PLY or VTK)')
parser.add_argument('--angle', dest='angle', type=float, default=0.0,
help='Specify rotation angle in degrees')
parser.add_argument('--axis', dest='axis', default="0., 0., 1.",
help='Specify rotation axis (3 floating point numbers)')
parser.add_argument('--ascii', dest='ascii', action='store_true',
help='Save data in ASCII format (default is binary)')
parser.add_argument('--output',
dest='output',
default='createCompositeShape-{0}.vtk'.format(tid),
help='Output file.')
args = parser.parse_args()
if not args.input:
print('ERROR: must specify one input file with --input <file>')
sys.exit(3)
# Get the format of the input - either vtk or ply.
file_format = util.getFileFormat(args.input)
if args.ascii:
file_type = util.ASCII
else:
file_type = util.BINARY
if file_format == util.VTK_FORMAT:
# We have a VTK file, so get the dataset type.
vtk_dataset_type = util.getVtkDatasetType(args.input)
shape_mgr = ShapeManager(file_format=file_format, vtk_dataset_type=vtk_dataset_type)
else:
shape_mgr = ShapeManager(file_format=file_format)
pdata = shape_mgr.loadAsVtkPolyData(args.input)
axis = eval(args.axis)
# Rotate.
shape_mgr.rotateVtkPolyData(pdata, angleDeg=args.angle, axis=axis)
if args.output:
shape_mgr.saveVtkPolyData(pdata, file_name=args.output, file_type=file_type)
| gregvonkuster/icqsol | examples/rotateShape.py | Python | mit | 1,899 | [
"VTK"
] | c06e4b9a1103cacd3fcef800a2d13511227ca985e4daa5a460064103ff089703 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import collections
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Specie, DummySpecie,\
get_el_sp
from monty.json import MSONable
from pymatgen.util.coord_utils import pbc_diff
from pymatgen.core.composition import Composition
"""
This module defines classes representing non-periodic and periodic sites.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 17, 2012"
class Site(collections.Hashable, MSONable):
    """
    A generalized *non-periodic* site. This is essentially a composition
    at a point in space, with some optional properties associated with it. A
    Composition is used to represent the atoms and occupancy, which allows for
    disordered site representation. Coords are given in standard cartesian
    coordinates.
    """

    # Absolute tolerance used by __eq__ when comparing cartesian coordinates.
    position_atol = 1e-5

    def __init__(self, atoms_n_occu, coords, properties=None):
        """
        Create a *non-periodic* site.

        Args:
            atoms_n_occu: Species on the site. Can be:
                i.  A Composition object (preferred)
                ii. An element / specie specified either as a string
                    symbols, e.g. "Li", "Fe2+", "P" or atomic numbers,
                    e.g., 3, 56, or actual Element or Specie objects.
                iii. Dict of elements/species and occupancies, e.g.,
                    {"Fe" : 0.5, "Mn":0.5}. This allows the setup of
                    disordered structures.
            coords: Cartesian coordinates of site.
            properties: Properties associated with the site as a dict, e.g.
                {"magmom": 5}. Defaults to None.

        Raises:
            ValueError: if total occupancy exceeds 1 (beyond
                Composition.amount_tolerance).
        """
        if isinstance(atoms_n_occu, Composition):
            # Compositions are immutable, so don't need to copy (much much faster)
            self._species = atoms_n_occu
            # Kludgy lookup of private attribute, but its faster
            totaloccu = atoms_n_occu._natoms
            if totaloccu > 1 + Composition.amount_tolerance:
                raise ValueError("Species occupancies sum to more than 1!")
            # Another kludgy lookup of private attribute, but its faster
            self._is_ordered = totaloccu == 1 and len(self._species._data) == 1
        else:
            try:
                # Single species: string symbol, atomic number, Element/Specie.
                self._species = Composition({get_el_sp(atoms_n_occu): 1})
                self._is_ordered = True
            except TypeError:
                # Mapping of species -> occupancy.
                self._species = Composition(atoms_n_occu)
                totaloccu = self._species.num_atoms
                if totaloccu > 1 + Composition.amount_tolerance:
                    raise ValueError("Species occupancies sum to more than 1!")
                self._is_ordered = totaloccu == 1 and len(self._species) == 1
        self._coords = coords
        self._properties = properties if properties else {}

    @property
    def properties(self):
        """
        Returns a view of properties as a dict.
        """
        return {k: v for k, v in self._properties.items()}

    def __getattr__(self, a):
        # overriding getattr doesn't play nice with pickle, so we
        # can't use self._properties
        p = object.__getattribute__(self, '_properties')
        if a in p:
            return p[a]
        raise AttributeError(a)

    def distance(self, other):
        """
        Get distance between two sites.

        Args:
            other: Other site.

        Returns:
            Distance (float)
        """
        return np.linalg.norm(other.coords - self.coords)

    def distance_from_point(self, pt):
        """
        Returns distance between the site and a point in space.

        Args:
            pt: Cartesian coordinates of point.

        Returns:
            Distance (float)
        """
        return np.linalg.norm(np.array(pt) - self._coords)

    @property
    def species_string(self):
        """
        String representation of species on the site.
        """
        if self._is_ordered:
            return list(self._species.keys())[0].__str__()
        else:
            # Disordered site: stable, sorted "Sp:occu" listing.
            sorted_species = sorted(self._species.keys())
            return ", ".join(["{}:{:.3f}".format(sp, self._species[sp])
                              for sp in sorted_species])

    @property
    def species_and_occu(self):
        """
        The species at the site, i.e., a Composition mapping type of
        element/species to occupancy.
        """
        return self._species

    @property
    def specie(self):
        """
        The Specie/Element at the site. Only works for ordered sites. Otherwise
        an AttributeError is raised. Use this property sparingly. Robust
        design should make use of the property species_and_occu instead.

        Raises:
            AttributeError if Site is not ordered.
        """
        if not self._is_ordered:
            raise AttributeError("specie property only works for ordered "
                                 "sites!")
        return list(self._species.keys())[0]

    @property
    def coords(self):
        """
        A copy of the cartesian coordinates of the site as a numpy array.
        """
        return np.copy(self._coords)

    @property
    def is_ordered(self):
        """
        True if site is an ordered site, i.e., with a single species with
        occupancy 1.
        """
        return self._is_ordered

    @property
    def x(self):
        """
        Cartesian x coordinate
        """
        return self._coords[0]

    @property
    def y(self):
        """
        Cartesian y coordinate
        """
        return self._coords[1]

    @property
    def z(self):
        """
        Cartesian z coordinate
        """
        return self._coords[2]

    def __getitem__(self, el):
        """
        Get the occupancy for element
        """
        return self._species[el]

    def __eq__(self, other):
        """
        Site is equal to another site if the species and occupancies are the
        same, and the coordinates are the same to some tolerance. numpy
        function `allclose` is used to determine if coordinates are close.
        """
        if other is None:
            return False
        return self._species == other._species and \
            np.allclose(self._coords, other._coords,
                        atol=Site.position_atol) and \
            self._properties == other._properties

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        """
        Minimally effective hash function that just distinguishes between Sites
        with different elements.
        """
        # Hash depends only on the atomic numbers present, which is
        # consistent with __eq__ (equal sites share species, hence hashes).
        return sum([el.Z for el in self._species.keys()])

    def __contains__(self, el):
        return el in self._species

    def __repr__(self):
        return "Site: {} ({:.4f}, {:.4f}, {:.4f})".format(
            self.species_string, *self._coords)

    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity. Very
        useful for getting correct formulas. For example, FeO4PLi is
        automatically sorted in LiFePO4.
        """
        if self._species.average_electroneg < other._species.average_electroneg:
            return True
        if self._species.average_electroneg > other._species.average_electroneg:
            return False
        # Tie-break on the species string for a deterministic total order.
        if self.species_string < other.species_string:
            return True
        if self.species_string > other.species_string:
            return False
        return False

    def __str__(self):
        return "{} {}".format(self._coords, self.species_string)

    def as_dict(self):
        """
        Json-serializable dict representation for Site.
        """
        species_list = []
        for spec, occu in self._species.items():
            d = spec.as_dict()
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        # NOTE(review): "properties" is set both in the literal below and
        # again in the conditional -- redundant but harmless.
        d = {"name": self.species_string, "species": species_list,
             "xyz": [float(c) for c in self._coords],
             "properties": self._properties,
             "@module": self.__class__.__module__,
             "@class": self.__class__.__name__}
        if self._properties:
            d["properties"] = self._properties
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Create Site from dict representation
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Rebuild the appropriate species class from serialized fields.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(
                    sp_occu["element"]):
                sp = Specie.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecie.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        return cls(atoms_n_occu, d["xyz"], properties=props)
class PeriodicSite(Site, MSONable):
    """
    Extension of generic Site object to periodic systems.
    PeriodicSite includes a lattice system.
    """

    def __init__(self, atoms_n_occu, coords, lattice, to_unit_cell=False,
                 coords_are_cartesian=False, properties=None):
        """
        Create a periodic site.

        Args:
            atoms_n_occu: Species on the site. Can be:
                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.
                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            coords (3x1 array or sequence): Coordinates of site as fractional
                or cartesian coordinates.
            lattice: Lattice associated with the site
            to_unit_cell (bool): Translates fractional coordinate to the
                basic unit cell, i.e. all fractional coordinates satisfy 0
                <= a < 1. Defaults to False.
            coords_are_cartesian (bool): Set to True if you are providing
                cartesian coordinates. Defaults to False.
            properties (dict): Properties associated with the PeriodicSite,
                e.g., {"magmom":5}. Defaults to None.
        """
        self._lattice = lattice
        # Keep fractional and cartesian representations in sync from the start.
        if coords_are_cartesian:
            self._fcoords = self._lattice.get_fractional_coords(coords)
            c_coords = coords
        else:
            self._fcoords = coords
            c_coords = lattice.get_cartesian_coords(coords)
        if to_unit_cell:
            # Wrap fractional coords into [0, 1) and recompute cartesian.
            self._fcoords = np.mod(self._fcoords, 1)
            c_coords = lattice.get_cartesian_coords(self._fcoords)
        super(PeriodicSite, self).__init__(atoms_n_occu, c_coords, properties)

    def __hash__(self):
        """
        Minimally effective hash function that just distinguishes between Sites
        with different elements.
        """
        return sum([el.Z for el in self._species.keys()])

    @property
    def lattice(self):
        """
        The lattice associated with the site.
        """
        return self._lattice

    @property
    def frac_coords(self):
        """
        A copy of the fractional coordinates of the site.
        """
        return np.copy(self._fcoords)

    @property
    def a(self):
        """
        Fractional a coordinate
        """
        return self._fcoords[0]

    @property
    def b(self):
        """
        Fractional b coordinate
        """
        return self._fcoords[1]

    @property
    def c(self):
        """
        Fractional c coordinate
        """
        return self._fcoords[2]

    @property
    def to_unit_cell(self):
        """
        Copy of PeriodicSite translated to the unit cell.
        """
        return PeriodicSite(self._species, np.mod(self._fcoords, 1),
                            self._lattice, properties=self._properties)

    def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):
        """
        Returns True if sites are periodic images of each other.

        Args:
            other (PeriodicSite): Other site
            tolerance (float): Tolerance to compare fractional coordinates
            check_lattice (bool): Whether to check if the two sites have the
                same lattice.

        Returns:
            bool: True if sites are periodic images of each other.
        """
        if check_lattice and self._lattice != other._lattice:
            return False
        if self._species != other._species:
            return False
        # Periodic images differ by an integer lattice translation, so the
        # pbc-reduced fractional difference must vanish.
        frac_diff = pbc_diff(self._fcoords, other._fcoords)
        return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)

    def __eq__(self, other):
        # NOTE(review): unlike Site.__eq__, there is no None guard here;
        # comparing a PeriodicSite with None raises AttributeError.
        return self._species == other._species and \
            self._lattice == other._lattice and \
            np.allclose(self._coords, other._coords,
                        atol=Site.position_atol) and \
            self._properties == other._properties

    def __ne__(self, other):
        return not self.__eq__(other)

    def distance_and_image_from_frac_coords(self, fcoords, jimage=None):
        """
        Gets distance between site and a fractional coordinate assuming
        periodic boundary conditions. If the index jimage of two sites atom j
        is not specified it selects the j image nearest to the i atom and
        returns the distance and jimage indices in terms of lattice vector
        translations. If the index jimage of atom j is specified it returns the
        distance between the i atom and the specified jimage atom, the given
        jimage is also returned.

        Args:
            fcoords (3x1 array): fcoords to get distance from.
            jimage (3x1 array): Specific periodic image in terms of
                lattice translations, e.g., [1,0,0] implies to take periodic
                image that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            (distance, jimage): distance and periodic lattice translations
            of the other site for which the distance applies.
        """
        return self._lattice.get_distance_and_image(self._fcoords, fcoords,
                                                    jimage=jimage)

    def distance_and_image(self, other, jimage=None):
        """
        Gets distance and instance between two sites assuming periodic boundary
        conditions. If the index jimage of two sites atom j is not specified it
        selects the j image nearest to the i atom and returns the distance and
        jimage indices in terms of lattice vector translations. If the index
        jimage of atom j is specified it returns the distance between the ith
        atom and the specified jimage atom, the given jimage is also returned.

        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            (distance, jimage): distance and periodic lattice translations
            of the other site for which the distance applies.
        """
        return self.distance_and_image_from_frac_coords(other._fcoords, jimage)

    def distance(self, other, jimage=None):
        """
        Get distance between two sites assuming periodic boundary conditions.

        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            distance (float): Distance between the two sites
        """
        return self.distance_and_image(other, jimage)[0]

    def __repr__(self):
        return "PeriodicSite: {} ({:.4f}, {:.4f}, {:.4f}) [{:.4f}, {:.4f}, " \
               "{:.4f}]".format(self.species_string, self._coords[0],
                                self._coords[1], self._coords[2],
                                self._fcoords[0], self._fcoords[1],
                                self._fcoords[2])

    def as_dict(self, verbosity=0):
        """
        Json-serializable dict representation of PeriodicSite.

        Args:
            verbosity (int): Verbosity level. Default of 0 only includes the
                matrix representation. Set to 1 for more details such as
                cartesian coordinates, etc.
        """
        species_list = []
        for spec, occu in self._species.items():
            d = spec.as_dict()
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        d = {"species": species_list,
             "abc": [float(c) for c in self._fcoords],
             "lattice": self._lattice.as_dict(verbosity=verbosity),
             "@module": self.__class__.__module__,
             "@class": self.__class__.__name__}
        if verbosity > 0:
            # Extra human-readable detail; not needed for round-tripping.
            d["xyz"] = [float(c) for c in self._coords]
            d["label"] = self.species_string
        if self._properties:
            d["properties"] = self._properties
        return d

    @classmethod
    def from_dict(cls, d, lattice=None):
        """
        Create PeriodicSite from dict representation.

        Args:
            d (dict): dict representation of PeriodicSite
            lattice: Optional lattice to override lattice specified in d.
                Useful for ensuring all sites in a structure share the same
                lattice.

        Returns:
            PeriodicSite
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Rebuild the appropriate species class from serialized fields.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(
                    sp_occu["element"]):
                sp = Specie.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecie.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        lattice = lattice if lattice else Lattice.from_dict(d["lattice"])
        return cls(atoms_n_occu, d["abc"], lattice, properties=props)
| xhqu1981/pymatgen | pymatgen/core/sites.py | Python | mit | 18,993 | [
"pymatgen"
] | 6fef8486ebe0a7c18b62ff7ce0b847cfe35b8632be24d039022923037798661b |
# This program reads a file representing web server logs in common log format and streams them into a PubSub topic
# with lag characteristics as determined by command-line arguments
import argparse
from google.cloud import pubsub_v1
import time
from datetime import datetime, timezone
import random
from anytree.importer import DictImporter
import json
from multiprocessing import Process

parser = argparse.ArgumentParser(__file__, description="event_generator")
parser.add_argument("--taxonomy", "-x", dest="taxonomy_fp",
                    help="A .json file representing a taxonomy of web resources",
                    default="taxonomy.json")
parser.add_argument("--users_fp", "-u", dest="users_fp",
                    help="A .csv file of users",
                    default="users.csv")
parser.add_argument("--off_to_on", "-off", dest="off_to_on_prob", type=float,
                    help="A float representing the probability that a user who is offline will come online",
                    default=.25)
parser.add_argument("--on_to_off", "-on", dest="on_to_off_prob", type=float,
                    help="A float representing the probability that a user who is online will go offline",
                    default=.1)
parser.add_argument("--max_lag_millis", '-l', dest="max_lag_millis", type=int,
                    help="An integer representing the maximum amount of lag in millisecond", default=250)
parser.add_argument("--project_id", "-p", type=str, dest="project_id", help="A GCP Project ID", required=True)
parser.add_argument("--topic_name", "-t", dest="topic_name", type=str,
                    help="The name of the topic where the messages to be published", required=True)

# mean think-time (seconds) between consecutive events for one simulated user
avg_secs_between_events = 5

args = parser.parse_args()
taxonomy_fp = args.taxonomy_fp
users_fp = args.users_fp
online_to_offline_probability = args.on_to_off_prob
offline_to_online_probability = args.off_to_on_prob
max_lag_millis = args.max_lag_millis
project_id = args.project_id
topic_name = args.topic_name

# bounds for the simulated response payload size (bytes)
min_file_size_bytes = 100
max_file_size_bytes = 500
verbs = ["GET"]
responses = [200]
# field order of the generated event dictionaries
log_fields = ["ip", "user_id", "lat", "lng", "timestamp", "http_request",
              "http_response", "num_bytes", "user_agent"]
def extract_resources(taxonomy_filepath):
    """
    Reads a .json representing a taxonomy and returns
    a data structure representing their hierarchical relationship

    :param taxonomy_filepath: a string representing a path to a .json file
    :return: Node representing root of taxonomic tree
    """
    # Fix: the original wrapped the ``with`` block in try/finally and called
    # fp.close() again -- redundant, and a NameError if open() itself raised
    # (fp would be unbound in the finally). The context manager alone is
    # sufficient and correct.
    with open(taxonomy_filepath, 'r') as fp:
        json_data = json.load(fp)
    return DictImporter().import_(json_data)
def read_users(users_fp):
    """
    Reads a .csv from @users_fp representing users into a list of dictionaries,
    each elt of which represents a user

    :param users_fp: a .csv file whose first line is a header and whose
        remaining lines each represent a user
    :return: a list of dicts mapping header field -> string value
    """
    # Local import keeps the file's top-level imports untouched.
    import csv
    # csv.DictReader handles the header row, quoting and embedded commas
    # correctly, unlike the previous manual str.split(',') parsing.
    with open(users_fp, 'r', newline='') as fp:
        return [dict(row) for row in csv.DictReader(fp)]
def sleep_then_publish_burst(burst, publisher, topic_path):
    """
    Sleep a random lag (uniform in [0, max_lag_millis]) and then publish
    a burst of events.

    :param burst: a list of dictionaries, each representing an event
    :param publisher: a PubSub publisher
    :param topic_path: a topic path for PubSub
    :return: None
    """
    sleep_secs = random.uniform(0, max_lag_millis/1000)
    time.sleep(sleep_secs)
    publish_burst(burst, publisher, topic_path)
def publish_burst(burst, publisher, topic_path):
    """
    Publishes each event in the burst to Pub/Sub as UTF-8 encoded JSON,
    attaching the event's own timestamp as a message attribute.

    :param burst: a list of dictionaries, each representing an event
    :param publisher: a PubSub publisher
    :param topic_path: a topic path for PubSub
    :return: None
    """
    for event_dict in burst:
        json_str = json.dumps(event_dict)
        data = json_str.encode('utf-8')
        # The timestamp attribute lets downstream pipelines use event time
        # rather than publish time.
        publisher.publish(topic_path, data=data, timestamp=event_dict['timestamp'])
def create_user_process(user, root):
    """
    Code for continuously-running process representing a user publishing
    events to pubsub

    :param user: a dictionary representing characteristics of the user
    :param root: an instance of AnyNode representing the home page of a website
    :return: never returns (runs an infinite loop)
    """
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(project_id, topic_name)
    # Per-user session state: current page, online flag, buffered events.
    user['page'] = root
    user['is_online'] = True
    user['offline_events'] = []
    while True:
        # Random think-time, uniform with mean avg_secs_between_events.
        time_between_events = random.uniform(0, avg_secs_between_events * 2)
        time.sleep(time_between_events)
        prob = random.random()
        event = generate_event(user)
        if user['is_online']:
            if prob < online_to_offline_probability:
                # User drops offline: start buffering their events.
                user['is_online'] = False
                user['offline_events'] = [event]
            else:
                sleep_then_publish_burst([event], publisher, topic_path)
        else:
            user['offline_events'].append(event)
            if prob < offline_to_online_probability:
                # User comes back online: flush the buffered burst at once.
                user['is_online'] = True
                sleep_then_publish_burst(user['offline_events'], publisher, topic_path)
                user['offline_events'] = []
def generate_event(user):
    """
    Returns a dictionary representing an event

    Advances the user's current page first, then renders one web-log style
    record. Mutates user['page'].
    :param user: dict with at least ip, id, lat, lng, user_agent and page
    :return: dict keyed by log_fields
    """
    user['page'] = get_next_page(user)
    uri = str(user['page'].name)
    # Event time is recorded in UTC with an explicit 'Z' suffix.
    event_time = datetime.now(tz=timezone.utc)
    current_time_str = event_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    file_size_bytes = random.choice(range(min_file_size_bytes, max_file_size_bytes))
    http_request = "\"{} {} HTTP/1.0\"".format(random.choice(verbs), uri)
    http_response = random.choice(responses)
    event_values = [user['ip'], user['id'], float(user['lat']), float(user['lng']), current_time_str, http_request,
                    http_response, file_size_bytes, user['user_agent']]
    return dict(zip(log_fields, event_values))
def get_next_page(user):
    """
    Consults the user's representation of the web site taxonomy to determine
    the next page that they visit: the current page itself, any of its
    children, or its parent, chosen uniformly at random.

    :param user: dict whose 'page' entry is the current taxonomy node
    :return: the chosen taxonomy node
    """
    current = user['page']
    candidates = [current]
    if not current.is_leaf:
        candidates.extend(current.children)
    if current.parent != None:
        candidates.append(current.parent)
    return random.choice(candidates)
if __name__ == '__main__':
    users = read_users(users_fp)
    root = extract_resources(taxonomy_fp)
    processes = [Process(target=create_user_process, args=(user, root))
                 for user in users]
    # Use a plain loop for side effects instead of a throwaway list
    # comprehension ([p.start() for p in processes]).
    for process in processes:
        process.start()
while True:
time.sleep(1) | GoogleCloudPlatform/training-data-analyst | quests/dataflow_python/streaming_event_generator.py | Python | apache-2.0 | 7,257 | [
"VisIt"
] | baa3fb964f6a1fdec70b83c147731f2860a2fc32fea4149381de762a1c3f7940 |
from selenium import webdriver
from torProfile import TorProfile
import selenium
import subprocess
import os
import signal
import time
import sys
import random

# seconds to pause after closing the browser between visits
sleep_time = 5.0
browse_time = 120.0 # TODO: Find good value. 2 minutes?
# selenium page-load timeout (seconds)
load_timeout = 120.0
# network interface tshark captures on
iface = "eth1"
# root directory for the .cap traffic dumps
dump_path = "PatternDumps"
# closed-world target sites
urls = ["http://cbsnews.com", "http://google.com", "http://nrk.no", "http://vimeo.com", "http://wikipedia.org", "http://youtube.com"]
def startProgress():
    """Print an empty 40-char progress bar and reset the fill counter."""
    global progress_x
    # chr(8) is backspace: rewind the cursor to just after the opening '['.
    sys.stdout.write("Browsing web page: [" + "-"*40 + "]" + chr(8)*41)
    sys.stdout.flush()
    progress_x = 0
global progress_x
x = int(x * 40 // 100)
sys.stdout.write("#" * (x - progress_x))
sys.stdout.flush()
progress_x = x
def endProgress():
    """Fill the remainder of the bar and terminate the line."""
    sys.stdout.write("#" * (40 - progress_x) + "]\n\n")
    sys.stdout.flush()
def mkdir(dir):
    """Create directory ``dir`` (and parents) if it does not already exist."""
    # NOTE(review): check-then-create is racy if two captures run at once;
    # harmless here since a single process creates the dump directories.
    if not os.path.exists(dir):
        os.makedirs(dir)
def getFilename(dir):
    """
    Return the next capture file name in ``dir``: one past the highest
    existing numeric prefix, i.e. "0.cap", then "1.cap", ...
    """
    max_i = -1
    for entry in os.listdir(dir):
        prefix = entry.split(".")[0]
        try:
            max_i = max(max_i, int(prefix))
        except ValueError:
            # Skip files without an integer prefix. The previous bare except
            # around a single max() call meant one such stray file reset the
            # numbering to "0.cap", risking overwrites of existing captures.
            continue
    return "%d.cap" % (max_i + 1)
def loadPage(url):
    """
    Visit ``url`` through Tor in a fresh Firefox profile and keep the page
    open for ``browse_time`` seconds while showing a progress bar.
    Returns -1 on page-load timeout, None on success; re-raises other errors.
    """
    print "Requesting site %s through Tor" % url
    driver = webdriver.Firefox(firefox_profile=TorProfile().p)
    driver.set_page_load_timeout(load_timeout)
    try:
        t = browse_time
        driver.get(url)
        print "Successfully reached %s\n" % url
        startProgress()
        # Hold the page open, updating the bar once per second.
        while t > 0:
            progress(((browse_time-t)/browse_time) * 100)
            time.sleep(1)
            t -= 1
        endProgress()
        driver.close()
        time.sleep(sleep_time)
    except selenium.common.exceptions.TimeoutException:
        # NOTE(review): "lading" is a typo ("loading") in the printed message.
        print "Error lading page: timed out"
        time.sleep(sleep_time)
        driver.close()
        return -1
    except (KeyboardInterrupt, SystemExit):
        # Let shutdown requests propagate after closing the browser.
        driver.close()
        raise
    except:
        print "Unexpected error when loading page:", sys.exc_info()[0]
        time.sleep(sleep_time)
        driver.close()
        raise
def startTshark(f_path):
    """
    Start a background tshark capture of TCP traffic on ``iface`` writing to
    ``f_path``; returns the pid of the spawned shell (a process-group leader
    thanks to os.setsid, so stopTshark can kill the whole group).
    """
    print "Capturing on interface %s" % iface
    # NOTE(review): command built by string interpolation and run with
    # shell=True -- acceptable only because iface/f_path are program-built,
    # never user-supplied.
    command = "tshark -f tcp -i %s -w %s" % (iface, f_path)
    FNULL = open(os.devnull, 'w')
    tshark_proc = subprocess.Popen(command, stdout=FNULL, close_fds=True, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
    return tshark_proc.pid
def stopTshark(pid):
    """
    Stop the capture: SIGTERM the process group started by startTshark, then
    fall back to ``killall tshark`` in case the group kill missed it.
    """
    try:
        os.killpg(pid, signal.SIGTERM)
    except:
        print "Could not stop tshark process"
    FNULL = open(os.devnull, 'w')
    subprocess.Popen("killall tshark", stdout=FNULL, close_fds=True, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
def removeFile(f_path):
    """Delete a capture file (used to discard failed/aborted visits)."""
    os.remove(f_path)
def captureWebsite(url):
    """
    Capture the network traffic of one Tor visit to ``url``: start a tshark
    dump, browse the page, then stop the capture. Failed or interrupted
    visits have their (incomplete) dump file removed.
    """
    # Create directory for URL
    folder = (url.split("://")[1]).split("/")[0]
    dir = "%s/%s" % (dump_path, folder)
    mkdir(dir)
    # Build file path for website visit instance and start capture
    f_path = "%s/%s" % (dir, getFilename(dir))
    tshark_pid = startTshark(f_path)
    try:
        s = loadPage(url)
        stopTshark(tshark_pid)
        if s == -1:
            # Page load timed out: discard the partial capture.
            removeFile(f_path)
    except (KeyboardInterrupt, SystemExit):
        stopTshark(tshark_pid)
        removeFile(f_path)
        sys.exit()
    except:
        print "Unexpected error when capturing website:", sys.exc_info()[0]
        stopTshark(tshark_pid)
        removeFile(f_path)
        raise
def captureRandomlyFromList(number):
    """
    Capture ``number`` website visits, each chosen uniformly at random from
    safelist.csv (lines of "rank,domain").
    """
    with open("safelist.csv", "r") as f:
        # [:-1] strips the trailing newline; assumes every line ends with one.
        sites = ["http://%s" % x.split(",")[1][:-1] for x in f]
    # random.choice replaces sites[random.randint(0, len(sites)-1)] (same
    # distribution); the explicit f.close() inside the with block was
    # redundant, and the manual countdown is now a for loop.
    for _ in range(number):
        captureWebsite(random.choice(sites))
def openWorldList(n):
    """
    Return the first ``n`` sites of openlist.csv as http:// URLs.
    Lines have the form "rank,domain"; [:-1] strips the trailing newline.
    """
    with open("openlist.csv", "r") as f:
        sites = ["http://%s" % x.split(",")[1][:-1] for x in f]
    # The with block closes the file; the former explicit close was redundant.
    return sites[:n]
if __name__=="__main__":
    # Parse the world model from the command line:
    #   closed    -> capture the fixed ``urls`` list
    #   open <n>  -> capture the first n sites of openlist.csv
    closed_world = False
    n = 0
    try:
        model = sys.argv[1]
        if model == "closed":
            closed_world = True
        elif model == "open":
            closed_world = False
            n = int(sys.argv[2])
        else:
            raise
    except:
        print "Usage: python %s <open/closed> <number of visits (given open world)>" % sys.argv[0]
        sys.exit()
    if closed_world:
        dump_path += "/closed"
        for url in urls:
            captureWebsite(url)
    else:
        dump_path += "/open"
        urls = openWorldList(n)
        for url in urls:
captureWebsite(url) | chhans/tor-automation | patterncapture.py | Python | mit | 3,938 | [
"VisIt"
] | 63dc1946202554c5359ed0fac15217846482687b5fd046142309eb78ab32c4f4 |
# encoding: utf-8
"calculator.py - module for choosing a calculator."
import gtk
from gettext import gettext as _
import os
import numpy as np
from copy import copy
from ase.gui.setupwindow import SetupWindow
from ase.gui.progress import DefaultProgressIndicator, GpawProgressIndicator
from ase.gui.widgets import pack, oops, cancel_apply_ok
from ase import Atoms
from ase.data import chemical_symbols
import ase
# Asap and GPAW may be imported if selected.
introtext = _("""\
To make most calculations on the atoms, a Calculator object must first
be associated with it. ASE supports a number of calculators, supporting
different elements, and implementing different physical models for the
interatomic interactions.\
""")
# Informational text about the calculators
lj_info_txt = _("""\
The Lennard-Jones pair potential is one of the simplest
possible models for interatomic interactions, mostly
suitable for noble gasses and model systems.
Interactions are described by an interaction length and an
interaction strength.\
""")
emt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystalling
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au, the Al potential is however not suitable for materials
science application, as the stacking fault energy is wrong.
A number of parameter sets are provided.
<b>Default parameters:</b>
The default EMT parameters, as published in K. W. Jacobsen,
P. Stoltze and J. K. Nørskov, <i>Surf. Sci.</i> <b>366</b>, 394 (1996).
<b>Alternative Cu, Ag and Au:</b>
An alternative set of parameters for Cu, Ag and Au,
reoptimized to experimental data including the stacking
fault energies by Torben Rasmussen (partly unpublished).
<b>Ruthenium:</b>
Parameters for Ruthenium, as published in J. Gavnholt and
J. Schiøtz, <i>Phys. Rev. B</i> <b>77</b>, 035404 (2008).
<b>Metallic glasses:</b>
Parameters for MgCu and CuZr metallic glasses. MgCu
parameters are in N. P. Bailey, J. Schiøtz and
K. W. Jacobsen, <i>Phys. Rev. B</i> <b>69</b>, 144205 (2004).
CuZr in A. Paduraru, A. Kenoufi, N. P. Bailey and
J. Schiøtz, <i>Adv. Eng. Mater.</i> <b>9</b>, 505 (2007).
""")
aseemt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystalling
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au. In addition, this implementation allows for the use of
H, N, O and C adatoms, although the description of these is
most likely not very good.
<b>This is the ASE implementation of EMT.</b> For large
simulations the ASAP implementation is more suitable; this
implementation is mainly to make EMT available when ASAP is
not installed.
""")
brenner_info_txt = _("""\
The Brenner potential is a reactive bond-order potential for
carbon and hydrocarbons. As a bond-order potential, it takes
into account that carbon orbitals can hybridize in different
ways, and that carbon can form single, double and triple
bonds. That the potential is reactive means that it can
handle gradual changes in the bond order as chemical bonds
are formed or broken.
The Brenner potential is implemented in Asap, based on a
C implentation published at http://www.rahul.net/pcm/brenner/ .
The potential is documented here:
Donald W Brenner, Olga A Shenderova, Judith A Harrison,
Steven J Stuart, Boris Ni and Susan B Sinnott:
"A second-generation reactive empirical bond order (REBO)
potential energy expression for hydrocarbons",
J. Phys.: Condens. Matter 14 (2002) 783-802.
doi: 10.1088/0953-8984/14/4/312
""")
gpaw_info_txt = _("""\
GPAW implements Density Functional Theory using a
<b>G</b>rid-based real-space representation of the wave
functions, and the <b>P</b>rojector <b>A</b>ugmented <b>W</b>ave
method for handling the core regions.
""")
aims_info_txt = _("""\
FHI-aims is an external package implementing density
functional theory and quantum chemical methods using
all-electron methods and a numeric local orbital basis set.
For full details, see http://www.fhi-berlin.mpg.de/aims/
or Comp. Phys. Comm. v180 2175 (2009). The ASE
documentation contains information on the keywords and
functionalities available within this interface.
""")
aims_pbc_warning_text = _("""\
WARNING:
Your system seems to have more than zero but less than
three periodic dimensions. Please check that this is
really what you want to compute. Assuming full
3D periodicity for this calculator.""")
vasp_info_txt = _("""\
VASP is an external package implementing density
functional functional theory using pseudopotentials
or the projector-augmented wave method together
with a plane wave basis set. For full details, see
http://cms.mpi.univie.ac.at/vasp/vasp/
""")
emt_parameters = (
(_("Default (Al, Ni, Cu, Pd, Ag, Pt, Au)"), None),
(_("Alternative Cu, Ag and Au"), "EMTRasmussenParameters"),
(_("Ruthenium"), "EMThcpParameters"),
(_("CuMg and CuZr metallic glass"), "EMTMetalGlassParameters")
)
class SetCalculator(SetupWindow):
    "Window for selecting a calculator."

    # List the names of the radio button attributes
    radios = ("none", "lj", "emt", "aseemt", "brenner", "gpaw", "aims", "vasp")
    # List the names of the parameter dictionaries
    paramdicts = ("lj_parameters","gpaw_parameters","aims_parameters",)
    # The name used to store parameters on the gui object
    classname = "SetCalculator"

    def __init__(self, gui):
        # Build the window: one radio button per calculator, each with an
        # Info button and (where needed) a Setup button for its parameters.
        SetupWindow.__init__(self)
        self.set_title(_("Select calculator"))
        vbox = gtk.VBox()
        # Introductory text
        self.packtext(vbox, introtext)
        pack(vbox, [gtk.Label(_("Calculator:"))])
        # No calculator (the default)
        self.none_radio = gtk.RadioButton(None, _("None"))
        pack(vbox, [self.none_radio])
        # Lennard-Jones
        self.lj_radio = gtk.RadioButton(self.none_radio,
                                        _("Lennard-Jones (ASAP)"))
        self.lj_setup = gtk.Button(_("Setup"))
        self.lj_info = InfoButton(lj_info_txt)
        self.lj_setup.connect("clicked", self.lj_setup_window)
        self.pack_line(vbox, self.lj_radio, self.lj_setup, self.lj_info)
        # EMT
        self.emt_radio = gtk.RadioButton(
            self.none_radio, _("EMT - Effective Medium Theory (ASAP)"))
        self.emt_setup = gtk.combo_box_new_text()
        self.emt_param_info = {}
        for p in emt_parameters:
            self.emt_setup.append_text(p[0])
            self.emt_param_info[p[0]] = p[1]
        self.emt_setup.set_active(0)
        self.emt_info = InfoButton(emt_info_txt)
        self.pack_line(vbox, self.emt_radio, self.emt_setup, self.emt_info)
        # EMT (ASE implementation)
        self.aseemt_radio = gtk.RadioButton(
            self.none_radio, _("EMT - Effective Medium Theory (ASE)"))
        self.aseemt_info = InfoButton(aseemt_info_txt)
        self.pack_line(vbox, self.aseemt_radio, None, self.aseemt_info)
        # Brenner potential
        self.brenner_radio = gtk.RadioButton(
            self.none_radio, _("Brenner Potential (ASAP)"))
        self.brenner_info = InfoButton(brenner_info_txt)
        self.pack_line(vbox, self.brenner_radio, None, self.brenner_info)
        # GPAW
        self.gpaw_radio = gtk.RadioButton(self.none_radio,
                                          _("Density Functional Theory (GPAW)")
                                          )
        self.gpaw_setup = gtk.Button(_("Setup"))
        self.gpaw_info = InfoButton(gpaw_info_txt)
        self.gpaw_setup.connect("clicked", self.gpaw_setup_window)
        self.pack_line(vbox, self.gpaw_radio, self.gpaw_setup, self.gpaw_info)
        # FHI-aims
        self.aims_radio = gtk.RadioButton(self.none_radio,
                                          _("Density Functional Theory "
                                            "(FHI-aims)"))
        self.aims_setup = gtk.Button(_("Setup"))
        self.aims_info = InfoButton(aims_info_txt)
        self.aims_setup.connect("clicked", self.aims_setup_window)
        self.pack_line(vbox, self.aims_radio, self.aims_setup, self.aims_info)
        # VASP
        self.vasp_radio = gtk.RadioButton(self.none_radio,
                                          _("Density Functional Theory "
                                            "(VASP)"))
        self.vasp_setup = gtk.Button(_("Setup"))
        self.vasp_info = InfoButton(vasp_info_txt)
        self.vasp_setup.connect("clicked", self.vasp_setup_window)
        self.pack_line(vbox, self.vasp_radio, self.vasp_setup, self.vasp_info)
        # Buttons etc.
        pack(vbox, gtk.Label(""))
        buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
                               apply=self.apply,
                               ok=self.ok)
        pack(vbox, [buts], end=True, bottom=True)
        self.check = gtk.CheckButton(_("Check that the calculator is "
                                       "reasonable."))
        self.check.set_active(True)
        fr = gtk.Frame()
        fr.add(self.check)
        fr.show_all()
        pack(vbox, [fr], end=True, bottom=True)
        # Finalize setup
        self.add(vbox)
        vbox.show()
        self.show()
        self.gui = gui
        self.load_state()

    def pack_line(self, box, radio, setup, info):
        # Pack one row: radio button left, optional Setup and Info right.
        # The Setup button is only sensitive while its radio is active.
        hbox = gtk.HBox()
        hbox.pack_start(radio, 0, 0)
        hbox.pack_start(gtk.Label("  "), 0, 0)
        hbox.pack_end(info, 0, 0)
        if setup is not None:
            radio.connect("toggled", self.radio_toggled, setup)
            setup.set_sensitive(False)
            hbox.pack_end(setup, 0, 0)
        hbox.show_all()
        box.pack_start(hbox, 0, 0)

    def radio_toggled(self, radio, button):
        # Enable the Setup button only when its radio button is selected.
        button.set_sensitive(radio.get_active())

    def lj_setup_window(self, widget):
        if not self.get_atoms():
            return
        lj_param = getattr(self, "lj_parameters", None)
        LJ_Window(self, lj_param, "lj_parameters")
        # When control is retuned, self.lj_parameters has been set.

    def gpaw_setup_window(self, widget):
        if not self.get_atoms():
            return
        gpaw_param = getattr(self, "gpaw_parameters", None)
        GPAW_Window(self, gpaw_param, "gpaw_parameters")
        # When control is retuned, self.gpaw_parameters has been set.

    def aims_setup_window(self, widget):
        if not self.get_atoms():
            return
        aims_param = getattr(self, "aims_parameters", None)
        AIMS_Window(self, aims_param, "aims_parameters")
        # When control is retuned, self.aims_parameters has been set.

    def vasp_setup_window(self, widget):
        if not self.get_atoms():
            return
        vasp_param = getattr(self, "vasp_parameters", None)
        VASP_Window(self, vasp_param, "vasp_parameters")
        # When control is retuned, self.vasp_parameters has been set.

    def get_atoms(self):
        "Make an atoms object from the active frame"
        images = self.gui.images
        frame = self.gui.frame
        if images.natoms < 1:
            oops(_("No atoms present"))
            return False
        self.atoms = Atoms(positions=images.P[frame],
                           symbols=images.Z,
                           cell=images.A[frame],
                           pbc=images.pbc,
                           magmoms=images.M[frame])
        if not images.dynamic.all():
            from ase.constraints import FixAtoms
            # dynamic is a 0/1 mask; 1-dynamic marks the fixed atoms.
            self.atoms.set_constraint(FixAtoms(mask=1-images.dynamic))
        return True

    def apply(self, *widget):
        # Apply the selection; persist the GUI state only on success.
        if self.do_apply():
            self.save_state()
            return True
        else:
            return False

    def do_apply(self):
        # Dispatch on the active radio button: optionally sanity-check the
        # chosen calculator, then install its factory on the simulation.
        nochk = not self.check.get_active()
        self.gui.simulation["progress"] = DefaultProgressIndicator()
        if self.none_radio.get_active():
            self.gui.simulation['calc'] = None
            return True
        elif self.lj_radio.get_active():
            if nochk or self.lj_check():
                self.choose_lj()
                return True
        elif self.emt_radio.get_active():
            if nochk or self.emt_check():
                self.choose_emt()
                return True
        elif self.aseemt_radio.get_active():
            if nochk or self.aseemt_check():
                self.choose_aseemt()
                return True
        elif self.brenner_radio.get_active():
            if nochk or self.brenner_check():
                self.choose_brenner()
                return True
        elif self.gpaw_radio.get_active():
            if nochk or self.gpaw_check():
                self.choose_gpaw()
                return True
        elif self.aims_radio.get_active():
            if nochk or self.aims_check():
                self.choose_aims()
                return True
        elif self.vasp_radio.get_active():
            if nochk or self.vasp_check():
                self.choose_vasp()
                return True
        return False

    def ok(self, *widget):
        # OK = Apply, then close the window on success.
        if self.apply():
            self.destroy()

    def save_state(self):
        # Persist the current selection on the gui object so the window can
        # be reopened in the same state (see load_state).
        state = {}
        for r in self.radios:
            radiobutton = getattr(self, r+"_radio")
            if radiobutton.get_active():
                state["radio"] = r
        state["emtsetup"] = self.emt_setup.get_active()
        state["check"] = self.check.get_active()
        for p in self.paramdicts:
            if hasattr(self, p):
                state[p] = getattr(self, p)
        self.gui.module_state[self.classname] = state

    def load_state(self):
        # Restore a previously saved selection; silently do nothing the
        # first time the window is opened.
        try:
            state = self.gui.module_state[self.classname]
        except KeyError:
            return
        r = state["radio"]
        radiobutton = getattr(self, r + "_radio")
        radiobutton.set_active(True)
        self.emt_setup.set_active(state["emtsetup"])
        self.check.set_active(state["check"])
        for p in self.paramdicts:
            if state.has_key(p):  # dict.has_key: Python 2 idiom
                setattr(self, p, state[p])

    def lj_check(self):
        # Verify that asap3 is importable, parameters are set, and a
        # LennardJones calculator can actually be attached to the atoms.
        try:
            import asap3
        except ImportError:
            oops(_("ASAP is not installed. (Failed to import asap3)"))
            return False
        if not hasattr(self, "lj_parameters"):
            oops(_("You must set up the Lennard-Jones parameters"))
            return False
        try:
            self.atoms.set_calculator(asap3.LennardJones(**self.lj_parameters))
        except (asap3.AsapError, TypeError, ValueError), e:  # Py2 except syntax
            oops(_("Could not create useful Lennard-Jones calculator."),
                 str(e))
            return False
        return True

    def choose_lj(self):
        # Define a function on the fly!
        import asap3
        def lj_factory(p=self.lj_parameters, lj=asap3.LennardJones):
            return lj(**p)
        self.gui.simulation["calc"] = lj_factory

    def emt_get(self):
        # Return (EMT class, parameter-provider class or None, asap3 module)
        # for the provider selected in the combo box.
        import asap3
        provider_name = self.emt_setup.get_active_text()
        provider = self.emt_param_info[provider_name]
        if provider is not None:
            provider = getattr(asap3, provider)
        return (asap3.EMT, provider, asap3)

    def emt_check(self):
        # Verify that an EMT calculator with the chosen parameter provider
        # can be attached to the current atoms.
        if not self.get_atoms():
            return False
        try:
            emt, provider, asap3 = self.emt_get()
        except ImportError:
            oops(_("ASAP is not installed. (Failed to import asap3)"))
            return False
        try:
            if provider is not None:
                self.atoms.set_calculator(emt(provider()))
            else:
                self.atoms.set_calculator(emt())
        except (asap3.AsapError, TypeError, ValueError), e:  # Py2 except syntax
            oops(_("Could not attach EMT calculator to the atoms."),
                 str(e))
            return False
        return True

    def choose_emt(self):
        emt, provider, asap3 = self.emt_get()
        if provider is None:
            emt_factory = emt
        else:
            def emt_factory(emt=emt, prov=provider):
                return emt(prov())
        self.gui.simulation["calc"] = emt_factory

    def aseemt_check(self):
        # ASE's EMT additionally supports H, C, N, O adatoms.
        return self.element_check("ASE EMT", ['H', 'Al', 'Cu', 'Ag', 'Au',
                                              'Ni', 'Pd', 'Pt', 'C', 'N', 'O'])

    def brenner_check(self):
        try:
            import asap3
        except ImportError:
            oops(_("ASAP is not installed. (Failed to import asap3)"))
            return False
        return self.element_check("Brenner potential", ['H', 'C', 'Si'])

    def choose_brenner(self):
        import asap3
        self.gui.simulation["calc"] = asap3.BrennerPotential

    def choose_aseemt(self):
        import ase.calculators.emt
        self.gui.simulation["calc"] = ase.calculators.emt.EMT
        # In case Asap has been imported
        ase.calculators.emt.EMT.disabled = False

    def gpaw_check(self):
        try:
            import gpaw
        except ImportError:
            oops(_("GPAW is not installed. (Failed to import gpaw)"))
            return False
        if not hasattr(self, "gpaw_parameters"):
            oops(_("You must set up the GPAW parameters"))
            return False
        return True

    def choose_gpaw(self):
        # This reuses the same GPAW object.
        try:
            import gpaw
        except ImportError:
            oops(_("GPAW is not installed. (Failed to import gpaw)"))
            return False
        p = self.gpaw_parameters
        use = ["xc", "kpts", "mode"]
        if p["use_h"]:
            use.append("h")
        else:
            use.append("gpts")
        if p["mode"] == "lcao":
            use.append("basis")
        gpaw_param = {}
        for s in use:
            gpaw_param[s] = p[s]
        if p["use mixer"]:
            # Build the requested density mixer (Mixer/MixerSum/MixerDiff)
            # from the stored keyword values.
            mx = getattr(gpaw, p["mixer"])
            mx_args = {}
            mx_arg_n = ["beta", "nmaxold", "weight"]
            if p["mixer"] == "MixerDiff":
                mx_arg_n.extend(["beta_m", "nmaxold_m", "weight_m"])
            for s in mx_arg_n:
                mx_args[s] = p[s]
            gpaw_param["mixer"] = mx(**mx_args)
        progress = GpawProgressIndicator()
        self.gui.simulation["progress"] = progress
        gpaw_param["txt"] = progress.get_gpaw_stream()
        gpaw_calc = gpaw.GPAW(**gpaw_param)
        def gpaw_factory(calc = gpaw_calc):
            return calc
        self.gui.simulation["calc"] = gpaw_factory

    def aims_check(self):
        if not hasattr(self, "aims_parameters"):
            oops(_("You must set up the FHI-aims parameters"))
            return False
        return True

    def choose_aims(self):
        param = self.aims_parameters
        from ase.calculators.aims import Aims
        calc_aims = Aims(**param)
        def aims_factory(calc = calc_aims):
            return calc
        self.gui.simulation["calc"] = aims_factory

    def vasp_check(self):
        if not hasattr(self, "vasp_parameters"):
            oops(_("You must set up the VASP parameters"))
            return False
        return True

    def choose_vasp(self):
        param = self.vasp_parameters
        from ase.calculators.vasp import Vasp
        calc_vasp = Vasp(**param)
        def vasp_factory(calc = calc_vasp):
            return calc
        self.gui.simulation["calc"] = vasp_factory

    def element_check(self, name, elements):
        "Check that all atoms are allowed"
        elements = [ase.data.atomic_numbers[s] for s in elements]
        elements_dict = {}
        for e in elements:
            elements_dict[e] = True
        if not self.get_atoms():
            return False
        try:
            # EAFP: a KeyError identifies the first disallowed element.
            for e in self.atoms.get_atomic_numbers():
                elements_dict[e]
        except KeyError:
            oops(_("Element %(sym)s not allowed by the '%(name)s' calculator")
                 % dict(sym=ase.data.chemical_symbols[e], name=name))
            return False
        return True
class InfoButton(gtk.Button):
    """A small "Info" button that pops up a modal dialog showing *txt*."""

    def __init__(self, txt):
        gtk.Button.__init__(self, _("Info"))
        self.txt = txt
        self.connect('clicked', self.run)

    def run(self, widget):
        # Present the stored markup in a modal message dialog that closes
        # itself on any response.
        info = gtk.MessageDialog(flags=gtk.DIALOG_MODAL,
                                 type=gtk.MESSAGE_INFO,
                                 buttons=gtk.BUTTONS_CLOSE)
        info.set_markup(self.txt)
        info.connect('response', lambda w, r: info.destroy())
        info.show()
class LJ_Window(gtk.Window):
    """Modal dialog for entering Lennard-Jones epsilon/sigma matrices.

    On OK the parameters are stored on *owner* under *attrname* as a dict
    with keys 'elements', 'epsilon', 'sigma' and 'modified'.
    """

    def __init__(self, owner, param, attrname):
        gtk.Window.__init__(self)
        self.set_title(_("Lennard-Jones parameters"))
        self.owner = owner
        self.attrname = attrname
        atoms = owner.atoms
        atnos = atoms.get_atomic_numbers()
        found = {}
        for z in atnos:
            found[z] = True
        self.present = found.keys()  # Py2: dict.keys() returns a list
        self.present.sort()  # Sorted list of atomic numbers
        nelem = len(self.present)
        vbox = gtk.VBox()
        label = gtk.Label(_("Specify the Lennard-Jones parameters here"))
        pack(vbox, [label])
        pack(vbox, gtk.Label(""))
        pack(vbox, [gtk.Label(_("Epsilon (eV):"))])
        tbl, self.epsilon_adj = self.makematrix(self.present)
        pack(vbox, [tbl])
        pack(vbox, gtk.Label(""))
        pack(vbox, [gtk.Label(_(u"Sigma (Å):"))])
        tbl, self.sigma_adj = self.makematrix(self.present)
        pack(vbox, [tbl])
        # TRANSLATORS: Shift roughly means adjust (about a potential)
        self.modif = gtk.CheckButton(_("Shift to make smooth at cutoff"))
        self.modif.set_active(True)
        pack(vbox, gtk.Label(""))
        pack(vbox, self.modif)
        pack(vbox, gtk.Label(""))
        butbox = gtk.HButtonBox()
        cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
        cancel_but.connect('clicked', lambda widget: self.destroy())
        ok_but = gtk.Button(stock=gtk.STOCK_OK)
        ok_but.connect('clicked', self.ok)
        butbox.pack_start(cancel_but, 0, 0)
        butbox.pack_start(ok_but, 0, 0)
        butbox.show_all()
        pack(vbox, [butbox], end=True, bottom=True)
        vbox.show()
        self.add(vbox)
        # Now, set the parameters (only if they match the current elements)
        if param and param['elements'] == self.present:
            self.set_param(self.epsilon_adj, param["epsilon"], nelem)
            self.set_param(self.sigma_adj, param["sigma"], nelem)
            self.modif.set_active(param["modified"])
        self.show()
        self.grab_add()  # Lock all other windows

    def makematrix(self, present):
        # Build a lower-triangular table of SpinButtons, one per element
        # pair; returns the table widget and a dict (i,j) -> Adjustment.
        nelem = len(present)
        adjdict = {}
        tbl = gtk.Table(2+nelem, 2+nelem)
        for i in range(nelem):
            s = chemical_symbols[present[i]]
            tbl.attach(gtk.Label(" " + str(present[i])), 0, 1, i, i+1)
            tbl.attach(gtk.Label("  "+s+" "), 1, 2, i, i+1)
            tbl.attach(gtk.Label(str(present[i])), i+2, i+3, 1+nelem, 2+nelem)
            tbl.attach(gtk.Label(s), i+2, i+3, nelem, 1+nelem)
            for j in range(i+1):
                adj = gtk.Adjustment(1.0, 0.0, 100.0, 0.1)
                spin = gtk.SpinButton(adj, 0.1, 3)
                tbl.attach(spin, 2+j, 3+j, i, i+1)
                adjdict[(i,j)] = adj
        tbl.show_all()
        return tbl, adjdict

    def set_param(self, adj, params, n):
        # Copy the lower triangle of the params matrix into the spinners.
        for i in range(n):
            for j in range(n):
                if j <= i:
                    adj[(i,j)].value = params[i,j]

    def get_param(self, adj, params, n):
        # Read the spinners back into params, mirroring to keep it symmetric.
        for i in range(n):
            for j in range(n):
                if j <= i:
                    params[i,j] = params[j,i] = adj[(i,j)].value

    def destroy(self):
        self.grab_remove()  # release the modal grab before closing
        gtk.Window.destroy(self)

    def ok(self, *args):
        # Collect the matrices and store them on the owner, then close.
        params = {}
        params["elements"] = copy(self.present)
        n = len(self.present)
        eps = np.zeros((n,n))
        self.get_param(self.epsilon_adj, eps, n)
        sigma = np.zeros((n,n))
        self.get_param(self.sigma_adj, sigma, n)
        params["epsilon"] = eps
        params["sigma"] = sigma
        params["modified"] = self.modif.get_active()
        setattr(self.owner, self.attrname, params)
        self.destroy()
class GPAW_Window(gtk.Window):
    """Modal dialog for entering GPAW parameters (xc, grid, k-points,
    spin, mode/basis and mixer settings).

    On OK the values are stored on *owner* under *attrname* as a plain
    dict (see the ok method for the exact keys).
    """

    gpaw_xc_list = ['LDA', 'PBE', 'RPBE', 'revPBE']
    gpaw_xc_default = 'PBE'

    def __init__(self, owner, param, attrname):
        gtk.Window.__init__(self)
        self.set_title(_("GPAW parameters"))
        self.owner = owner
        self.attrname = attrname
        atoms = owner.atoms
        self.ucell = atoms.get_cell()
        # Diagonal of the cell; only meaningful for orthogonal cells.
        self.size = tuple([self.ucell[i,i] for i in range(3)])
        self.pbc = atoms.get_pbc()
        self.orthogonal = self.isorthogonal(self.ucell)
        self.natoms = len(atoms)
        vbox = gtk.VBox()
        #label = gtk.Label("Specify the GPAW parameters here")
        #pack(vbox, [label])
        # Print some info
        txt = _("%i atoms.\n") % (self.natoms,)
        if self.orthogonal:
            txt += _(u"Orthogonal unit cell: %.2f x %.2f x %.2f Å.") % self.size
        else:
            txt += _("Non-orthogonal unit cell:\n")
            txt += str(self.ucell)
        pack(vbox, [gtk.Label(txt)])
        # XC potential
        self.xc = gtk.combo_box_new_text()
        for i, x in enumerate(self.gpaw_xc_list):
            self.xc.append_text(x)
            if x == self.gpaw_xc_default:
                self.xc.set_active(i)
        pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),
                    self.xc])
        # Grid spacing: either a spacing h or explicit grid point counts.
        self.radio_h = gtk.RadioButton(None, _("Grid spacing"))
        self.h = gtk.Adjustment(0.18, 0.0, 1.0, 0.01)
        self.h_spin = gtk.SpinButton(self.h, 0, 2)
        pack(vbox, [self.radio_h, gtk.Label(" h = "), self.h_spin,
                    gtk.Label(_(u"Å"))])
        self.radio_gpts = gtk.RadioButton(self.radio_h, _("Grid points"))
        self.gpts = []
        self.gpts_spin = []
        for i in range(3):
            # GPAW requires grid point counts divisible by 4.
            g = gtk.Adjustment(4, 4, 1000, 4)
            s = gtk.SpinButton(g, 0, 0)
            self.gpts.append(g)
            self.gpts_spin.append(s)
        self.gpts_hlabel = gtk.Label("")
        self.gpts_hlabel_format = _(u"h<sub>eff</sub> = (%.3f, %.3f, %.3f) Å")
        pack(vbox, [self.radio_gpts, gtk.Label(" gpts = ("), self.gpts_spin[0],
                    gtk.Label(", "), self.gpts_spin[1], gtk.Label(", "),
                    self.gpts_spin[2], gtk.Label(")  "), self.gpts_hlabel])
        self.radio_h.connect("toggled", self.radio_grid_toggled)
        self.radio_gpts.connect("toggled", self.radio_grid_toggled)
        self.radio_grid_toggled(None)
        for g in self.gpts:
            g.connect("value-changed", self.gpts_changed)
        self.h.connect("value-changed", self.h_changed)
        # K-points: default aims at ~20 Å of k-point sampling per axis,
        # only for periodic directions of an orthogonal cell.
        self.kpts = []
        self.kpts_spin = []
        for i in range(3):
            if self.pbc[i] and self.orthogonal:
                default = np.ceil(20.0 / self.size[i])
            else:
                default = 1
            g = gtk.Adjustment(default, 1, 100, 1)
            s = gtk.SpinButton(g, 0, 0)
            self.kpts.append(g)
            self.kpts_spin.append(s)
            if not self.pbc[i]:
                s.set_sensitive(False)
            g.connect("value-changed", self.k_changed)
        pack(vbox, [gtk.Label(_("k-points  k = (")), self.kpts_spin[0],
                    gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
                    self.kpts_spin[2], gtk.Label(")")])
        self.kpts_label = gtk.Label("")
        self.kpts_label_format = _(u"k-points x size:  (%.1f, %.1f, %.1f) Å")
        pack(vbox, [self.kpts_label])
        self.k_changed()
        # Spin polarized
        self.spinpol = gtk.CheckButton(_("Spin polarized"))
        pack(vbox, [self.spinpol])
        pack(vbox, gtk.Label(""))
        # Mode and basis functions
        self.mode = gtk.combo_box_new_text()
        self.mode.append_text(_("FD - Finite Difference (grid) mode"))
        self.mode.append_text(_("LCAO - Linear Combination of Atomic "
                                "Orbitals"))
        self.mode.set_active(0)
        pack(vbox, [gtk.Label(_("Mode: ")), self.mode])
        self.basis = gtk.combo_box_new_text()
        self.basis.append_text(_("sz - Single Zeta"))
        self.basis.append_text(_("szp - Single Zeta polarized"))
        self.basis.append_text(_("dzp - Double Zeta polarized"))
        self.basis.set_active(2)  # dzp
        pack(vbox, [gtk.Label(_("Basis functions: ")), self.basis])
        pack(vbox, gtk.Label(""))
        self.mode.connect("changed", self.mode_changed)
        self.mode_changed()
        # Mixer
        self.use_mixer = gtk.CheckButton(_("Non-standard mixer parameters"))
        pack(vbox, [self.use_mixer])
        self.radio_mixer = gtk.RadioButton(None, "Mixer   ")
        self.radio_mixersum = gtk.RadioButton(self.radio_mixer, "MixerSum   ")
        self.radio_mixerdiff = gtk.RadioButton(self.radio_mixer, "MixerDiff")
        pack(vbox, [self.radio_mixer, self.radio_mixersum,
                    self.radio_mixerdiff])
        self.beta_adj = gtk.Adjustment(0.25, 0.0, 1.0, 0.05)
        self.beta_spin = gtk.SpinButton(self.beta_adj, 0, 2)
        self.nmaxold_adj = gtk.Adjustment(3, 1, 10, 1)
        self.nmaxold_spin = gtk.SpinButton(self.nmaxold_adj, 0, 0)
        self.weight_adj = gtk.Adjustment(50, 1, 500, 1)
        self.weight_spin = gtk.SpinButton(self.weight_adj, 0, 0)
        pack(vbox, [gtk.Label("beta = "), self.beta_spin,
                    gtk.Label("  nmaxold = "), self.nmaxold_spin,
                    gtk.Label("  weight = "), self.weight_spin])
        self.beta_m_adj = gtk.Adjustment(0.70, 0.0, 1.0, 0.05)
        self.beta_m_spin = gtk.SpinButton(self.beta_m_adj, 0, 2)
        self.nmaxold_m_adj = gtk.Adjustment(2, 1, 10, 1)
        self.nmaxold_m_spin = gtk.SpinButton(self.nmaxold_m_adj, 0, 0)
        self.weight_m_adj = gtk.Adjustment(10, 1, 500, 1)
        self.weight_m_spin = gtk.SpinButton(self.weight_m_adj, 0, 0)
        pack(vbox, [gtk.Label("beta_m = "), self.beta_m_spin,
                    gtk.Label("  nmaxold_m = "), self.nmaxold_m_spin,
                    gtk.Label("  weight_m = "), self.weight_m_spin])
        for but in (self.spinpol, self.use_mixer, self.radio_mixer,
                    self.radio_mixersum, self.radio_mixerdiff):
            but.connect("clicked", self.mixer_changed)
        self.mixer_changed()
        # Eigensolver
        # Poisson-solver
        vbox.show()
        self.add(vbox)
        # Buttons at the bottom
        pack(vbox, gtk.Label(""))
        butbox = gtk.HButtonBox()
        cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
        cancel_but.connect('clicked', lambda widget: self.destroy())
        ok_but = gtk.Button(stock=gtk.STOCK_OK)
        ok_but.connect('clicked', self.ok)
        butbox.pack_start(cancel_but, 0, 0)
        butbox.pack_start(ok_but, 0, 0)
        butbox.show_all()
        pack(vbox, [butbox], end=True, bottom=True)
        # Set stored parameters (restore a previous session, if any)
        if param:
            self.xc.set_active(param["xc#"])
            if param["use_h"]:
                self.radio_h.set_active(True)
            else:
                self.radio_gpts.set_active(True)
            for i in range(3):
                self.gpts[i].value = param["gpts"][i]
                self.kpts[i].value = param["kpts"][i]
            self.spinpol.set_active(param["spinpol"])
            self.mode.set_active(param["mode#"])
            self.basis.set_active(param["basis#"])
            self.use_mixer.set_active(param["use mixer"])
            getattr(self, "radio_" + param["mixer"].lower()).set_active(True)
            for t in ("beta", "nmaxold", "weight", "beta_m", "nmaxold_m",
                      "weight_m"):
                getattr(self, t+"_adj").value = param[t]
        self.show()
        self.grab_add()  # Lock all other windows

    def radio_grid_toggled(self, widget):
        # Enable either the h spinner or the gpts spinners, never both.
        hmode = self.radio_h.get_active()
        self.h_spin.set_sensitive(hmode)
        for s in self.gpts_spin:
            s.set_sensitive(not hmode)
        self.gpts_changed()

    def gpts_changed(self, *args):
        # Show the effective grid spacing implied by the chosen gpts.
        if self.radio_gpts.get_active():
            g = np.array([int(g.value) for g in self.gpts])
            size = np.array([self.ucell[i,i] for i in range(3)])
            txt = self.gpts_hlabel_format % tuple(size / g)
            self.gpts_hlabel.set_markup(txt)
        else:
            self.gpts_hlabel.set_markup("")

    def h_changed(self, *args):
        # Derive gpts from h, rounded to the nearest multiple of 4.
        h = self.h.value
        for i in range(3):
            g = 4 * round(self.ucell[i,i] / (4*h))
            self.gpts[i].value = g

    def k_changed(self, *args):
        # Show k-points times cell-vector length for each direction.
        size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],
                                                     self.ucell[i]))
                for i in range(3)]
        self.kpts_label.set_text(self.kpts_label_format % tuple(size))

    def mode_changed(self, *args):
        # The basis combo only applies to LCAO mode (index 1).
        self.basis.set_sensitive(self.mode.get_active() == 1)

    def mixer_changed(self, *args):
        # Keep the mixer widgets consistent with the spinpol/use_mixer
        # state: spin-polarized runs use MixerSum/MixerDiff, otherwise
        # plain Mixer; the *_m spinners only apply to MixerDiff.
        radios = (self.radio_mixer, self.radio_mixersum, self.radio_mixerdiff)
        spin1 = (self.beta_spin, self.nmaxold_spin, self.weight_spin)
        spin2 = (self.beta_m_spin, self.nmaxold_m_spin, self.weight_m_spin)
        if self.use_mixer.get_active():
            # Mixer parameters can be specified.
            if self.spinpol.get_active():
                self.radio_mixer.set_sensitive(False)
                self.radio_mixersum.set_sensitive(True)
                self.radio_mixerdiff.set_sensitive(True)
                if self.radio_mixer.get_active():
                    self.radio_mixersum.set_active(True)
            else:
                self.radio_mixer.set_sensitive(True)
                self.radio_mixersum.set_sensitive(False)
                self.radio_mixerdiff.set_sensitive(False)
                self.radio_mixer.set_active(True)
            if self.radio_mixerdiff.get_active():
                active = spin1 + spin2
                passive = ()
            else:
                active = spin1
                passive = spin2
            for widget in active:
                widget.set_sensitive(True)
            for widget in passive:
                widget.set_sensitive(False)
        else:
            # No mixer parameters
            for widget in radios + spin1 + spin2:
                widget.set_sensitive(False)

    def isorthogonal(self, matrix):
        # True when all off-diagonal elements of the 3x3 cell are zero.
        ortho = True
        for i in range(3):
            for j in range(3):
                if i != j and matrix[i][j] != 0.0:
                    ortho = False
        return ortho

    def ok(self, *args):
        # Collect all widget values into a dict, store it on the owner
        # (keys with a trailing '#' hold combo-box indices for restore).
        param = {}
        param["xc"] = self.xc.get_active_text()
        param["xc#"] = self.xc.get_active()
        param["use_h"] = self.radio_h.get_active()
        param["h"] = self.h.value
        param["gpts"] = [int(g.value) for g in self.gpts]
        param["kpts"] = [int(k.value) for k in self.kpts]
        param["spinpol"] = self.spinpol.get_active()
        param["mode"] = self.mode.get_active_text().split()[0].lower()
        param["mode#"] = self.mode.get_active()
        param["basis"] = self.basis.get_active_text().split()[0].lower()
        param["basis#"] = self.basis.get_active()
        param["use mixer"] = self.use_mixer.get_active()
        if self.radio_mixer.get_active():
            m = "Mixer"
        elif self.radio_mixersum.get_active():
            m = "MixerSum"
        else:
            assert self.radio_mixerdiff.get_active()
            m = "MixerDiff"
        param["mixer"] = m
        for t in ("beta", "nmaxold", "weight", "beta_m", "nmaxold_m",
                  "weight_m"):
            param[t] = getattr(self, t+"_adj").value
        setattr(self.owner, self.attrname, param)
        self.destroy()
class AIMS_Window(gtk.Window):
    """Dialog for configuring an FHI-aims calculation.

    Reads the atoms from *owner*, presents widgets for the commonly used
    FHI-aims keywords plus a free-form "expert keyword" table, and stores
    the collected settings as a plain dict via
    ``setattr(owner, attrname, param)`` when the user presses OK.
    """
    aims_xc_cluster = ['pw-lda','pz-lda','pbe','pbesol','rpbe','revpbe',
                       'blyp','am05','b3lyp','hse03','hse06','pbe0','pbesol0',
                       'hf','mp2']
    aims_xc_periodic = ['pw-lda','pz-lda','pbe','pbesol','rpbe','revpbe',
                        'blyp','am05']
    aims_xc_default = 'pbe'
    aims_relativity_list = ['none','atomic_zora','zora']
    # Keywords handled by dedicated widgets; everything else must go through
    # the expert-keyword table.
    aims_keyword_gui_list = ['xc','vdw_correction_hirshfeld','k_grid','spin',
                             'charge','relativistic',
                             'sc_accuracy_etot','sc_accuracy_eev',
                             'sc_accuracy_rho','sc_accuracy_forces',
                             'compute_forces','run_command','species_dir',
                             'default_initial_moment']

    def __init__(self, owner, param, attrname):
        """Build the dialog; *param* (or None) pre-populates the widgets."""
        self.owner = owner
        self.attrname = attrname
        atoms = owner.atoms
        self.periodic = atoms.get_pbc().all()
        # FHI-aims does not support mixed periodicity: treat the system as
        # fully periodic but warn the user once the window is up.
        if not self.periodic and atoms.get_pbc().any():
            aims_periodic_warning = True
            self.periodic = True
        else:
            aims_periodic_warning = False
        from ase.calculators.aims import float_keys, exp_keys, string_keys, \
            int_keys, bool_keys, list_keys, input_keys
        self.aims_keyword_list = (float_keys + exp_keys + string_keys +
                                  int_keys + bool_keys + list_keys +
                                  input_keys)
        self.expert_keywords = []

        natoms = len(atoms)
        gtk.Window.__init__(self)
        self.set_title(_("FHI-aims parameters"))
        vbox = gtk.VBox()
        vbox.set_border_width(5)
        # Header: atom count and (if periodic) the unit cell.
        txt = _("%i atoms.\n") % (natoms)
        if self.periodic:
            self.ucell = atoms.get_cell()
            txt += _("Periodic geometry, unit cell is:\n")
            for i in range(3):
                txt += "(%8.3f %8.3f %8.3f)\n" % (self.ucell[i][0],
                                                  self.ucell[i][1],
                                                  self.ucell[i][2])
            self.xc_list = self.aims_xc_periodic
        else:
            txt += _("Non-periodic geometry.\n")
            self.xc_list = self.aims_xc_cluster
        pack(vbox, [gtk.Label(txt)])

        # XC functional & dispersion correction.  The combo box is filled
        # lazily in set_defaults() (guarded by xc_setup).
        self.xc = gtk.combo_box_new_text()
        self.xc_setup = False
        self.TS = gtk.CheckButton(_("Hirshfeld-based dispersion correction"))
        pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")), self.xc])
        pack(vbox, [self.TS])
        pack(vbox, [gtk.Label("")])

        # k-grid (periodic systems only).
        if self.periodic:
            self.kpts = []
            self.kpts_spin = []
            for i in range(3):
                # Default: about 20 Angstrom of k-point sampling per axis.
                default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],
                                                         self.ucell[i])))
                g = gtk.Adjustment(default, 1, 100, 1)
                s = gtk.SpinButton(g, 0, 0)
                self.kpts.append(g)
                self.kpts_spin.append(s)
                g.connect("value-changed", self.k_changed)
            pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
                        gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
                        self.kpts_spin[2], gtk.Label(")")])
            self.kpts_label = gtk.Label("")
            self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å")
            pack(vbox, [self.kpts_label])
            self.k_changed()
            pack(vbox, gtk.Label(""))

        # Spin polarization, charge and relativity treatment.
        self.spinpol = gtk.CheckButton(_("Spin / initial moment "))
        self.spinpol.connect('toggled', self.spinpol_changed)
        self.moment = gtk.Adjustment(0, -100, 100, 0.1)
        self.moment_spin = gtk.SpinButton(self.moment, 0, 0)
        self.moment_spin.set_digits(2)
        self.moment_spin.set_sensitive(False)
        self.charge = gtk.Adjustment(0, -100, 100, 0.1)
        self.charge_spin = gtk.SpinButton(self.charge, 0, 0)
        self.charge_spin.set_digits(2)
        self.relativity_type = gtk.combo_box_new_text()
        for i, x in enumerate(self.aims_relativity_list):
            self.relativity_type.append_text(x)
        self.relativity_type.connect('changed', self.relativity_changed)
        self.relativity_threshold = gtk.Entry(max=8)
        self.relativity_threshold.set_text('1.00e-12')
        self.relativity_threshold.set_sensitive(False)
        pack(vbox, [self.spinpol,
                    self.moment_spin,
                    gtk.Label(_(" Charge")),
                    self.charge_spin,
                    gtk.Label(_(" Relativity")),
                    self.relativity_type,
                    gtk.Label(_(" Threshold")),
                    self.relativity_threshold])
        pack(vbox, gtk.Label(""))

        # Self-consistency convergence criteria.
        pack(vbox, [gtk.Label(_("Self-consistency convergence:"))])
        self.sc_tot_energy = gtk.Adjustment(1e-6, 1e-6, 1e0, 1e-6)
        self.sc_tot_energy_spin = gtk.SpinButton(self.sc_tot_energy, 0, 0)
        self.sc_tot_energy_spin.set_digits(6)
        self.sc_tot_energy_spin.set_numeric(True)
        self.sc_sum_eigenvalue = gtk.Adjustment(1e-3, 1e-6, 1e0, 1e-6)
        self.sc_sum_eigenvalue_spin = gtk.SpinButton(self.sc_sum_eigenvalue, 0, 0)
        self.sc_sum_eigenvalue_spin.set_digits(6)
        self.sc_sum_eigenvalue_spin.set_numeric(True)
        self.sc_density = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-6)
        self.sc_density_spin = gtk.SpinButton(self.sc_density, 0, 0)
        self.sc_density_spin.set_digits(6)
        self.sc_density_spin.set_numeric(True)
        self.compute_forces = gtk.CheckButton(_("Compute forces"))
        self.compute_forces.set_active(True)
        self.compute_forces.connect("toggled", self.compute_forces_toggled, "")
        self.sc_forces = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-6)
        self.sc_forces_spin = gtk.SpinButton(self.sc_forces, 0, 0)
        self.sc_forces_spin.set_numeric(True)
        self.sc_forces_spin.set_digits(6)
        # XXX: use gtk table for layout. Spaces will not work well otherwise
        # (depend on fonts, widget style, ...)
        # TRANSLATORS: Don't care too much about these, just get approximately
        # the same string lengths
        pack(vbox, [gtk.Label(_("Energy:                 ")),
                    self.sc_tot_energy_spin,
                    gtk.Label(_(" eV   Sum of eigenvalues:  ")),
                    self.sc_sum_eigenvalue_spin,
                    gtk.Label(_(" eV"))])
        pack(vbox, [gtk.Label(_("Electron density: ")),
                    self.sc_density_spin,
                    gtk.Label(_("        Force convergence:  ")),
                    self.sc_forces_spin,
                    gtk.Label(_(" eV/Ang  "))])
        pack(vbox, [self.compute_forces])
        pack(vbox, gtk.Label(""))

        # Scrollable table of free-form "expert" keywords.
        swin = gtk.ScrolledWindow()
        swin.set_border_width(0)
        swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.expert_keyword_set = gtk.Entry(max=55)
        self.expert_keyword_add = gtk.Button(stock=gtk.STOCK_ADD)
        self.expert_keyword_add.connect("clicked", self.expert_keyword_import)
        self.expert_keyword_set.connect("activate", self.expert_keyword_import)
        pack(vbox, [gtk.Label(_("Additional keywords: ")),
                    self.expert_keyword_set,
                    self.expert_keyword_add])
        self.expert_vbox = gtk.VBox()
        vbox.pack_start(swin, True, True, 0)
        swin.add_with_viewport(self.expert_vbox)
        self.expert_vbox.get_parent().set_shadow_type(gtk.SHADOW_NONE)
        self.expert_vbox.get_parent().set_size_request(-1, 100)
        swin.show()
        self.expert_vbox.show()
        pack(vbox, gtk.Label(""))

        # Run command and species defaults directory.
        pack(vbox, gtk.Label(_('FHI-aims execution command: ')))
        self.run_command = pack(vbox, gtk.Entry(max=0))
        pack(vbox, gtk.Label(_('Directory for species defaults: ')))
        self.species_defaults = pack(vbox, gtk.Entry(max=0))

        # Restore a previous parameter set, if one was passed in.
        if param is not None:
            self.set_param(param)
        else:
            self.set_defaults()

        # Buttons at the bottom.
        pack(vbox, gtk.Label(""))
        butbox = gtk.HButtonBox()
        default_but = gtk.Button(_("Set Defaults"))
        default_but.connect("clicked", self.set_defaults)
        import_control_but = gtk.Button(_("Import control.in"))
        import_control_but.connect("clicked", self.import_control)
        export_control_but = gtk.Button(_("Export control.in"))
        export_control_but.connect("clicked", self.export_control)
        cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
        cancel_but.connect('clicked', lambda widget: self.destroy())
        ok_but = gtk.Button(stock=gtk.STOCK_OK)
        ok_but.connect('clicked', self.ok)
        butbox.pack_start(default_but, 0, 0)
        butbox.pack_start(import_control_but, 0, 0)
        butbox.pack_start(export_control_but, 0, 0)
        butbox.pack_start(cancel_but, 0, 0)
        butbox.pack_start(ok_but, 0, 0)
        butbox.show_all()
        pack(vbox, [butbox], end=True, bottom=True)
        self.expert_vbox.show()
        vbox.show()
        self.add(vbox)
        self.show()
        self.grab_add()
        if aims_periodic_warning:
            oops(aims_pbc_warning_text)

    def set_defaults(self, *args):
        """Reset every widget to its default value."""
        atoms = self.owner.atoms.copy()
        # Populate the xc combo box once (it survives later resets).
        if not self.xc_setup:
            self.xc_setup = True
            for i, x in enumerate(self.xc_list):
                self.xc.append_text(x)
        for i, x in enumerate(self.xc_list):
            if x == self.aims_xc_default:
                self.xc.set_active(i)
        self.TS.set_active(False)
        if self.periodic:
            self.ucell = atoms.get_cell()
            for i in range(3):
                default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],
                                                         self.ucell[i])))
                self.kpts_spin[i].set_value(default)
        self.spinpol.set_active(False)
        self.moment.set_value(0)
        self.moment_spin.set_sensitive(False)
        self.charge.set_value(0)
        # Heavier elements (Z > 20) default to scalar atomic ZORA.
        aims_relativity_default = 'none'
        for a in atoms:
            if a.number > 20:
                aims_relativity_default = 'atomic_zora'
        for i, x in enumerate(self.aims_relativity_list):
            if x == aims_relativity_default:
                self.relativity_type.set_active(i)
        self.sc_tot_energy.set_value(1e-6)
        self.sc_sum_eigenvalue.set_value(1e-3)
        self.sc_density.set_value(1e-4)
        self.sc_forces.set_value(1e-4)
        # Drop all expert-keyword rows and mark them inactive.
        for key in self.expert_keywords:
            key[0].destroy()
            key[1].destroy()
            key[2].destroy()
            key[3] = False
        for child in self.expert_vbox.get_children():
            self.expert_vbox.remove(child)
        self.run_command.set_text(os.environ.get('AIMS_COMMAND', ""))
        self.species_defaults.set_text(os.environ.get('AIMS_SPECIES_DIR', ""))

    def set_attributes(self, *args):
        """Collect widget values into a dict stored on the owner."""
        param = {}
        param["xc"] = self.xc.get_active_text()
        if self.periodic:
            param["k_grid"] = (int(self.kpts[0].value),
                               int(self.kpts[1].value),
                               int(self.kpts[2].value))
        if self.spinpol.get_active():
            param["spin"] = "collinear"
            param["default_initial_moment"] = self.moment.get_value()
        else:
            param["spin"] = "none"
            param["default_initial_moment"] = None
        param["vdw_correction_hirshfeld"] = self.TS.get_active()
        param["charge"] = self.charge.value
        param["relativistic"] = self.relativity_type.get_active_text()
        if param["relativistic"] == 'atomic_zora':
            param["relativistic"] += " scalar "
        if param["relativistic"] == 'zora':
            param["relativistic"] += " scalar " + \
                self.relativity_threshold.get_text()
        param["sc_accuracy_etot"] = self.sc_tot_energy.value
        param["sc_accuracy_eev"] = self.sc_sum_eigenvalue.value
        param["sc_accuracy_rho"] = self.sc_density.value
        param["compute_forces"] = self.compute_forces.get_active()
        param["sc_accuracy_forces"] = self.sc_forces.value
        param["run_command"] = self.run_command.get_text()
        param["species_dir"] = self.species_defaults.get_text()
        from ase.calculators.aims import float_keys, exp_keys, string_keys, \
            int_keys, bool_keys, list_keys, input_keys
        for option in self.expert_keywords:
            if option[3]:  # row still active (not deleted)
                # Convert the value according to which keyword list it is in.
                key = option[0].get_text().strip()
                val = option[1].get_text().strip()
                if key == 'output':
                    # 'output' may occur several times -> collect into a list.
                    if 'output' in param:
                        param[key] += [val]
                    else:
                        param[key] = [val]
                elif key in float_keys or key in exp_keys:
                    param[key] = float(val)
                elif key in list_keys or key in string_keys or key in input_keys:
                    param[key] = val
                elif key in int_keys:
                    param[key] = int(val)
                elif key in bool_keys:
                    # BUGFIX: bool("False") is True -- parse the text instead
                    # of casting the (always non-empty here) string.
                    param[key] = val.lower() not in ('0', 'false', '.false.',
                                                     'f', 'no', '')
        setattr(self.owner, self.attrname, param)

    def set_param(self, param):
        """Populate the widgets from a previously stored parameter dict."""
        if param["xc"] is not None:
            for i, x in enumerate(self.xc_list):
                if x == param["xc"]:
                    self.xc.set_active(i)
        if isinstance(param["vdw_correction_hirshfeld"], bool):
            self.TS.set_active(param["vdw_correction_hirshfeld"])
        if self.periodic and param["k_grid"] is not None:
            self.kpts[0].value = int(param["k_grid"][0])
            self.kpts[1].value = int(param["k_grid"][1])
            self.kpts[2].value = int(param["k_grid"][2])
        if param["spin"] is not None:
            self.spinpol.set_active(param["spin"] == "collinear")
            self.moment_spin.set_sensitive(param["spin"] == "collinear")
        if param["default_initial_moment"] is not None:
            self.moment.value = param["default_initial_moment"]
        if param["charge"] is not None:
            self.charge.value = param["charge"]
        if param["relativistic"] is not None:
            # Stored either as a sequence or as "type [scalar [threshold]]".
            if isinstance(param["relativistic"], (tuple, list)):
                rel = param["relativistic"]
            else:
                rel = param["relativistic"].split()
            for i, x in enumerate(self.aims_relativity_list):
                if x == rel[0]:
                    self.relativity_type.set_active(i)
                    if x == 'zora':
                        self.relativity_threshold.set_text(rel[2])
                        self.relativity_threshold.set_sensitive(True)
        if param["sc_accuracy_etot"] is not None:
            self.sc_tot_energy.value = param["sc_accuracy_etot"]
        if param["sc_accuracy_eev"] is not None:
            self.sc_sum_eigenvalue.value = param["sc_accuracy_eev"]
        if param["sc_accuracy_rho"] is not None:
            self.sc_density.value = param["sc_accuracy_rho"]
        if param["compute_forces"] is not None:
            if param["compute_forces"]:
                if param["sc_accuracy_forces"] is not None:
                    self.sc_forces.value = param["sc_accuracy_forces"]
                self.compute_forces.set_active(param["compute_forces"])
            else:
                self.compute_forces.set_active(False)
        if param["run_command"] is not None:
            self.run_command.set_text(param["run_command"])
        if param["species_dir"] is not None:
            self.species_defaults.set_text(param["species_dir"])
        # Everything not covered by a dedicated widget becomes an
        # expert-keyword row.
        for (key, val) in param.items():
            if key in self.aims_keyword_list and \
               key not in self.aims_keyword_gui_list:
                if val is not None:  # an existing "expert keyword"
                    if key == 'output':  # 'output' can be used more than once
                        options = val
                        if isinstance(options, str):
                            options = [options]
                        for arg in options:
                            self.expert_keyword_create([key] + [arg])
                    else:
                        if isinstance(val, str):
                            arg = [key] + val.split()
                        elif isinstance(val, (tuple, list)):
                            arg = [key] + [str(a) for a in val]
                        else:
                            arg = [key] + [str(val)]
                        self.expert_keyword_create(arg)

    def ok(self, *args):
        """Store the parameters on the owner and close the window."""
        self.set_attributes(*args)
        self.destroy()

    def export_control(self, *args):
        """Write control.in (and species data) to a user-chosen file."""
        filename = "control.in"
        chooser = gtk.FileChooserDialog(
            _('Export parameters ... '), None, gtk.FILE_CHOOSER_ACTION_SAVE,
            (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
             gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        chooser.set_filename(filename)
        save = chooser.run()
        if save == gtk.RESPONSE_OK or save == gtk.RESPONSE_SAVE:
            filename = chooser.get_filename()
            self.set_attributes(*args)
            param = getattr(self.owner, "aims_parameters")
            from ase.calculators.aims import Aims
            calc_temp = Aims(**param)
            atoms_temp = self.owner.atoms.copy()
            atoms_temp.set_calculator(calc_temp)
            atoms_temp.calc.write_control(file=filename)
            atoms_temp.calc.write_species(file=filename)
        chooser.destroy()

    def import_control(self, *args):
        """Read a control.in file and populate the widgets from it."""
        filename = "control.in"
        chooser = gtk.FileChooserDialog(
            _('Import control.in file ... '), None,
            gtk.FILE_CHOOSER_ACTION_SAVE,
            (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
             gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        chooser.set_filename(filename)
        save = chooser.run()
        if save == gtk.RESPONSE_OK:
            self.set_defaults()
            filename = chooser.get_filename()
            control = open(filename, 'r')
            # BUGFIX: must be initialized -- previously an UnboundLocalError
            # was raised when the marker line was missing from the file.
            found_aims_calculator = False
            while True:
                line = control.readline()
                if not line:
                    break
                if "List of parameters used to initialize the calculator:" in line:
                    control.readline()  # skip the separator line
                    from ase.io.aims import read_aims_calculator
                    calc = read_aims_calculator(control)
                    found_aims_calculator = True
            control.close()
            if found_aims_calculator:
                # Merge all per-type parameter dicts of the parsed calculator.
                param = calc.float_params
                for key in calc.exp_params:
                    param[key] = calc.exp_params[key]
                for key in calc.string_params:
                    param[key] = calc.string_params[key]
                for key in calc.int_params:
                    param[key] = calc.int_params[key]
                for key in calc.bool_params:
                    param[key] = calc.bool_params[key]
                for key in calc.list_params:
                    param[key] = calc.list_params[key]
                for key in calc.input_parameters:
                    param[key] = calc.input_parameters[key]
                self.set_defaults()
                self.set_param(param)
        chooser.destroy()

    def k_changed(self, *args):
        """Update the label showing the k-grid extent in Angstrom."""
        size = [self.kpts[i].value *
                np.sqrt(np.vdot(self.ucell[i], self.ucell[i]))
                for i in range(3)]
        self.kpts_label.set_text(self.kpts_label_format % tuple(size))

    def compute_forces_toggled(self, *args):
        """Only allow a force criterion when forces are computed."""
        self.sc_forces_spin.set_sensitive(self.compute_forces.get_active())

    def relativity_changed(self, *args):
        """The threshold entry is only meaningful for 'zora' (index 2)."""
        self.relativity_threshold.set_sensitive(
            self.relativity_type.get_active() == 2)

    def spinpol_changed(self, *args):
        """Only allow an initial moment for spin-polarized calculations."""
        self.moment_spin.set_sensitive(self.spinpol.get_active())

    def expert_keyword_import(self, *args):
        """Validate the typed keyword and add it to the expert table."""
        command = self.expert_keyword_set.get_text().split()
        # BUGFIX: an empty entry used to raise IndexError on command[0].
        if not command:
            return
        if command[0] in self.aims_keyword_list and \
           command[0] not in self.aims_keyword_gui_list:
            self.expert_keyword_create(command)
        elif command[0] in self.aims_keyword_gui_list:
            oops(_("Please use the facilities provided in this window to "
                   "manipulate the keyword: %s!") % command[0])
        else:
            oops(_("Don't know this keyword: %s\n"
                   "\nPlease check!\n\n"
                   "If you really think it should be available, "
                   "please add it to the top of ase/calculators/aims.py.")
                 % command[0])
        self.expert_keyword_set.set_text("")

    def expert_keyword_create(self, command):
        """Append a (label, entry, delete-button) row for command[0]."""
        key = command[0]
        argument = command[1]
        if len(command) > 2:
            for a in command[2:]:
                argument += ' ' + a
        index = len(self.expert_keywords)
        # Row layout: [label, value entry, delete button, active flag].
        self.expert_keywords += [[gtk.Label("    " + key + "  "),
                                  gtk.Entry(max=45),
                                  ExpertDeleteButton(index),
                                  True]]
        self.expert_keywords[index][1].set_text(argument)
        self.expert_keywords[index][2].connect('clicked',
                                               self.expert_keyword_delete)
        if not self.expert_vbox.get_children():
            table = gtk.Table(1, 3)
            table.attach(self.expert_keywords[index][0], 0, 1, 0, 1, 0)
            table.attach(self.expert_keywords[index][1], 1, 2, 0, 1, 0)
            table.attach(self.expert_keywords[index][2], 2, 3, 0, 1, 0)
            table.show_all()
            pack(self.expert_vbox, table)
        else:
            table = self.expert_vbox.get_children()[0]
            nrows = table.get_property('n-rows')
            table.resize(nrows + 1, 3)
            table.attach(self.expert_keywords[index][0], 0, 1, nrows, nrows + 1, 0)
            table.attach(self.expert_keywords[index][1], 1, 2, nrows, nrows + 1, 0)
            table.attach(self.expert_keywords[index][2], 2, 3, nrows, nrows + 1, 0)
            table.show_all()

    def expert_keyword_delete(self, button, *args):
        """Remove the expert-keyword row belonging to *button*."""
        index = button.index  # which one to kill
        for i in [0, 1, 2]:
            self.expert_keywords[index][i].destroy()
        table = self.expert_vbox.get_children()[0]
        nrows = table.get_property('n-rows')
        table.resize(nrows - 1, 3)
        self.expert_keywords[index][3] = False
class ExpertDeleteButton(gtk.Button):
    """Stock Delete button that remembers the expert-keyword row it owns.

    The stored ``index`` lets the delete callback identify which row of
    the expert-keyword table to remove.
    """
    def __init__(self, index):
        gtk.Button.__init__(self, stock=gtk.STOCK_DELETE)
        self.index = index
        # Stock button internals: Button > Alignment > HBox > (Image, Label).
        # Shorten the stock label to keep the row compact.
        inner_box = self.get_children()[0].get_children()[0]
        icon, caption = inner_box.get_children()
        if icon is not None:
            caption.set_text('Del')
class VASP_Window(gtk.Window):
vasp_xc_list = ['PW91', 'PBE', 'LDA']
vasp_xc_default = 'PBE'
vasp_prec_default = 'Normal'
def __init__(self, owner, param, attrname):
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.periodic = atoms.get_pbc().all()
self.vasp_keyword_gui_list = ['ediff','encut', 'ismear', 'ispin', 'prec', 'sigma']
from ase.calculators.vasp import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,special_keys
self.vasp_keyword_list = float_keys+exp_keys+string_keys+int_keys+bool_keys+list_keys+special_keys
self.expert_keywords = []
natoms = len(atoms)
gtk.Window.__init__(self)
self.set_title(_("VASP parameters"))
vbox = gtk.VBox()
vbox.set_border_width(5)
# Print some info
txt = _("%i atoms.\n") % natoms
self.ucell = atoms.get_cell()
txt += _("Periodic geometry, unit cell is: \n")
for i in range(3):
txt += "(%8.3f %8.3f %8.3f)\n" % (self.ucell[i][0], self.ucell[i][1], self.ucell[i][2])
pack(vbox, [gtk.Label(txt)])
# XC functional ()
self.xc = gtk.combo_box_new_text()
for i, x in enumerate(self.vasp_xc_list):
self.xc.append_text(x)
# Spin polarized
self.spinpol = gtk.CheckButton(_("Spin polarized"))
pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),
self.xc,
gtk.Label(" "),
self.spinpol])
pack(vbox, gtk.Label(""))
# k-grid
self.kpts = []
self.kpts_spin = []
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
g = gtk.Adjustment(default, 1, 100, 1)
s = gtk.SpinButton(g, 0, 0)
self.kpts.append(g)
self.kpts_spin.append(s)
g.connect("value-changed", self.k_changed)
# Precision of calculation
self.prec = gtk.combo_box_new_text()
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
self.prec.append_text(x)
if x == self.vasp_prec_default:
self.prec.set_active(i)
# cutoff energy
if os.environ.has_key('VASP_PP_PATH'):
self.encut_min_default, self.encut_max_default = self.get_min_max_cutoff()
else:
self.encut_max_default = 400.0
self.encut_min_default = 100.0
self.encut = gtk.Adjustment(self.encut_max_default, 0, 9999, 10)
self.encut_spin = gtk.SpinButton(self.encut, 0, 0)
self.encut_spin.set_digits(2)
self.encut_spin.connect("value-changed",self.check_encut_warning)
self.encut_warning = gtk.Label("")
pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
self.kpts_spin[2],
gtk.Label(_(") Cutoff: ")),self.encut_spin,
gtk.Label(_(" Precision: ")),self.prec])
self.kpts_label = gtk.Label("")
self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å ")
pack(vbox, [self.kpts_label, self.encut_warning])
self.k_changed()
pack(vbox, gtk.Label(""))
self.ismear = gtk.combo_box_new_text()
for x in ['Fermi', 'Gauss', 'Methfessel-Paxton']:
self.ismear.append_text(x)
self.ismear.set_active(2)
self.smearing_order = gtk.Adjustment(2,0,9,1)
self.smearing_order_spin = gtk.SpinButton(self.smearing_order,0,0)
self.smearing_order_spin.set_digits(0)
self.ismear.connect("changed", self.check_ismear_changed)
self.sigma = gtk.Adjustment(0.1, 0.001, 9.0, 0.1)
self.sigma_spin = gtk.SpinButton(self.sigma,0,0)
self.sigma_spin.set_digits(3)
pack(vbox, [gtk.Label(_("Smearing: ")),
self.ismear,
gtk.Label(_(" order: ")),
self.smearing_order_spin,
gtk.Label(_(" width: ")),
self.sigma_spin])
pack(vbox, gtk.Label(""))
self.ediff = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-4)
self.ediff_spin = gtk.SpinButton(self.ediff, 0, 0)
self.ediff_spin.set_digits(6)
pack(vbox,[gtk.Label(_("Self-consistency convergence: ")),
self.ediff_spin,
gtk.Label(_(" eV"))])
pack(vbox,gtk.Label(""))
swin = gtk.ScrolledWindow()
swin.set_border_width(0)
swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.expert_keyword_set = gtk.Entry(max = 55)
self.expert_keyword_add = gtk.Button(stock = gtk.STOCK_ADD)
self.expert_keyword_add.connect("clicked", self.expert_keyword_import)
self.expert_keyword_set.connect("activate", self.expert_keyword_import)
pack(vbox,[gtk.Label(_("Additional keywords: ")),
self.expert_keyword_set,
self.expert_keyword_add])
self.expert_vbox = gtk.VBox()
vbox.pack_start(swin, True, True, 0)
swin.add_with_viewport(self.expert_vbox)
self.expert_vbox.get_parent().set_shadow_type(gtk.SHADOW_NONE)
self.expert_vbox.get_parent().set_size_request(-1, 100)
swin.show()
self.expert_vbox.show()
pack(vbox, gtk.Label(""))
# run command and location of POTCAR files:
pack(vbox, gtk.Label(_('VASP execution command: ')))
self.run_command = pack(vbox, gtk.Entry(max=0))
if os.environ.has_key('VASP_COMMAND'):
self.run_command.set_text(os.environ['VASP_COMMAND'])
pack(vbox, gtk.Label(_('Directory for species defaults: ')))
self.pp_path = pack(vbox, gtk.Entry(max=0))
if os.environ.has_key('VASP_PP_PATH'):
self.pp_path.set_text(os.environ['VASP_PP_PATH'])
# Buttons at the bottom
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
set_default_but = gtk.Button(_("Set Defaults"))
set_default_but.connect("clicked", self.set_defaults)
import_vasp_but = gtk.Button(_("Import VASP files"))
import_vasp_but.connect("clicked", self.import_vasp_files)
export_vasp_but = gtk.Button(_("Export VASP files"))
export_vasp_but.connect("clicked", self.export_vasp_files)
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(set_default_but, 0, 0)
butbox.pack_start(import_vasp_but, 0, 0)
butbox.pack_start(export_vasp_but, 0, 0)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
vbox.show()
self.add(vbox)
self.show()
self.grab_add() # Lock all other windows
self.load_attributes()
def load_attributes(self, directory = "."):
"""Sets values of fields of the window according to the values
set inside the INCAR, KPOINTS and POTCAR file in 'directory'."""
from os import chdir
chdir(directory)
# Try and load INCAR, in the current directory
from ase.calculators.vasp import Vasp
calc_temp = Vasp()
try:
calc_temp.read_incar("INCAR")
except IOError:
pass
else:
if calc_temp.spinpol:
self.spinpol.set_active(True)
else:
self.spinpol.set_active(False)
if calc_temp.float_params['encut']:
self.encut.set_value(calc_temp.float_params['encut'])
if calc_temp.int_params['ismear'] == -1: # Fermi
vasp_ismear_default = 'Fermi'
elif calc_temp.int_params['ismear'] == 0: # Gauss
vasp_ismear_default = 'Gauss'
elif calc_temp.int_params['ismear'] > 0: # Methfessel-Paxton
vasp_ismear_default = 'Methfessel-Paxton'
else:
vasp_ismear_default = None
for i, x in enumerate(['Fermi', 'Gauss', 'Methfessel-Paxton']):
if vasp_ismear_default == x:
self.ismear.set_active(i)
if calc_temp.exp_params['ediff']:
self.ediff.set_value(calc_temp.exp_params['ediff'])
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
if x == calc_temp.string_params['prec']:
self.prec.set_active(i)
if calc_temp.float_params['sigma']:
self.sigma.set_value(calc_temp.float_params['sigma'])
import copy
all_params = copy.deepcopy(calc_temp.float_params)
all_params.update(calc_temp.exp_params)
all_params.update(calc_temp.string_params)
all_params.update(calc_temp.int_params)
all_params.update(calc_temp.bool_params)
all_params.update(calc_temp.special_params)
for (key, value) in all_params.items():
if key in self.vasp_keyword_list \
and key not in self.vasp_keyword_gui_list \
and value is not None:
command = key + " " + str(value)
self.expert_keyword_create(command.split())
for (key, value) in calc_temp.list_params.items():
if key == "magmom" and value is not None:
command = key + " "
rep = 1
previous = value[0]
for v in value[1:]:
if v == previous:
rep += 1
else:
if rep > 1:
command += "%d*%f " % (rep, previous)
else:
command += "%f " % previous
rep = 1
previous = v
if rep > 1:
command += "%d*%f " % (rep, previous)
else:
command += "%f" % previous
self.expert_keyword_create(command.split())
elif value is not None:
command = key + " "
for v in value:
command += str(v) + " "
self.expert_keyword_create(command.split())
# Try and load POTCAR, in the current directory
try:
calc_temp.read_potcar()
except IOError:
pass
else:
#Set xc read from POTCAR
for i, x in enumerate(self.vasp_xc_list):
if x == calc_temp.input_params['xc']:
self.xc.set_active(i)
# Try and load KPOINTS, in the current directory
try:
calc_temp.read_kpoints("KPOINTS")
except IOError:
pass
else:
# Set KPOINTS grid dimensions
for i in range(3):
self.kpts_spin[i].set_value(calc_temp.input_params['kpts'][i])
def set_attributes(self, *args):
self.param = {}
self.param["xc"] = self.xc.get_active_text()
self.param["prec"] = self.prec.get_active_text()
self.param["kpts"] = (int(self.kpts[0].value),
int(self.kpts[1].value),
int(self.kpts[2].value))
self.param["encut"] = self.encut.value
self.param["ediff"] = self.ediff.value
self.param["ismear"] = self.get_ismear()
self.param["sigma"] = self.sigma.value
if self.spinpol.get_active():
self.param["ispin"] = 2
else:
self.param["ispin"] = 1
from ase.calculators.vasp import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,special_keys
for option in self.expert_keywords:
if option[3]: # set type of parameter accoding to which list it is in
key = option[0].get_text().split()[0].strip()
val = option[1].get_text().strip()
if key in float_keys or key in exp_keys:
self.param[key] = float(val)
elif key == "magmom":
val = val.replace("*", " * ")
c = val.split()
val = []
i = 0
while i < len(c):
if c[i] == "*":
b = val.pop()
i += 1
for j in range(int(b)):
val.append(float(c[i]))
else:
val.append(float(c[i]))
i += 1
self.param[key] = val
elif key in list_keys:
c = val.split()
val = []
for i in c:
val.append(float(i))
self.param[key] = val
elif key in string_keys or key in special_keys:
self.param[key] = val
elif key in int_keys:
self.param[key] = int(val)
elif key in bool_keys:
self.param[key] = bool(val)
setattr(self.owner, self.attrname, self.param)
os.environ['VASP_COMMAND'] = self.run_command.get_text()
os.environ['VASP_PP_PATH'] = self.pp_path.get_text()
def ok(self, *args):
self.set_attributes(*args)
self.destroy()
def get_min_max_cutoff(self, *args):
# determine the recommended energy cutoff limits
from ase.calculators.vasp import Vasp
calc_temp = Vasp()
atoms_temp = self.owner.atoms.copy()
calc_temp.initialize(atoms_temp)
calc_temp.write_potcar(suffix = '.check_energy_cutoff')
enmin = -1e6
enmax = -1e6
for line in open("POTCAR.check_energy_cutoff",'r').readlines():
if "ENMIN" in line:
enmax = max(enmax,float(line.split()[2].split(';')[0]))
enmin = max(enmin,float(line.split()[5]))
from os import system
system("rm POTCAR.check_energy_cutoff")
return enmin, enmax
def k_changed(self, *args):
size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],self.ucell[i])) for i in range(3)]
self.kpts_label.set_text(self.kpts_label_format % tuple(size))
def check_encut_warning(self,*args):
if self.encut.value < self.encut_min_default:
self.encut_warning.set_markup(_("<b>WARNING:</b> cutoff energy is lower than recommended minimum!"))
else:
self.encut_warning.set_markup("")
def check_ismear_changed(self,*args):
if self.ismear.get_active_text() == 'Methfessel-Paxton':
self.smearing_order_spin.set_sensitive(True)
else:
self.smearing_order_spin.set_sensitive(False)
def get_ismear(self,*args):
type = self.ismear.get_active_text()
if type == 'Methfessel-Paxton':
ismear_value = self.smearing_order.value
elif type == 'Fermi':
ismear_value = -1
else:
ismear_value = 0
return ismear_value
def destroy(self):
self.grab_remove()
gtk.Window.destroy(self)
def set_defaults(self, *args):
# Reset fields to what they were
self.spinpol.set_active(False)
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
if x == self.vasp_prec_default:
self.prec.set_active(i)
self.encut_spin.set_value(self.encut_max_default)
self.ismear.set_active(2)
self.smearing_order.set_value(2)
self.ediff.set_value(1e-4)
for child in self.expert_vbox.children():
self.expert_vbox.remove(child)
for i, x in enumerate(self.vasp_xc_list):
if x == self.vasp_xc_default:
self.xc.set_active(i)
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
for i in range(3):
self.kpts_spin[i].set_value(default)
def import_vasp_files(self, *args):
dirname = ""
chooser = gtk.FileChooserDialog(
_('Import VASP input files: choose directory ... '),
None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_filename(dirname)
openr = chooser.run()
if openr == gtk.RESPONSE_OK or openr == gtk.RESPONSE_SAVE:
dirname = chooser.get_filename()
self.load_attributes(dirname)
chooser.destroy()
def export_vasp_files(self, *args):
filename = ""
chooser = gtk.FileChooserDialog(
_('Export VASP input files: choose directory ... '),
None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_filename(filename)
save = chooser.run()
if save == gtk.RESPONSE_OK or save == gtk.RESPONSE_SAVE:
filename = chooser.get_filename()
from os import chdir
chdir(filename)
self.set_attributes(*args)
param = getattr(self.owner, "vasp_parameters")
from ase.calculators.vasp import Vasp
calc_temp = Vasp(**param)
atoms_temp = self.owner.atoms.copy()
atoms_temp.set_calculator(calc_temp)
calc_temp.initialize(atoms_temp)
calc_temp.write_incar(atoms_temp)
calc_temp.write_potcar()
calc_temp.write_kpoints()
calc_temp.write_sort_file()
from ase.io.vasp import write_vasp
write_vasp('POSCAR', calc_temp.atoms_sorted, symbol_count = calc_temp.symbol_count)
chooser.destroy()
def expert_keyword_import(self, *args):
command = self.expert_keyword_set.get_text().split()
if len(command) > 0 and command[0] in self.vasp_keyword_list and not command[0] in self.vasp_keyword_gui_list:
self.expert_keyword_create(command)
elif command[0] in self.vasp_keyword_gui_list:
oops(_("Please use the facilities provided in this window to "
"manipulate the keyword: %s!") % command[0])
else:
oops(_("Don't know this keyword: %s"
"\nPlease check!\n\n"
"If you really think it should be available, "
"please add it to the top of ase/calculators/vasp.py.")
% command[0])
self.expert_keyword_set.set_text("")
def expert_keyword_create(self, command):
key = command[0]
if command[1] == "=":
command.remove("=")
argument = command[1]
if len(command) > 2:
for a in command[2:]:
argument += ' '+a
index = len(self.expert_keywords)
self.expert_keywords += [[gtk.Label(" " +key+" = "),
gtk.Entry(max=55),
ExpertDeleteButton(index),
True]]
self.expert_keywords[index][1].set_text(argument)
self.expert_keywords[index][2].connect('clicked',self.expert_keyword_delete)
if not self.expert_vbox.get_children():
table = gtk.Table(1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, 0, 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, 0, 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, 0, 1, 0)
table.show_all()
pack(self.expert_vbox, table)
else:
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows + 1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, nrows, nrows + 1, 0)
table.show_all()
def expert_keyword_delete(self, button, *args):
    """Destroy the widgets of the row owned by *button* and mark it inactive."""
    row = button.index  # which one to kill
    # Tear down label, entry and delete button for this row.
    for widget in self.expert_keywords[row][:3]:
        widget.destroy()
    keyword_table = self.expert_vbox.get_children()[0]
    n_rows = keyword_table.get_property('n-rows')
    keyword_table.resize(n_rows - 1, 3)
    # Keep the list slot (indices of later rows must not shift), but
    # flag it as no longer active.
    self.expert_keywords[row][3] = False
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/gui/calculator.py | Python | gpl-2.0 | 80,415 | [
"ASE",
"CRYSTAL",
"FHI-aims",
"GPAW",
"VASP"
] | 8199c57ac222a28dc4625727830124cb2412d5903f2b13db198227a3f5c3fe79 |
#
# This file is part of the statismo library.
#
# Author: Marcel Luethi (marcel.luethi@unibas.ch)
#
# Copyright (c) 2011 University of Basel
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the project's author nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
import unittest
import tempfile
import os.path
import vtk
import statismo_VTK as statismo
from statismoTestUtils import DATADIR, getDataFiles, read_vtkpd
class Test(unittest.TestCase):
    """Unit tests for the statismo DataManager via the VTK polydata wrapping."""

    def setUp(self):
        # The representer is built from the first sample; it defines the
        # reference domain all subsequently added datasets must share.
        self.datafiles = getDataFiles(DATADIR)
        ref = read_vtkpd(self.datafiles[0])
        self.representer = statismo.vtkStandardMeshRepresenter.Create(ref)

    def tearDown(self):
        pass

    def testName(self):
        pass

    def testAddDataset(self):
        # Adding each dataset should raise the sample count, and every
        # stored sample must remember the URI of the file it came from.
        datamanager = statismo.DataManager_vtkPD.Create(self.representer)
        datasets = map(read_vtkpd, self.datafiles)
        for (dataset, filename) in zip(datasets, self.datafiles):
            datamanager.AddDataset(dataset, filename)
        self.assertEqual(datamanager.GetNumberOfSamples(),len(self.datafiles))
        for (i, sampleData) in enumerate(datamanager.GetData()):
            self.assertEqual(sampleData.GetDatasetURI(),self.datafiles[i])

    def testLoadSave(self):
        # Round-trip a populated data manager through an HDF5 file and
        # check that every sample vector survives unchanged.
        datamanager = statismo.DataManager_vtkPD.Create(self.representer)
        datasets = map(read_vtkpd, self.datafiles)
        for (dataset, filename) in zip(datasets, self.datafiles):
            datamanager.AddDataset(dataset, filename)
        tmpfile = tempfile.mktemp(suffix="h5")
        representer = statismo.vtkStandardMeshRepresenter.Create()
        datamanager.Save(tmpfile)
        datamanager_new = statismo.DataManager_vtkPD.Load(representer, tmpfile)
        self.assertEqual(datamanager.GetNumberOfSamples(), datamanager_new.GetNumberOfSamples())
        sampleSet = datamanager.GetData()
        newSampleSet = datamanager_new.GetData()
        for (sample, newSample) in zip(sampleSet, newSampleSet):
            self.assertTrue((sample.GetSampleVector() == newSample.GetSampleVector()).all() == True)

    def testLoadSaveSurrogateData(self):
        # Same round-trip as testLoadSave, but with per-sample surrogate
        # variables attached (DataManagerWithSurrogates).
        datamanager = statismo.DataManagerWithSurrogates_vtkPD.Create(self.representer, os.path.join(DATADIR, "..", "hand_images", "surrogates", "hand_surrogates_types.txt"))
        ds_filename = os.path.join(DATADIR, "hand-1.vtk")
        ds = read_vtkpd(ds_filename)
        surrogate_filename = os.path.join(DATADIR, "..", "hand_images", "surrogates", "hand-1_surrogates.txt")
        datamanager.AddDatasetWithSurrogates(ds, ds_filename, surrogate_filename)
        tmpfile = tempfile.mktemp(suffix="h5")
        datamanager.Save(tmpfile)
        representer = statismo.vtkStandardMeshRepresenter.Create()
        datamanager_new = statismo.DataManagerWithSurrogates_vtkPD.Load(representer, tmpfile)
        self.assertEqual(datamanager.GetNumberOfSamples(), datamanager_new.GetNumberOfSamples())
        sampleSet = datamanager.GetData()
        newSampleSet = datamanager_new.GetData()
        for (sample, newSample) in zip(sampleSet, newSampleSet):
            self.assertTrue((sample.GetSampleVector() == newSample.GetSampleVector()).all() == True)

    def testCrossValidation(self):
        # The folds must partition the data: training and testing sets of a
        # fold are disjoint and together cover all samples.
        datamanager = statismo.DataManager_vtkPD.Create(self.representer)
        datasets = map(read_vtkpd, self.datafiles)
        for (dataset, filename) in zip(datasets, self.datafiles):
            datamanager.AddDataset(dataset, filename)
        cvFolds = datamanager.GetCrossValidationFolds(3, True)
        self.assertEqual(len(cvFolds), 3)
        training_data = cvFolds[0].GetTrainingData()
        test_data = cvFolds[0].GetTestingData()
        self.assertTrue(len(training_data) + len(test_data) == datamanager.GetNumberOfSamples())
        # NOTE(review): isdisjoint() is True when the sets share *no*
        # element, so the name containsSameElement is misleading; the
        # assertion itself is correct.
        containsSameElement = set(training_data).isdisjoint(test_data)
        self.assertTrue(containsSameElement, "a dataset is both in the test and training data")
# Collect all tests from the Test case so a runner can pick up the suite.
suite = unittest.TestLoader().loadTestsFromTestCase(Test)

if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| zarquon42b/statismo | modules/VTK/wrapping/tests/statismoTests/TestDataManager.py | Python | bsd-3-clause | 5,716 | [
"VTK"
] | ef1f2a6dd7e632ea92275210dc17d5c1185285592772ca81c4879261b8a36ae7 |
#
# Copyright (C) 2009, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision$
# $Date$
# $Author$
# $HeadURL$
import random
import sys
from rlglue.environment.Environment import Environment
from rlglue.environment import EnvironmentLoader as EnvironmentLoader
from rlglue.types import Observation
from rlglue.types import Action
from rlglue.types import Reward_observation_terminal
# This is a very simple discrete-state, episodic grid world that has
# exploding mines in it. If the agent steps on a mine, the episode
# ends with a large negative reward.
#
# The reward per step is -1, with +10 for exiting the game successfully
# and -100 for stepping on a mine.
# TO USE THIS Environment [order doesn't matter]
# NOTE: I'm assuming the Python codec is installed and is in your Python path
# - Start the rl_glue executable socket server on your computer
# - Run the SampleSarsaAgent and SampleExperiment from this or a
# different codec (Matlab, Python, Java, C, Lisp should all be fine)
# - Start this environment like:
# $> python sample_mines_environment.py
class mines_environment(Environment):
    """Episodic grid world with exploding mines for RL-Glue.

    Rewards: -1 per step, +10 for reaching the goal, -100 for stepping
    on a mine; goal and mine cells both terminate the episode.
    """

    # Cell types used in self.map.
    WORLD_FREE = 0
    WORLD_OBSTACLE = 1
    WORLD_MINE = 2
    WORLD_GOAL = 3
    randGenerator=random.Random()
    # When fixedStartState is True episodes start at (startRow, startCol);
    # otherwise a random valid free cell is drawn each episode.
    fixedStartState=False
    startRow=1
    startCol=1
    currentState=10

    def env_init(self):
        """Build the map and return the RL-Glue task specification string."""
        self.map=[ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                   [1, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                   [1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                   [1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 1, 1],
                   [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1],
                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        #The Python task spec parser is not yet able to build task specs programmatically
        return "VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR 1 OBSERVATIONS INTS (0 107) ACTIONS INTS (0 3) REWARDS (-100.0 10.0) EXTRA SampleMinesEnvironment(C/C++) by Brian Tanner."

    def env_start(self):
        """Place the agent and return the first observation of the episode."""
        if self.fixedStartState:
            stateValid=self.setAgentState(self.startRow,self.startCol)
            if not stateValid:
                # NOTE(review): the message prints startRow twice; the
                # requested column is never shown.
                print "The fixed start state was NOT valid: "+str(int(self.startRow))+","+str(int(self.startRow))
                self.setRandomState()
        else:
            self.setRandomState()
        returnObs=Observation()
        returnObs.intArray=[self.calculateFlatState()]
        return returnObs

    def env_step(self,thisAction):
        """Apply one action and return (reward, observation, terminal)."""
        # Make sure the action is valid
        assert len(thisAction.intArray)==1,"Expected 1 integer action."
        assert thisAction.intArray[0]>=0, "Expected action to be in [0,3]"
        assert thisAction.intArray[0]<4, "Expected action to be in [0,3]"
        self.updatePosition(thisAction.intArray[0])
        theObs=Observation()
        theObs.intArray=[self.calculateFlatState()]
        returnRO=Reward_observation_terminal()
        returnRO.r=self.calculateReward()
        returnRO.o=theObs
        returnRO.terminal=self.checkCurrentTerminal()
        return returnRO

    def env_cleanup(self):
        pass

    def env_message(self,inMessage):
        """Handle free-form experiment messages (start-state control, printing)."""
        # Message Description
        # 'set-random-start-state'
        #Action: Set flag to do random starting states (the default)
        if inMessage.startswith("set-random-start-state"):
            self.fixedStartState=False;
            return "Message understood.  Using random start state.";
        # Message Description
        # 'set-start-state X Y'
        # Action: Set flag to do fixed starting states (row=X, col=Y)
        if inMessage.startswith("set-start-state"):
            splitString=inMessage.split(" ");
            self.startRow=int(splitString[1]);
            self.startCol=int(splitString[2]);
            self.fixedStartState=True;
            return "Message understood.  Using fixed start state.";
        # Message Description
        # 'print-state'
        # Action: Print the map and the current agent location
        if inMessage.startswith("print-state"):
            self.printState();
            return "Message understood.  Printed the state.";
        return "SamplesMinesEnvironment(Python) does not respond to that message.";

    def setAgentState(self,row, col):
        """Move the agent to (row, col); return False if the cell is invalid or terminal."""
        self.agentRow=row
        self.agentCol=col
        return self.checkValid(row,col) and not self.checkTerminal(row,col)

    def setRandomState(self):
        """Draw random cells until a valid, non-terminal start cell is found."""
        numRows=len(self.map)
        numCols=len(self.map[0])
        startRow=self.randGenerator.randint(0,numRows-1)
        startCol=self.randGenerator.randint(0,numCols-1)
        while not self.setAgentState(startRow,startCol):
            startRow=self.randGenerator.randint(0,numRows-1)
            startCol=self.randGenerator.randint(0,numCols-1)

    def checkValid(self,row, col):
        """Return True if (row, col) is inside the map and not an obstacle."""
        valid=False
        numRows=len(self.map)
        numCols=len(self.map[0])
        if(row < numRows and row >= 0 and col < numCols and col >= 0):
            if self.map[row][col] != self.WORLD_OBSTACLE:
                valid=True
        return valid

    def checkTerminal(self,row,col):
        """Return True if (row, col) is a goal or a mine (episode-ending cell)."""
        if (self.map[row][col] == self.WORLD_GOAL or self.map[row][col] == self.WORLD_MINE):
            return True
        return False

    def checkCurrentTerminal(self):
        """Return True if the agent currently stands on a terminal cell."""
        return self.checkTerminal(self.agentRow,self.agentCol)

    def calculateFlatState(self):
        """Encode (row, col) as a single column-major integer state index."""
        numRows=len(self.map)
        return self.agentCol * numRows + self.agentRow

    def updatePosition(self, theAction):
        """Apply *theAction*; moves into obstacles or off the map are no-ops."""
        # When the move would result in hitting an obstacles, the agent simply doesn't move
        newRow = self.agentRow;
        newCol = self.agentCol;
        # NOTE(review): actions 0/1 change the *column* and 2/3 the *row*
        # of self.map[row][col]; the down/up/left/right labels below are
        # kept from the original sample code and do not match the printed
        # map orientation -- confirm against the agent in use.
        if (theAction == 0):#move down
            newCol = self.agentCol - 1;
        if (theAction == 1): #move up
            newCol = self.agentCol + 1;
        if (theAction == 2):#move left
            newRow = self.agentRow - 1;
        if (theAction == 3):#move right
            newRow = self.agentRow + 1;
        #Check if new position is out of bounds or inside an obstacle
        if(self.checkValid(newRow,newCol)):
            self.agentRow = newRow;
            self.agentCol = newCol;

    def calculateReward(self):
        """Return the reward for the agent's current cell."""
        if(self.map[self.agentRow][self.agentCol] == self.WORLD_GOAL):
            return 10.0;
        if(self.map[self.agentRow][self.agentCol] == self.WORLD_MINE):
            return -100.0;
        return -1.0;

    def printState(self):
        """Print an ASCII rendering of the map with the agent's position."""
        numRows=len(self.map)
        numCols=len(self.map[0])
        print "Agent is at: "+str(self.agentRow)+","+str(self.agentCol)
        print "Columns:0-10 10-17"
        print "Col ",
        for col in range(0,numCols):
            print col%10,
        for row in range(0,numRows):
            print
            print "Row: "+str(row)+" ",
            for col in range(0,numCols):
                if self.agentRow==row and self.agentCol==col:
                    print "A",
                else:
                    if self.map[row][col] == self.WORLD_GOAL:
                        print "G",
                    if self.map[row][col] == self.WORLD_MINE:
                        print "M",
                    if self.map[row][col] == self.WORLD_OBSTACLE:
                        print "*",
                    if self.map[row][col] == self.WORLD_FREE:
                        print " ",
        print
if __name__=="__main__":
    # Hand control to the RL-Glue environment loader, which connects to the
    # rl_glue socket server and services glue requests for this environment.
    EnvironmentLoader.loadEnvironment(mines_environment())
"Brian"
] | 8931f85be10c0e72ca8707602cd3fd65348b978ef2fd6818761977b7cf3da47a |
"""IPyMOL: View and control your PyMOL sessions from the IPython Notebook"""
import sys
from setuptools import setup, find_packages
from ipymol import __name__, __version__
NAME = __name__
VERSION = __version__
def read(filename):
    """Return the full text of *filename*, resolved relative to this file."""
    import os
    base_dir = os.path.dirname(__file__)
    full_path = os.path.join(base_dir, filename)
    with open(full_path, 'r') as handle:
        return handle.read()
def readlist(filename):
    """Read *filename* and return its non-empty lines, whitespace-stripped."""
    stripped = (line.strip() for line in read(filename).split("\n"))
    return [line for line in stripped if line]
# if we are running on python 3, enable 2to3 and
# let it use the custom fixers from the custom_fixers
# package.
# NOTE(review): use_2to3 was removed in setuptools 58, so this only
# works with older setuptools releases.
extra = {}
if sys.version_info >= (3, 0):
    extra.update(
        use_2to3=True,
    )

setup(
    name=NAME,
    version=VERSION,
    description=('IPyMOL allows you to control PyMOL sessions via IPython.'),
    long_description = read('README.rst'),
    platforms = (
        "Windows", "Linux", "Mac OS-X", "Unix",
    ),
    classifiers = (
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Operating System :: Unix',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Topic :: Scientific/Engineering',
    ),
    keywords = 'ipython notebook pymol protein molecular visualization',
    author="Carlos Xavier Hernandez",
    author_email="cxh@stanford.edu",
    url = 'https://github.com/cxhernandez/%s' % NAME,
    download_url = 'https://github.com/cxhernandez/%s/tarball/master' % NAME,
    license = 'MIT',
    packages = find_packages(),
    include_package_data = True,
    # Ship the README and the requirements file inside the package.
    package_data = {
        '': ['README.rst',
             'requirements.txt'],
    },
    zip_safe=True,
    # Runtime dependencies are maintained in requirements.txt.
    install_requires=readlist('requirements.txt'),
    **extra
)
| cxhernandez/ipymol | setup.py | Python | mit | 2,190 | [
"PyMOL"
] | 5c62de7cd99203403ecddc806667d14df42ca26ef4aceaa4757bff9b0c71edf2 |
'''
This class processes the neural activity output from a Nengo model,
where the information has been saved to file using the numpy.savez or
numpy.savez_compressed method. It is assumed that the simulation was
run with 1ms timestep.
The data is read in and binned into bin_size ms windows to get firing
rates, then is averaged across num_average_together trials. After being
averaged the data is the smoothed with a 20ms Gaussian filter and saved
back to file using the numpy.savez_compressed method.
'''
import gc
import numpy as np
def read_spikes(folder, filename, n_neurons,
                n_trials, start_num=0):
    """Load per-trial spike rasters saved by the Nengo model.

    Reads '<folder>/<filename>_trialXXXX.npz' for trials start_num to
    n_trials-1, reshapes each to (timesteps, n_neurons), and clips every
    trial to the length of the shortest one.

    Returns an array of shape (n_trials - start_num, min_length, n_neurons).
    """
    trajectories = []
    min_length = 1e20
    # read in all the files; disable the garbage collector while appending
    # many large arrays to avoid repeated collection passes
    gc.disable()
    for ii in range(start_num, n_trials):
        name = '%s/%s_trial%.4i.npz' % (folder, filename, ii)
        print('reading in %s...' % name)
        spikes = np.load(name)['array1']
        spikes = spikes.reshape(-1, n_neurons)
        if spikes.shape[0] < min_length:
            min_length = spikes.shape[0]
        trajectories.append(spikes)
    gc.enable()
    print('clipping all data to length %i' % min_length)
    # Clip each trial along the time axis to the common length.  The
    # original `trajectories[:][:min_length]` sliced the *list of trials*,
    # so nothing was ever clipped and ragged trials broke np.array().
    return np.array([trial[:min_length] for trial in trajectories])
def bin_data(data, bin_size):
    """Sum spikes over consecutive, non-overlapping windows of bin_size steps.

    data has shape (n_trials, n_timesteps, n_neurons); trailing timesteps
    that do not fill a complete bin are discarded.  Returns a float64
    array of shape (n_trials, n_timesteps // bin_size, n_neurons).
    """
    n_trials, n_steps, n_neurons = data.shape
    n_bins = int(n_steps / bin_size)
    complete = data[:, :n_bins * bin_size, :]
    windows = complete.reshape(n_trials, n_bins, bin_size, n_neurons)
    return windows.sum(axis=2).astype(np.float64)
def average_trials(data, n_avg_together):
    """Average every n_avg_together consecutive trials together.

    data has shape (n_trials, n_bins, n_neurons); leftover trials that do
    not form a complete group are dropped.  Returns an array of shape
    (n_trials // n_avg_together, n_bins, n_neurons).
    """
    n_groups = int(data.shape[0] / n_avg_together)
    grouped = data[:n_groups * n_avg_together].reshape(
        (n_groups, n_avg_together) + data.shape[1:])
    return grouped.mean(axis=1)
def apply_Gaussian_filter(data):
    """Smooth firing rates along the time axis (axis 1) with a Gaussian kernel.

    The kernel spans the whole time axis and is normalized to sum to 1,
    so its effective width in timesteps scales with data.shape[1].
    """
    kernel = np.exp(-np.linspace(-1, 1, data.shape[1])**2 / (2*.02))
    kernel /= np.sum(kernel)

    def smooth(series):
        # 'same' keeps the output aligned with the input time axis.
        return np.convolve(series, kernel, mode='same')

    # Apply the 1-D convolution to every (trial, :, neuron) time series.
    return np.apply_along_axis(smooth, 1, data)
def process_correlation_activity(folder, filename, n_neurons, n_trials,
                                 bin_size=10, n_reaches=8, n_avg_together=1):
    """Bin, average and smooth neural data grouped by reach direction.

    Trials are interleaved across n_reaches reach directions; for each
    direction the trials are clipped to a common length, binned into
    bin_size-step firing rates, averaged n_avg_together at a time,
    smoothed with a Gaussian filter, and written to
    '<folder>/processed_data/<filename>_processedXXXX.npz'.

    Fixes over the original: bin_size and n_avg_together are now actually
    passed through (they were hardcoded to 10 and 1), and the output
    index uses n_reaches instead of a hardcoded 8.  Defaults preserve the
    original behavior.
    """
    for start_num in range(n_reaches):
        # read in this reach direction's trials
        min_length = 1e20
        trajectories = []
        for ii in range(start_num, n_trials, n_reaches):
            name = '%s/%s_trial%.4i.npz' % (folder, filename, ii)
            print('reading in %s...' % name)
            spikes = np.load(name)['array1']
            spikes = spikes.reshape(-1, n_neurons)
            if spikes.shape[0] < min_length:
                min_length = spikes.shape[0]
            trajectories.append(spikes)

        # clip every trial to the common (shortest) length
        neuron_data = np.asarray(
            [trial[:min_length] for trial in trajectories])

        # bin the spikes into firing rates
        firing_rates = bin_data(neuron_data, bin_size=bin_size)
        # average groups of trials together
        fr_avgs = average_trials(firing_rates, n_avg_together=n_avg_together)
        # smooth along the time axis
        filtered = apply_Gaussian_filter(fr_avgs)

        for ii in range(filtered.shape[0]):
            # reconstruct the interleaved trial number for this direction
            trial_num = ii * n_reaches + start_num
            print('writing %i to file' % trial_num)
            np.savez_compressed(
                '%s/processed_data/%s_processed%.4i' %
                (folder, filename, trial_num),
                array1=filtered[ii])
| studywolf/REACH-paper | analysis/utils/process_spikes.py | Python | gpl-3.0 | 4,016 | [
"Gaussian"
] | 57acc17f29e09167b7eccf0d45ca41b7a51aa7ddc40d85cfd047f7c62a8bc1d2 |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest
from basic_modules.metadata import Metadata
from process_damidseq import process_damidseq
@pytest.mark.idamidseq
@pytest.mark.pipeline
def test_idamidseq_pipeline_00():
    """
    Test case to ensure that the iDamID-seq pipeline code works when an
    explicit execution directory is configured.

    Running the pipeline with the test data from the command line:

    .. code-block:: none

       runcompss \\
          --lang=python \\
          --library_path=${HOME}/bin \\
          --pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
          --log_level=debug \\
          process_damidseq.py \\
             --taxon_id 9606 \\
             --genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
             --assembly GRCh38 \\
             --file /<dataset_dir>/DRR000150.22.fastq
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")

    # Input fixtures: reference genome + BWA index, two sample FASTQs and
    # two background (GFP) FASTQs.
    files = {
        'genome': resource_path + 'idear.Human.GCA_000001405.22.fasta',
        'index': resource_path + 'idear.Human.GCA_000001405.22.fasta.bwa.tar.gz',
        'fastq_1': resource_path + 'idear.Human.SRR3714775.fastq',
        'fastq_2': resource_path + 'idear.Human.SRR3714776.fastq',
        'bg_fastq_1': resource_path + 'idear.Human.SRR3714777.fastq',
        'bg_fastq_2': resource_path + 'idear.Human.SRR3714778.fastq',
    }

    # Matching Metadata objects for every input file.
    metadata = {
        "genome": Metadata(
            "Assembly", "fasta", files['genome'], None,
            {'assembly': 'GCA_000001405.22'}),
        "index": Metadata(
            "Index", "bwa_index", files['index'], files['genome'],
            {'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
        "fastq_1": Metadata(
            "data_idamid_seq", "fastq", files['fastq_1'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "fastq_2": Metadata(
            "data_idamid_seq", "fastq", files['fastq_2'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "bg_fastq_1": Metadata(
            "data_idamid_seq", "fastq", files['bg_fastq_1'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "bg_fastq_2": Metadata(
            "data_idamid_seq", "fastq", files['bg_fastq_2'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
    }

    # Pipeline configuration; unlike test_idamidseq_pipeline_01 this run
    # pins the execution directory to the test data folder.
    config_param = {
        "idear_title": "Full genome sequences for Homo sapiens (GRCh38)",
        "idear_description": "Full genome sequences for Homo sapiens (GRCh38)",
        "idear_common_name": "Human",
        "idear_organism": "Homo sapiens",
        "idear_provider": "ENA",
        "idear_release_date": "2013",
        "idear_sample_param": "Nup98",
        "idear_background_param": "GFP",
        "execution": resource_path
    }

    # Expected output locations for the aligned/filtered BAMs, the
    # BSgenome bundle and the final bigWig track.
    files_out = {
        "bam": [
            files['fastq_1'].replace(".fastq", ".bam"),
            files['fastq_2'].replace(".fastq", ".bam")
        ],
        "bg_bam": [
            files['bg_fastq_1'].replace(".fastq", ".bam"),
            files['bg_fastq_2'].replace(".fastq", ".bam")
        ],
        "bam_filtered": [
            files['fastq_1'].replace(".fastq", ".filtered.bam"),
            files['fastq_2'].replace(".fastq", ".filtered.bam")
        ],
        "bg_bam_filtered": [
            files['bg_fastq_1'].replace(".fastq", ".filtered.bam"),
            files['bg_fastq_2'].replace(".fastq", ".filtered.bam")
        ],
        "bsgenome": resource_path + "idear.Human.GCA_000001405.22.22.bsgenome.tar.gz",
        "chrom_size": resource_path + "chrom.size",
        "genome_2bit": resource_path + "idear.Human.GCA_000001405.22.2bit",
        "seed_file": resource_path + "idear.Human.GCA_000001405.22.seed",
        "bigwig": resource_path + "idear.Human.Nup98-GFP.bw"
    }

    # Run the full pipeline.
    damidseq_handle = process_damidseq(config_param)
    damidseq_files, damidseq_meta = damidseq_handle.run(files, metadata, files_out)  # pylint: disable=unused-variable

    print(damidseq_files)

    # Add tests for all files created: every declared output (single file
    # or list of files) must exist and be non-empty; each is removed
    # afterwards so reruns start clean.
    for f_out in damidseq_files:
        print("iDamID-SEQ RESULTS FILE:", f_out)
        # assert damidseq_files[f_out] == files_out[f_out]
        if isinstance(damidseq_files[f_out], list):
            for sub_file_out in damidseq_files[f_out]:
                assert os.path.isfile(sub_file_out) is True
                assert os.path.getsize(sub_file_out) > 0
                try:
                    os.remove(sub_file_out)
                except OSError as ose:
                    print("Error: %s - %s." % (ose.filename, ose.strerror))
        else:
            assert os.path.isfile(damidseq_files[f_out]) is True
            assert os.path.getsize(damidseq_files[f_out]) > 0
            try:
                os.remove(damidseq_files[f_out])
            except OSError as ose:
                print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.idamidseq
@pytest.mark.pipeline
def test_idamidseq_pipeline_01():
    """
    Test case to ensure that the iDamID-seq pipeline code works when the
    genome and index are provided under the *_public keys and no
    execution directory is configured.

    Running the pipeline with the test data from the command line:

    .. code-block:: none

       runcompss \\
          --lang=python \\
          --library_path=${HOME}/bin \\
          --pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
          --log_level=debug \\
          process_damidseq.py \\
             --taxon_id 9606 \\
             --genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
             --assembly GRCh38 \\
             --file /<dataset_dir>/DRR000150.22.fastq
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")

    # Same fixtures as test_idamidseq_pipeline_00, but the genome and
    # index are passed under the public keys.
    files = {
        'genome_public': resource_path + 'idear.Human.GCA_000001405.22.fasta',
        'index_public': resource_path + 'idear.Human.GCA_000001405.22.fasta.bwa.tar.gz',
        'fastq_1': resource_path + 'idear.Human.SRR3714775.fastq',
        'fastq_2': resource_path + 'idear.Human.SRR3714776.fastq',
        'bg_fastq_1': resource_path + 'idear.Human.SRR3714777.fastq',
        'bg_fastq_2': resource_path + 'idear.Human.SRR3714778.fastq',
    }

    # Matching Metadata objects for every input file.
    metadata = {
        "genome_public": Metadata(
            "Assembly", "fasta", files['genome_public'], None,
            {'assembly': 'GCA_000001405.22'}),
        "index_public": Metadata(
            "Index", "bwa_index", files['index_public'], files['genome_public'],
            {'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
        "fastq_1": Metadata(
            "data_idamid_seq", "fastq", files['fastq_1'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "fastq_2": Metadata(
            "data_idamid_seq", "fastq", files['fastq_2'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "bg_fastq_1": Metadata(
            "data_idamid_seq", "fastq", files['bg_fastq_1'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "bg_fastq_2": Metadata(
            "data_idamid_seq", "fastq", files['bg_fastq_2'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
    }

    # Pipeline configuration; no "execution" key, exercising the default
    # execution-directory behavior.
    config_param = {
        "idear_title": "Full genome sequences for Homo sapiens (GRCh38)",
        "idear_description": "Full genome sequences for Homo sapiens (GRCh38)",
        "idear_common_name": "Human",
        "idear_organism": "Homo sapiens",
        "idear_provider": "ENA",
        "idear_release_date": "2013",
        "idear_sample_param": "Nup98",
        "idear_background_param": "GFP",
    }

    # Expected output locations (identical to test_idamidseq_pipeline_00).
    files_out = {
        "bam": [
            files['fastq_1'].replace(".fastq", ".bam"),
            files['fastq_2'].replace(".fastq", ".bam")
        ],
        "bg_bam": [
            files['bg_fastq_1'].replace(".fastq", ".bam"),
            files['bg_fastq_2'].replace(".fastq", ".bam")
        ],
        "bam_filtered": [
            files['fastq_1'].replace(".fastq", ".filtered.bam"),
            files['fastq_2'].replace(".fastq", ".filtered.bam")
        ],
        "bg_bam_filtered": [
            files['bg_fastq_1'].replace(".fastq", ".filtered.bam"),
            files['bg_fastq_2'].replace(".fastq", ".filtered.bam")
        ],
        "bsgenome": resource_path + "idear.Human.GCA_000001405.22.22.bsgenome.tar.gz",
        "chrom_size": resource_path + "chrom.size",
        "genome_2bit": resource_path + "idear.Human.GCA_000001405.22.2bit",
        "seed_file": resource_path + "idear.Human.GCA_000001405.22.seed",
        "bigwig": resource_path + "idear.Human.Nup98-GFP.bw"
    }

    # Run the full pipeline.
    damidseq_handle = process_damidseq(config_param)
    damidseq_files, damidseq_meta = damidseq_handle.run(files, metadata, files_out)  # pylint: disable=unused-variable

    print(damidseq_files)

    # Add tests for all files created: every declared output (single file
    # or list of files) must exist and be non-empty; each is removed
    # afterwards so reruns start clean.
    for f_out in damidseq_files:
        print("iDamID-SEQ RESULTS FILE:", f_out)
        # assert damidseq_files[f_out] == files_out[f_out]
        if isinstance(damidseq_files[f_out], list):
            for sub_file_out in damidseq_files[f_out]:
                assert os.path.isfile(sub_file_out) is True
                assert os.path.getsize(sub_file_out) > 0
                try:
                    os.remove(sub_file_out)
                except OSError as ose:
                    print("Error: %s - %s." % (ose.filename, ose.strerror))
        else:
            assert os.path.isfile(damidseq_files[f_out]) is True
            assert os.path.getsize(damidseq_files[f_out]) > 0
            try:
                os.remove(damidseq_files[f_out])
            except OSError as ose:
                print("Error: %s - %s." % (ose.filename, ose.strerror))
| Multiscale-Genomics/mg-process-fastq | tests/test_pipeline_idamidseq.py | Python | apache-2.0 | 10,827 | [
"BWA"
] | e541c965e782a6633b7dccf3b377ad24f4c4c66ff4771be99b7fb601c2033e48 |
# -*- coding: utf-8 -*-
#
# brunel_delta_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Random balanced network (delta synapses)
----------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in [1]_
When connecting the network, customary synapse models are used, which
allow for querying the number of created synapses. Using spike
recorders, the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
References
~~~~~~~~~~
.. [1] Brunel N (2000). Dynamics of sparsely connected networks of excitatory and
inhibitory spiking neurons. Journal of Computational Neuroscience 8,
183-208.
"""
###############################################################################
# Import all necessary modules for simulation, analysis and plotting.
import time
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()

###############################################################################
# Assigning the current time to a variable in order to determine the build
# time of the network.

startbuild = time.time()

###############################################################################
# Assigning the simulation parameters to variables.

dt = 0.1  # the resolution in ms
simtime = 1000.0  # Simulation time in ms
delay = 1.5  # synaptic delay in ms

###############################################################################
# Definition of the parameters crucial for asynchronous irregular firing of
# the neurons.

g = 5.0  # ratio inhibitory weight/excitatory weight
eta = 2.0  # external rate relative to threshold rate
epsilon = 0.1  # connection probability

###############################################################################
# Definition of the number of neurons in the network and the number of neurons
# recorded from

order = 2500
NE = 4 * order  # number of excitatory neurons
NI = 1 * order  # number of inhibitory neurons
N_neurons = NE + NI  # number of neurons in total
N_rec = 50  # record from 50 neurons

###############################################################################
# Definition of connectivity parameters

CE = int(epsilon * NE)  # number of excitatory synapses per neuron
CI = int(epsilon * NI)  # number of inhibitory synapses per neuron
C_tot = int(CI + CE)  # total number of synapses per neuron

###############################################################################
# Initialization of the parameters of the integrate and fire neuron and the
# synapses. The parameters of the neuron are stored in a dictionary.

tauMem = 20.0  # time constant of membrane potential in ms
theta = 20.0  # membrane threshold potential in mV
neuron_params = {"C_m": 1.0,
                 "tau_m": tauMem,
                 "t_ref": 2.0,
                 "E_L": 0.0,
                 "V_reset": 0.0,
                 "V_m": 0.0,
                 "V_th": theta}
J = 0.1  # postsynaptic amplitude in mV
J_ex = J  # amplitude of excitatory postsynaptic potential
J_in = -g * J_ex  # amplitude of inhibitory postsynaptic potential

###############################################################################
# Definition of threshold rate, which is the external rate needed to fix the
# membrane potential around its threshold, the external firing rate and the
# rate of the poisson generator which is multiplied by the in-degree CE and
# converted to Hz by multiplication by 1000.

nu_th = theta / (J * CE * tauMem)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE

###############################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting ``print_time`` to `True` prints the
# already processed simulation time as well as its percentage of the total
# simulation time.

nest.SetKernelStatus({"resolution": dt, "print_time": True,
                      "overwrite_files": True})

print("Building network")

###############################################################################
# Creation of the nodes using ``Create``. We store the returned handles in
# variables for later reference. Here the excitatory and inhibitory, as well
# as the poisson generator and two spike recorders. The spike recorders will
# later be used to record excitatory and inhibitory spikes. Properties of the
# nodes are specified via ``params``, which expects a dictionary.

nodes_ex = nest.Create("iaf_psc_delta", NE, params=neuron_params)
nodes_in = nest.Create("iaf_psc_delta", NI, params=neuron_params)
noise = nest.Create("poisson_generator", params={"rate": p_rate})
espikes = nest.Create("spike_recorder")
ispikes = nest.Create("spike_recorder")

###############################################################################
# Configuration of the spike recorders recording excitatory and inhibitory
# spikes by sending parameter dictionaries to ``set``. Setting the property
# `record_to` to *"ascii"* ensures that the spikes will be recorded to a file,
# whose name starts with the string assigned to the property `label`.

espikes.set(label="brunel-py-ex", record_to="ascii")
ispikes.set(label="brunel-py-in", record_to="ascii")

print("Connecting devices")

###############################################################################
# Definition of a synapse using ``CopyModel``, which expects the model name of
# a pre-defined synapse, the name of the customary synapse and an optional
# parameter dictionary. The parameters defined in the dictionary will be the
# default parameter for the customary synapse. Here we define one synapse for
# the excitatory and one for the inhibitory connections giving the
# previously defined weights and equal delays.

nest.CopyModel("static_synapse", "excitatory",
               {"weight": J_ex, "delay": delay})
nest.CopyModel("static_synapse", "inhibitory",
               {"weight": J_in, "delay": delay})

###############################################################################
# Connecting the previously defined poisson generator to the excitatory and
# inhibitory neurons using the excitatory synapse. Since the poisson
# generator is connected to all neurons in the population the default rule
# (``all_to_all``) of ``Connect`` is used. The synaptic properties are inserted
# via ``syn_spec`` which expects a dictionary when defining multiple variables
# or a string when simply using a pre-defined synapse.

nest.Connect(noise, nodes_ex, syn_spec="excitatory")
nest.Connect(noise, nodes_in, syn_spec="excitatory")

###############################################################################
# Connecting the first ``N_rec`` nodes of the excitatory and inhibitory
# population to the associated spike recorders using excitatory synapses.
# Here the same shortcut for the specification of the synapse as defined
# above is used.

nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")

print("Connecting network")

print("Excitatory connections")

###############################################################################
# Connecting the excitatory population to all neurons using the pre-defined
# excitatory synapse. Beforehand, the connection parameter are defined in a
# dictionary. Here we use the connection rule ``fixed_indegree``,
# which requires the definition of the indegree. Since the synapse
# specification is reduced to assigning the pre-defined excitatory synapse it
# suffices to insert a string.

conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")

print("Inhibitory connections")

###############################################################################
# Connecting the inhibitory population to all neurons using the pre-defined
# inhibitory synapse. The connection parameters as well as the synapse
# parameters are defined analogously to the connection from the excitatory
# population defined above.

conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")

###############################################################################
# Storage of the time point after the buildup of the network in a variable.

endbuild = time.time()

###############################################################################
# Simulation of the network.

print("Simulating")

nest.Simulate(simtime)

###############################################################################
# Storage of the time point after the simulation of the network in a variable.

endsimulate = time.time()

###############################################################################
# Reading out the total number of spikes received from the spike recorder
# connected to the excitatory population and the inhibitory population.
# (Only the first N_rec neurons of each population are connected to the
# recorders, see above.)

events_ex = espikes.n_events
events_in = ispikes.n_events

###############################################################################
# Calculation of the average firing rate of the excitatory and the inhibitory
# neurons by dividing the total number of recorded spikes by the number of
# neurons recorded from and the simulation time. The multiplication by 1000.0
# converts the unit 1/ms to 1/s=Hz.

rate_ex = events_ex / simtime * 1000.0 / N_rec
rate_in = events_in / simtime * 1000.0 / N_rec

###############################################################################
# Reading out the number of connections established using the excitatory and
# inhibitory synapse model. The numbers are summed up resulting in the total
# number of synapses.

num_synapses = (nest.GetDefaults("excitatory")["num_connections"] +
                nest.GetDefaults("inhibitory")["num_connections"])

###############################################################################
# Establishing the time it took to build and simulate the network by taking
# the difference of the pre-defined time variables.

build_time = endbuild - startbuild
sim_time = endsimulate - endbuild
###############################################################################
# Printing the network properties, firing rates and building times.

print("Brunel network simulation (Python)")
print(f"Number of neurons : {N_neurons}")
print(f"Number of synapses: {num_synapses}")
# CE excitatory synapses per neuron plus the one poisson-generator
# connection each of the N_neurons neurons receives.
# Fixed typo in the label: "Exitatory" -> "Excitatory".
print(f" Excitatory : {int(CE * N_neurons) + N_neurons}")
print(f" Inhibitory : {int(CI * N_neurons)}")
print(f"Excitatory rate : {rate_ex:.2f} Hz")
print(f"Inhibitory rate : {rate_in:.2f} Hz")
print(f"Building time : {build_time:.2f} s")
print(f"Simulation time : {sim_time:.2f} s")
###############################################################################
# Plot a raster of the excitatory neurons and a histogram.
# ``espikes`` only records the first N_rec excitatory neurons (see the
# recorder connections above), so the raster shows those neurons only.

nest.raster_plot.from_device(espikes, hist=True)
plt.show()
| lekshmideepu/nest-simulator | pynest/examples/brunel_delta_nest.py | Python | gpl-2.0 | 11,657 | [
"NEURON"
] | c4fe4e0f3ca7a89996c815fa35e45aa207ab16948119dcaf39f8d6e3f255e226 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
import os
from PyQt4 import QtCore, QtGui
from openlp.core.lib import ItemCapabilities, MediaManagerItem,MediaType, Receiver, ServiceItem, ServiceItemContext, \
Settings, UiStrings, build_icon, check_item_selected, check_directory_exists, translate
from openlp.core.lib.ui import critical_error_message_box, create_horizontal_adjusting_combo_box
from openlp.core.ui import DisplayController, Display, DisplayControllerType
from openlp.core.ui.media import get_media_players, set_media_players
from openlp.core.utils import AppLocation, locale_compare
log = logging.getLogger(__name__)

# Resource path of the clapperboard image used as the generic thumbnail for
# media service items.
CLAPPERBOARD = u':/media/slidecontroller_multimedia.png'
# Icons shown in the media list for the different track types.
VIDEO = build_icon(QtGui.QImage(u':/media/media_video.png'))
AUDIO = build_icon(QtGui.QImage(u':/media/media_audio.png'))
DVDICON = build_icon(QtGui.QImage(u':/media/media_video.png'))
# Icon used to flag list entries whose file no longer exists on disk.
ERROR = build_icon(QtGui.QImage(u':/general/general_delete.png'))
class MediaMediaItem(MediaManagerItem):
    """
    This is the custom media manager item for Media Slides.
    """
    log.info(u'%s MediaMediaItem loaded', __name__)

    def __init__(self, parent, plugin, icon):
        self.iconPath = u'images/image'
        self.background = False
        self.automatic = u''
        MediaManagerItem.__init__(self, parent, plugin, icon)
        self.singleServiceItem = False
        self.hasSearch = True
        self.mediaObject = None
        # Build a hidden display controller with an off-screen preview display
        # and register it with the media controller, so media can be processed
        # (e.g. length queried) without anything being shown to the user.
        self.displayController = DisplayController(parent)
        self.displayController.controllerLayout = QtGui.QVBoxLayout()
        self.media_controller.register_controller(self.displayController)
        self.media_controller.set_controls_visible(self.displayController, False)
        self.displayController.previewDisplay = Display(self.displayController, False, self.displayController)
        self.displayController.previewDisplay.hide()
        self.displayController.previewDisplay.setGeometry(QtCore.QRect(0, 0, 300, 300))
        self.displayController.previewDisplay.screen = {u'size': self.displayController.previewDisplay.geometry()}
        self.displayController.previewDisplay.setup()
        self.media_controller.setup_display(self.displayController.previewDisplay, False)
        # React to live-background replacement, player configuration changes
        # and screen configuration changes signalled elsewhere.
        QtCore.QObject.connect(Receiver.get_receiver(), QtCore.SIGNAL(u'video_background_replaced'),
            self.videobackgroundReplaced)
        QtCore.QObject.connect(Receiver.get_receiver(), QtCore.SIGNAL(u'mediaitem_media_rebuild'), self.rebuild_players)
        QtCore.QObject.connect(Receiver.get_receiver(), QtCore.SIGNAL(u'config_screen_changed'), self.displaySetup)
        # Allow DnD from the desktop
        self.listView.activateDnD()

    def retranslateUi(self):
        """
        Set (or refresh after a language change) all translatable UI strings.
        """
        self.onNewPrompt = translate('MediaPlugin.MediaItem', 'Select Media')
        self.replaceAction.setText(UiStrings().ReplaceBG)
        self.replaceAction.setToolTip(UiStrings().ReplaceLiveBG)
        self.resetAction.setText(UiStrings().ResetBG)
        self.resetAction.setToolTip(UiStrings().ResetLiveBG)
        self.automatic = UiStrings().Automatic
        self.displayTypeLabel.setText(translate('MediaPlugin.MediaItem', 'Use Player:'))
        self.rebuild_players()

    def requiredIcons(self):
        """
        Declare which standard media manager icons this item needs: file icon
        only, no new/edit icons.
        """
        MediaManagerItem.requiredIcons(self)
        self.hasFileIcon = True
        self.hasNewIcon = False
        self.hasEditIcon = False

    def addListViewToToolBar(self):
        """
        Add the list view as usual and attach the replace-background action
        to it (so it appears in the list's context menu).
        """
        MediaManagerItem.addListViewToToolBar(self)
        self.listView.addAction(self.replaceAction)

    def addEndHeaderBar(self):
        """
        Create the replace/reset background toolbar actions and the player
        selection widget shown below the media list.
        """
        # Replace backgrounds do not work at present so remove functionality.
        self.replaceAction = self.toolbar.addToolbarAction(u'replaceAction', icon=u':/slides/slide_blank.png',
            triggers=self.onReplaceClick)
        self.resetAction = self.toolbar.addToolbarAction(u'resetAction', icon=u':/system/system_close.png',
            visible=False, triggers=self.onResetClick)
        self.mediaWidget = QtGui.QWidget(self)
        self.mediaWidget.setObjectName(u'mediaWidget')
        self.displayLayout = QtGui.QFormLayout(self.mediaWidget)
        self.displayLayout.setMargin(self.displayLayout.spacing())
        self.displayLayout.setObjectName(u'displayLayout')
        self.displayTypeLabel = QtGui.QLabel(self.mediaWidget)
        self.displayTypeLabel.setObjectName(u'displayTypeLabel')
        self.displayTypeComboBox = create_horizontal_adjusting_combo_box(self.mediaWidget, u'displayTypeComboBox')
        self.displayTypeLabel.setBuddy(self.displayTypeComboBox)
        self.displayLayout.addRow(self.displayTypeLabel, self.displayTypeComboBox)
        # Add the Media widget to the page layout
        self.pageLayout.addWidget(self.mediaWidget)
        QtCore.QObject.connect(self.displayTypeComboBox, QtCore.SIGNAL(u'currentIndexChanged (int)'),
            self.overridePlayerChanged)

    def overridePlayerChanged(self, index):
        """
        Persist the player override picked in the combo box; index 0 is the
        "automatic" entry, i.e. no override.
        """
        player = get_media_players()[0]
        if index == 0:
            set_media_players(player)
        else:
            set_media_players(player, player[index - 1])

    def onResetClick(self):
        """
        Called to reset the Live background with the media selected,
        """
        self.media_controller.media_reset(self.live_controller)
        self.resetAction.setVisible(False)

    def videobackgroundReplaced(self):
        """
        Triggered by main display on change of serviceitem.
        """
        self.resetAction.setVisible(False)

    def onReplaceClick(self):
        """
        Called to replace Live background with the media selected.
        """
        if check_item_selected(self.listView,
                translate('MediaPlugin.MediaItem', 'You must select a media file to replace the background with.')):
            item = self.listView.currentItem()
            filename = item.data(QtCore.Qt.UserRole)
            if os.path.exists(filename):
                service_item = ServiceItem()
                service_item.title = u'webkit'
                service_item.shortname = service_item.title
                (path, name) = os.path.split(filename)
                service_item.add_from_command(path, name, CLAPPERBOARD)
                if self.media_controller.video(DisplayControllerType.Live, service_item,
                        videoBehindText=True):
                    self.resetAction.setVisible(True)
                else:
                    critical_error_message_box(UiStrings().LiveBGError,
                        translate('MediaPlugin.MediaItem', 'There was no display item to amend.'))
            else:
                critical_error_message_box(UiStrings().LiveBGError,
                    translate('MediaPlugin.MediaItem',
                    'There was a problem replacing your background, the media file "%s" no longer exists.') % filename)

    def generateSlideData(self, service_item, item=None, xmlVersion=False, remote=False,
            context=ServiceItemContext.Live):
        """
        Fill ``service_item`` from the given (or currently selected) media
        item. Returns False when nothing is selected or the file is missing,
        True on success.
        """
        if item is None:
            item = self.listView.currentItem()
            if item is None:
                return False
        filename = item.data(QtCore.Qt.UserRole)
        if not os.path.exists(filename):
            if not remote:
                # File is no longer present
                critical_error_message_box(
                    translate('MediaPlugin.MediaItem', 'Missing Media File'),
                    translate('MediaPlugin.MediaItem', 'The file %s no longer exists.') % filename)
            return False
        service_item.title = self.displayTypeComboBox.currentText()
        service_item.shortname = service_item.title
        (path, name) = os.path.split(filename)
        service_item.add_from_command(path, name, CLAPPERBOARD)
        # Only get start and end times if going to a service
        if context == ServiceItemContext.Service:
            # Start media and obtain the length
            if not self.media_controller.media_length(service_item):
                return False
        service_item.add_capability(ItemCapabilities.CanAutoStartForLive)
        service_item.add_capability(ItemCapabilities.RequiresMedia)
        service_item.add_capability(ItemCapabilities.HasDetailedTitleDisplay)
        if Settings().value(self.settingsSection + u'/media auto start') == QtCore.Qt.Checked:
            service_item.will_auto_start = True
        # force a non-existent theme
        service_item.theme = -1
        return True

    def initialise(self):
        """
        Populate the list view from the saved media file list and set up the
        player selection combo box.
        """
        self.listView.clear()
        self.listView.setIconSize(QtCore.QSize(88, 50))
        self.servicePath = os.path.join(AppLocation.get_section_data_path(self.settingsSection), u'thumbnails')
        check_directory_exists(self.servicePath)
        self.loadList(Settings().value(self.settingsSection + u'/media files'))
        self.populateDisplayTypes()

    def rebuild_players(self):
        """
        Rebuild the tab in the media manager when changes are made in
        the settings
        """
        self.populateDisplayTypes()
        self.onNewFileMasks = translate('MediaPlugin.MediaItem', 'Videos (%s);;Audio (%s);;%s (*)') % (
            u' '.join(self.media_controller.video_extensions_list),
            u' '.join(self.media_controller.audio_extensions_list), UiStrings().AllFiles)

    def displaySetup(self):
        """
        Re-attach the hidden preview display; connected to the
        ``config_screen_changed`` signal (see ``__init__``).
        """
        self.media_controller.setup_display(self.displayController.previewDisplay, False)

    def populateDisplayTypes(self):
        """
        Load the combobox with the enabled media players,
        allowing user to select a specific player if settings allow
        """
        # block signals to avoid unnecessary overridePlayerChanged Signals
        # while combo box creation
        self.displayTypeComboBox.blockSignals(True)
        self.displayTypeComboBox.clear()
        usedPlayers, overridePlayer = get_media_players()
        mediaPlayers = self.media_controller.mediaPlayers
        currentIndex = 0
        for player in usedPlayers:
            # load the drop down selection
            self.displayTypeComboBox.addItem(mediaPlayers[player].original_name)
            if overridePlayer == player:
                # NOTE(review): relies on len() of the combo box widget;
                # presumably the widget created by
                # create_horizontal_adjusting_combo_box supports it -- confirm.
                currentIndex = len(self.displayTypeComboBox)
        if self.displayTypeComboBox.count() > 1:
            # Add the "automatic" entry at the top when more than one player
            # is available.
            self.displayTypeComboBox.insertItem(0, self.automatic)
            self.displayTypeComboBox.setCurrentIndex(currentIndex)
        if overridePlayer:
            self.mediaWidget.show()
        else:
            self.mediaWidget.hide()
        self.displayTypeComboBox.blockSignals(False)

    def onDeleteClick(self):
        """
        Remove a media item from the list.
        """
        if check_item_selected(self.listView,
                translate('MediaPlugin.MediaItem', 'You must select a media file to delete.')):
            row_list = [item.row() for item in self.listView.selectedIndexes()]
            # Delete from the bottom up so the remaining row numbers stay valid.
            row_list.sort(reverse=True)
            for row in row_list:
                self.listView.takeItem(row)
            Settings().setValue(self.settingsSection + u'/media files', self.getFileList())

    def loadList(self, media):
        """
        Add one list widget item per media path, with an audio/video icon for
        existing files, an error icon for missing files, and a generic icon
        otherwise.
        """
        # Sort the media by its filename considering language specific
        # characters.
        media.sort(cmp=locale_compare, key=lambda filename: os.path.split(unicode(filename))[1])
        for track in media:
            track_info = QtCore.QFileInfo(track)
            if not os.path.exists(track):
                # File is gone: show it with the error icon.
                filename = os.path.split(unicode(track))[1]
                item_name = QtGui.QListWidgetItem(filename)
                item_name.setIcon(ERROR)
                item_name.setData(QtCore.Qt.UserRole, track)
            elif track_info.isFile():
                filename = os.path.split(unicode(track))[1]
                item_name = QtGui.QListWidgetItem(filename)
                # Pick the icon from the file extension.
                if u'*.%s' % (filename.split(u'.')[-1].lower()) in self.media_controller.audio_extensions_list:
                    item_name.setIcon(AUDIO)
                else:
                    item_name.setIcon(VIDEO)
                item_name.setData(QtCore.Qt.UserRole, track)
            else:
                # Not a regular file (e.g. a device/folder path).
                filename = os.path.split(unicode(track))[1]
                item_name = QtGui.QListWidgetItem(filename)
                item_name.setIcon(build_icon(DVDICON))
                item_name.setData(QtCore.Qt.UserRole, track)
            item_name.setToolTip(track)
            self.listView.addItem(item_name)

    def getList(self, type=MediaType.Audio):
        """
        Return the stored media paths filtered by ``type`` (audio or video),
        sorted by filename.
        """
        media = Settings().value(self.settingsSection + u'/media files')
        media.sort(cmp=locale_compare, key=lambda filename: os.path.split(unicode(filename))[1])
        ext = []
        if type == MediaType.Audio:
            ext = self.media_controller.audio_extensions_list
        else:
            ext = self.media_controller.video_extensions_list
        # Strip the leading '*' from each '*.ext' mask before comparing.
        ext = map(lambda x: x[1:], ext)
        media = filter(lambda x: os.path.splitext(x)[1] in ext, media)
        return media

    def search(self, string, showError):
        """
        Case-insensitive substring search over the stored media filenames;
        returns a list of [path, filename] pairs.
        """
        files = Settings().value(self.settingsSection + u'/media files')
        results = []
        string = string.lower()
        for file in files:
            filename = os.path.split(unicode(file))[1]
            if filename.lower().find(string) > -1:
                results.append([file, filename])
        return results
| marmyshev/transitions | openlp/plugins/media/lib/mediaitem.py | Python | gpl-2.0 | 15,388 | [
"Brian"
] | 217ebc2d78ebb69270970d717645c6f7e183eff14f921a6cd9a3dbde1ab14fe7 |
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
import numpy as np
import galsim
def hlr_root_fwhm(fwhm, beta=2., truncationFWHM=2., flux=1., half_light_radius=1.):
    """Residual between the half-light radius of a truncated Moffat profile
    built with the given ``fwhm`` and the target ``half_light_radius``.

    This is zero exactly when ``fwhm`` reproduces the requested half-light
    radius, making it the root function for the bisection search below.
    """
    profile = galsim.Moffat(beta=beta, fwhm=fwhm, flux=flux,
                            trunc=truncationFWHM * fwhm)
    return profile.getHalfLightRadius() - half_light_radius
def bisect_moffat_fwhm(beta=2., truncationFWHM=2., half_light_radius=1., fwhm_lower=0.1,
                       fwhm_upper=10., tol=1.2e-16):
    """Find the Moffat FWHM providing the desired half_light_radius in the old Moffat parameter
    spec schema.

    Uses interval bisection on ``hlr_root_fwhm``.

    Fixes vs. the previous version:
    * ``fwhm_mid`` is initialised before the loop, so a degenerate initial
      interval (``fwhm_upper - fwhm_lower < tol``) no longer raises
      ``UnboundLocalError``.
    * the residual at the surviving endpoint is reused from the midpoint
      evaluation instead of being recomputed, halving the number of galsim
      profile constructions per iteration.

    NOTE(review): as before, the routine assumes the root is bracketed by
    [fwhm_lower, fwhm_upper]; no sign-change check is performed.
    """
    y0 = hlr_root_fwhm(fwhm_lower, beta=beta, truncationFWHM=truncationFWHM,
                       half_light_radius=half_light_radius)
    y1 = hlr_root_fwhm(fwhm_upper, beta=beta, truncationFWHM=truncationFWHM,
                       half_light_radius=half_light_radius)
    dfwhm = fwhm_upper - fwhm_lower
    fwhm_mid = fwhm_lower + .5 * dfwhm  # defined even if the loop never runs
    while dfwhm >= tol:
        fwhm_mid = fwhm_lower + .5 * dfwhm
        ymid = hlr_root_fwhm(fwhm_mid, beta=beta, truncationFWHM=truncationFWHM,
                             half_light_radius=half_light_radius)
        if y0 * ymid > 0.:  # Root not in LHS
            fwhm_lower = fwhm_mid
            y0 = ymid  # midpoint value becomes the new lower-endpoint value
        elif y1 * ymid > 0.:  # Root not in RHS
            fwhm_upper = fwhm_mid
            y1 = ymid  # midpoint value becomes the new upper-endpoint value
        elif ymid == 0.:
            break
        # Bisect interval
        dfwhm *= .5
    return fwhm_mid
| mardom/GalSim | devel/external/calculate_moffat_radii.py | Python | gpl-3.0 | 2,440 | [
"Galaxy"
] | d8d40f7cb62d36a23e5a0b92ad1dc79afc4d37ccd4af0bb22ab7675297eb9cf9 |
import COPASI
import sys
# Single global data model shared by all functions below.
dm = COPASI.CRootContainer.addDatamodel()
assert (isinstance(dm, COPASI.CDataModel))
def print_group(label, group):
    # type: (str, COPASI.CModelParameterGroup) -> None
    """Print the heading *label* followed by every parameter in *group*.

    Reaction parameters arrive wrapped in one sub-group per reaction; all
    other entries are plain name/value pairs.
    """
    print (label)
    for index in range(group.size()):
        child = group.getChild(index)
        if not isinstance(child, COPASI.CModelParameterGroup):
            # plain name/value pair
            print (" {0} = {1}".format(child.getName(), child.getValue(COPASI.CCore.Framework_Concentration)))
            continue
        # a sub-group holding the kinetic parameters of one reaction
        print (" Reaction: %s" % child.getName())
        for k in range(child.size()):
            kinetic = child.getChild(k)
            print (" {0} = {1}".format(kinetic.getName(), kinetic.getValue(COPASI.CCore.Framework_Concentration)))
def print_set(parameter_set):
    # type: (COPASI.CModelParameterSet) -> None
    """Dump the complete content of one model parameter set to stdout."""
    print ("Parameter set: %s" % parameter_set.getObjectName())
    # compile first, otherwise concentration values may be missing
    parameter_set.compile()
    # The first group's first child holds the initial time.
    time_entry = parameter_set.getModelParameter(0).getChild(0)
    print (" Initial time: %d" % time_entry.getValue(COPASI.CCore.Framework_Concentration))
    # The remaining top-level groups, in fixed order.
    labelled_groups = [
        (" Compartment Sizes", 1),
        (" Species Concentrations", 2),
        (" Global Quantities", 3),
        (" Kinetic Parameters", 4),
    ]
    for label, position in labelled_groups:
        print_group(label, parameter_set.getModelParameter(position))
def print_model_parameter_set(input_file, parameter_set):
    # type: (str, str) -> None
    """Load *input_file* into the global data model and print its parameter sets.

    When *parameter_set* is None every set is printed; otherwise only the set
    whose name matches is printed.
    """
    if not dm.loadModel(input_file):
        print ("Model could not be loaded.")
        print (COPASI.CCopasiMessage.getAllMessageText())
        return
    model = dm.getModel()
    assert (isinstance(model, COPASI.CModel))
    for candidate in model.getModelParameterSets():
        assert (isinstance(candidate, COPASI.CModelParameterSet))
        wanted = parameter_set is None or candidate.getObjectName() == parameter_set
        if wanted:
            print_set(candidate)
if __name__ == "__main__":
    # usage: print_model_parameterset <model file> [parameter set name]
    num_args = len(sys.argv)
    if num_args < 2:
        print ("usage: print_model_parameterset <model file> [parameter set name]")
        sys.exit(1)
    file_name = sys.argv[1]
    # Optional second argument restricts the output to one named parameter
    # set; None means "print all sets". (Removed a dead `file_name = None`
    # assignment that was unconditionally overwritten.)
    parameter_set = sys.argv[2] if num_args > 2 else None
    print_model_parameter_set(file_name, parameter_set)
| jonasfoe/COPASI | copasi/bindings/python/examples/print_model_parameterset.py | Python | artistic-2.0 | 3,153 | [
"COPASI"
] | 1bf5a396c797be0b873f3ed8d0d9a54e5f498f41b47289fa5bab856e31587640 |
""" Class defining a production step """
__RCSID__ = "$Id$"
import json
from DIRAC import S_OK, S_ERROR
class ProductionStep(object):
    """ Define the Production Step object
    """

    def __init__(self, **kwargs):
        """ Simple constructor setting the default values for all step
        parameters.

        NOTE(review): keyword arguments are currently accepted but ignored.
        """
        # Default values for transformation step parameters
        self.Name = ''
        self.Description = 'description'
        self.LongDescription = 'longDescription'
        self.Type = 'MCSimulation'
        self.Plugin = 'Standard'
        self.AgentType = 'Manual'
        self.FileMask = ''
        #########################################
        self.ParentStep = None
        self.Inputquery = None
        self.Outputquery = None
        self.GroupSize = 1
        self.Body = 'body'

    def getAsDict(self):
        """ It returns the Step description as a dictionary, wrapped in S_OK.

        Returns S_ERROR when ParentStep is neither a ProductionStep, a list of
        ProductionSteps, nor empty, or when a referenced parent has no name.
        """
        prodStepDict = {}
        prodStepDict['name'] = self.Name
        prodStepDict['parentStep'] = []
        # check the ParentStep format
        if self.ParentStep:
            if isinstance(self.ParentStep, list):
                # Fixed: no longer re-initialises prodStepDict['parentStep']
                # here -- it is already set to [] above.
                for parentStep in self.ParentStep:
                    if not parentStep.Name:
                        return S_ERROR('Parent Step does not exist')
                    prodStepDict['parentStep'].append(parentStep.Name)
            elif isinstance(self.ParentStep, ProductionStep):
                if not self.ParentStep.Name:
                    return S_ERROR('Parent Step does not exist')
                prodStepDict['parentStep'] = [self.ParentStep.Name]
            else:
                return S_ERROR('Invalid Parent Step')
        prodStepDict['description'] = self.Description
        prodStepDict['longDescription'] = self.LongDescription
        prodStepDict['stepType'] = self.Type
        prodStepDict['plugin'] = self.Plugin
        prodStepDict['agentType'] = self.AgentType
        prodStepDict['fileMask'] = self.FileMask
        # Optional fields are JSON-encoded for transport/storage.
        prodStepDict['inputquery'] = json.dumps(self.Inputquery)
        prodStepDict['outputquery'] = json.dumps(self.Outputquery)
        prodStepDict['groupsize'] = self.GroupSize
        prodStepDict['body'] = json.dumps(self.Body)
        return S_OK(prodStepDict)
| fstagni/DIRAC | ProductionSystem/Client/ProductionStep.py | Python | gpl-3.0 | 2,042 | [
"DIRAC"
] | 7d3b100b0ec057081bb147dada5632269be39480252c0d03a6c892fc29b4ff3f |
"""
This module handles initial database propagation, which is only run the first
time the game starts. It will create some default channels, objects, and
other things.
Everything starts at handle_setup()
"""
import time
from django.conf import settings
from django.utils.translation import gettext as _
from evennia.accounts.models import AccountDB
from evennia.server.models import ServerConfig
from evennia.utils import create, logger
# Message raised when no superuser account (#1) exists yet.
ERROR_NO_SUPERUSER = """
No superuser exists yet. The superuser is the 'owner' account on
the Evennia server. Create a new superuser using the command
evennia createsuperuser
Follow the prompts, then restart the server.
"""

# Description set on the default Limbo starting room.
LIMBO_DESC = _(
    """
Welcome to your new |wEvennia|n-based game! Visit http://www.evennia.com if you need
help, want to contribute, report issues or just join the community.
As Account #1 you can create a demo/tutorial area with '|wbatchcommand tutorial_world.build|n'.
"""
)

# Warning template; {chan1}/{chan2}/{chan3} are filled with channel names.
WARNING_POSTGRESQL_FIX = """
PostgreSQL-psycopg2 compatibility fix:
The in-game channels {chan1}, {chan2} and {chan3} were created,
but the superuser was not yet connected to them. Please use in
game commands to connect Account #1 to those channels when first
logging in.
"""
def get_god_account():
    """
    Return the superuser (god) account #1, raising
    AccountDB.DoesNotExist with a helpful message if it was
    not created yet. (The account itself is created externally
    via `evennia createsuperuser`, not here.)
    """
    try:
        god_account = AccountDB.objects.get(id=1)
    except AccountDB.DoesNotExist:
        raise AccountDB.DoesNotExist(ERROR_NO_SUPERUSER)
    return god_account
def create_objects():
    """
    Creates the #1 account and Limbo room.
    """
    logger.log_info("Initial setup: Creating objects (Account #1 and Limbo room) ...")

    # Set the initial User's account object's username on the #1 object.
    # This object is pure django and only holds name, email and password.
    god_account = get_god_account()

    # Create an Account 'user profile' object to hold eventual
    # mud-specific settings for the AccountDB object.
    account_typeclass = settings.BASE_ACCOUNT_TYPECLASS

    # run all creation hooks on god_account (we must do so manually
    # since the manage.py command does not)
    god_account.swap_typeclass(account_typeclass, clean_attributes=True)
    god_account.basetype_setup()
    god_account.at_account_creation()
    god_account.locks.add(
        "examine:perm(Developer);edit:false();delete:false();boot:false();msg:all()"
    )
    # this is necessary for quelling to work correctly.
    god_account.permissions.add("Developer")

    # Limbo is the default "nowhere" starting room

    # Create the in-game god-character for account #1 and set
    # it to exist in Limbo.
    character_typeclass = settings.BASE_CHARACTER_TYPECLASS
    god_character = create.create_object(character_typeclass, key=god_account.username, nohome=True)
    # pin the god character to dbref #1 to mirror the account id
    god_character.id = 1
    god_character.save()
    god_character.db.desc = _("This is User #1.")
    god_character.locks.add(
        "examine:perm(Developer);edit:false();delete:false();boot:false();msg:all();puppet:false()"
    )
    # we set this low so that quelling is more useful
    god_character.permissions.add("Player")

    god_account.attributes.add("_first_login", True)
    god_account.attributes.add("_last_puppet", god_character)
    try:
        god_account.db._playable_characters.append(god_character)
    except AttributeError:
        # NOTE(review): this sets a plain `db_playable_characters` attribute,
        # not `db._playable_characters` as the try-branch uses -- looks like a
        # typo; confirm against where _playable_characters is read before fixing.
        god_account.db_playable_characters = [god_character]

    room_typeclass = settings.BASE_ROOM_TYPECLASS
    limbo_obj = create.create_object(room_typeclass, _("Limbo"), nohome=True)
    # pin Limbo to dbref #2
    limbo_obj.id = 2
    limbo_obj.save()
    limbo_obj.db.desc = LIMBO_DESC.strip()
    limbo_obj.save()

    # Now that Limbo exists, try to set the user up in Limbo (unless
    # the creation hooks already fixed this).
    if not god_character.location:
        god_character.location = limbo_obj
    if not god_character.home:
        god_character.home = limbo_obj
def create_channels():
    """
    Create the default in-game channels and subscribe the superuser
    to the mandatory ones.

    Raises:
        RuntimeError: if settings.CHANNEL_MUDINFO is unset.
    """
    logger.log_info("Initial setup: Creating default channels ...")
    superuser = get_god_account()

    # the mudinfo channel is mandatory
    mudinfo_def = settings.CHANNEL_MUDINFO
    if not mudinfo_def:
        raise RuntimeError("settings.CHANNEL_MUDINFO must be defined.")
    create.create_channel(**mudinfo_def).connect(superuser)

    # the connect-info channel is optional; the superuser is not subscribed
    connectinfo_def = settings.CHANNEL_CONNECTINFO
    if connectinfo_def:
        create.create_channel(**connectinfo_def)

    # remaining default channels, all with the superuser subscribed
    for channel_def in settings.DEFAULT_CHANNELS:
        create.create_channel(**channel_def).connect(superuser)
def at_initial_setup():
    """
    Custom hook for users to overload some or all parts of the initial
    setup. Called very last in the sequence. It tries to import and
    run a module named by settings.AT_INITIAL_SETUP_HOOK_MODULE and
    fails silently if this does not exist or fails to load.
    """
    modname = settings.AT_INITIAL_SETUP_HOOK_MODULE
    if not modname:
        return
    try:
        mod = __import__(modname, fromlist=[None])
    except (ImportError, ValueError):
        # missing/broken hook module is not an error
        return
    logger.log_info("Initial setup: Running at_initial_setup() hook.")
    hook = mod.__dict__.get("at_initial_setup", None)
    if hook:
        hook()
def collectstatic():
    """
    Run collectstatic to make sure all web assets are loaded.
    """
    # local import: django management commands are only needed here
    from django.core.management import call_command
    logger.log_info("Initial setup: Gathering static resources using 'collectstatic'")
    # --noinput: never prompt; this runs unattended during first start
    call_command("collectstatic", "--noinput")
def reset_server():
    """
    We end the initialization by resetting the server. This makes sure
    the first login is the same as all the following ones,
    particularly it cleans all caches for the special objects. It
    also checks so the warm-reset mechanism works as it should.
    """
    # record the moment initial setup completed as the server epoch
    ServerConfig.objects.conf("server_epoch", time.time())
    from evennia.server.sessionhandler import SESSIONS
    logger.log_info("Initial setup complete. Restarting Server once.")
    # ask the portal to restart the server process (warm reset)
    SESSIONS.portal_reset_server()
def handle_setup(last_step):
    """
    Main logic for the module. It allows for restarting the
    initialization at any point if one of the modules should crash.

    Args:
        last_step (int): The last stored successful step, for starting
            over on errors. If `< 0`, initialization has finished and no
            steps need to be redone.
    """
    if last_step < 0:
        # this means we don't need to handle setup since
        # it already ran successfully once.
        return
    # if None, set it to 0
    last_step = last_step or 0
    # setting up the list of functions to run
    setup_queue = [create_objects, create_channels, at_initial_setup, collectstatic, reset_server]
    # step through queue, from last completed function
    for num, setup_func in enumerate(setup_queue[last_step:]):
        # run the setup function. Note that if there is a
        # traceback we let it stop the system so the config
        # step is not saved.
        try:
            setup_func()
        except Exception:
            # NOTE(review): the cleanup below keys on the absolute queue index
            # of the failing step (1 = create_channels, 2 = at_initial_setup);
            # confirm the intended rollback semantics before changing.
            if last_step + num == 1:
                from evennia.objects.models import ObjectDB

                for obj in ObjectDB.objects.all():
                    obj.delete()
            elif last_step + num == 2:
                from evennia.comms.models import ChannelDB

                ChannelDB.objects.all().delete()
            raise
        # save this step
        ServerConfig.objects.conf("last_initial_setup_step", last_step + num + 1)
    # We got through the entire list. Set last_step to -1 so we don't
    # have to run this again.
    ServerConfig.objects.conf("last_initial_setup_step", -1)
| jamesbeebop/evennia | evennia/server/initial_setup.py | Python | bsd-3-clause | 7,807 | [
"VisIt"
] | 65505c9c44f4adf00c9071529952150c03544603ee85e6b8ea73ea51baad8f4b |
# -*-python-*-
#
# Copyright (C) 1999-2006 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# popen.py: a replacement for os.popen()
#
# This implementation of popen() provides a cmd + args calling sequence,
# rather than a system() type of convention. The shell facilities are not
# available, but that implies we can avoid worrying about shell hacks in
# the arguments.
#
# -----------------------------------------------------------------------
import os
import sys
import sapi
import threading
import string
if sys.platform == "win32":
import win32popen
import win32event
import win32process
import debug
import StringIO
def popen(cmd, args, mode, capture_err=1):
    """Spawn `cmd` with the argument list `args` (no shell involved).

    mode: 'r...' to read the child's output, otherwise write to its stdin.
    capture_err: if true (and reading), merge the child's stderr into stdout.
    Returns a _pipe wrapping the parent's end of the pipe plus the child
    process handle/pid so the exit status can be collected at close().
    """
    if sys.platform == "win32":
        command = win32popen.CommandLine(cmd, args)

        if string.find(mode, 'r') >= 0:
            # reading from the child: parent keeps the read end
            hStdIn = None

            if debug.SHOW_CHILD_PROCESSES:
                # spy pipes tee the child's streams into StringIO buffers
                dbgIn, dbgOut = None, StringIO.StringIO()
                handle, hStdOut = win32popen.MakeSpyPipe(0, 1, (dbgOut,))
                if capture_err:
                    hStdErr = hStdOut
                    dbgErr = dbgOut
                else:
                    dbgErr = StringIO.StringIO()
                    x, hStdErr = win32popen.MakeSpyPipe(None, 1, (dbgErr,))
            else:
                handle, hStdOut = win32popen.CreatePipe(0, 1)
                if capture_err:
                    hStdErr = hStdOut
                else:
                    hStdErr = win32popen.NullFile(1)
        else:
            # writing to the child: parent keeps the write end
            if debug.SHOW_CHILD_PROCESSES:
                dbgIn, dbgOut, dbgErr = StringIO.StringIO(), StringIO.StringIO(), StringIO.StringIO()
                hStdIn, handle = win32popen.MakeSpyPipe(1, 0, (dbgIn,))
                x, hStdOut = win32popen.MakeSpyPipe(None, 1, (dbgOut,))
                x, hStdErr = win32popen.MakeSpyPipe(None, 1, (dbgErr,))
            else:
                hStdIn, handle = win32popen.CreatePipe(0, 1)
                hStdOut = None
                hStdErr = None

        phandle, pid, thandle, tid = win32popen.CreateProcess(command, hStdIn, hStdOut, hStdErr)
        if debug.SHOW_CHILD_PROCESSES:
            debug.Process(command, dbgIn, dbgOut, dbgErr)
        return _pipe(win32popen.File2FileObject(handle, mode), phandle)

    # flush the stdio buffers since we are about to change the FD under them
    sys.stdout.flush()
    sys.stderr.flush()

    r, w = os.pipe()
    pid = os.fork()
    if pid:
        # in the parent

        # close the descriptor that we don't need and return the other one.
        if string.find(mode, 'r') >= 0:
            os.close(w)
            return _pipe(os.fdopen(r, mode), pid)
        os.close(r)
        return _pipe(os.fdopen(w, mode), pid)

    # in the child

    # we'll need /dev/null for the discarded I/O
    null = os.open('/dev/null', os.O_RDWR)

    if string.find(mode, 'r') >= 0:
        # hook stdout/stderr to the "write" channel
        os.dup2(w, 1)
        # "close" stdin; the child shouldn't use it
        ### this isn't quite right... we may want the child to read from stdin
        os.dup2(null, 0)
        # what to do with errors?
        if capture_err:
            os.dup2(w, 2)
        else:
            os.dup2(null, 2)
    else:
        # hook stdin to the "read" channel
        os.dup2(r, 0)
        # "close" stdout/stderr; the child shouldn't use them
        ### this isn't quite right... we may want the child to write to these
        os.dup2(null, 1)
        os.dup2(null, 2)

    # don't need these FDs any more
    os.close(null)
    os.close(r)
    os.close(w)

    # the stdin/stdout/stderr are all set up. exec the target
    try:
        os.execvp(cmd, (cmd,) + tuple(args))
    except:
        # aid debugging, if the os.execvp above fails for some reason:
        print "<h2>exec failed:</h2><pre>", cmd, string.join(args), "</pre>"
        raise

    # crap. shouldn't be here.
    sys.exit(127)
def pipe_cmds(cmds, out=None):
    """Executes a sequence of commands. The output of each command is directed to
    the input of the next command. A _pipe object is returned for writing to the
    first command's input. The output of the last command is directed to the
    "out" file object or the standard output if "out" is None. If "out" is not an
    OS file descriptor, a separate thread will be spawned to send data to its
    write() method."""
    if out is None:
        out = sys.stdout

    if sys.platform == "win32":
        ### FIXME: windows implementation ignores "out" argument, always
        ### writing last command's output to standard out
        if debug.SHOW_CHILD_PROCESSES:
            # debug variant: every stage is teed into StringIO spy buffers
            dbgIn = StringIO.StringIO()
            hStdIn, handle = win32popen.MakeSpyPipe(1, 0, (dbgIn,))
            i = 0
            for cmd in cmds:
                i = i + 1
                dbgOut, dbgErr = StringIO.StringIO(), StringIO.StringIO()
                if i < len(cmds):
                    # intermediate stage: output feeds the next stage's stdin
                    nextStdIn, hStdOut = win32popen.MakeSpyPipe(1, 1, (dbgOut,))
                    x, hStdErr = win32popen.MakeSpyPipe(None, 1, (dbgErr,))
                else:
                    # last stage: output goes to the server; event signals EOF
                    ehandle = win32event.CreateEvent(None, 1, 0, None)
                    nextStdIn, hStdOut = win32popen.MakeSpyPipe(None, 1, (dbgOut, sapi.server.file()), ehandle)
                    x, hStdErr = win32popen.MakeSpyPipe(None, 1, (dbgErr,))
                command = win32popen.CommandLine(cmd[0], cmd[1:])
                phandle, pid, thandle, tid = win32popen.CreateProcess(command, hStdIn, hStdOut, hStdErr)
                if debug.SHOW_CHILD_PROCESSES:
                    debug.Process(command, dbgIn, dbgOut, dbgErr)
                dbgIn = dbgOut
                hStdIn = nextStdIn
        else:
            hStdIn, handle = win32popen.CreatePipe(1, 0)
            spool = None
            i = 0
            for cmd in cmds:
                i = i + 1
                if i < len(cmds):
                    nextStdIn, hStdOut = win32popen.CreatePipe(1, 1)
                else:
                    # very last process
                    nextStdIn = None
                    if sapi.server.inheritableOut:
                        # send child output to standard out
                        hStdOut = win32popen.MakeInheritedHandle(win32popen.FileObject2File(sys.stdout),0)
                        ehandle = None
                    else:
                        ehandle = win32event.CreateEvent(None, 1, 0, None)
                        x, hStdOut = win32popen.MakeSpyPipe(None, 1, (sapi.server.file(),), ehandle)
                command = win32popen.CommandLine(cmd[0], cmd[1:])
                phandle, pid, thandle, tid = win32popen.CreateProcess(command, hStdIn, hStdOut, None)
                hStdIn = nextStdIn
        return _pipe(win32popen.File2FileObject(handle, 'wb'), phandle, ehandle)

    # flush the stdio buffers since we are about to change the FD under them
    sys.stdout.flush()
    sys.stderr.flush()

    # parent writes into parent_w; prev_r is the current stage's stdin source
    prev_r, parent_w = os.pipe()
    null = os.open('/dev/null', os.O_RDWR)

    child_pids = []
    # fork every command except the last; each reads prev_r, writes a new pipe
    for cmd in cmds[:-1]:
        r, w = os.pipe()
        pid = os.fork()
        if not pid:
            # in the child

            # hook up stdin to the "read" channel
            os.dup2(prev_r, 0)
            # hook up stdout to the output channel
            os.dup2(w, 1)
            # toss errors
            os.dup2(null, 2)
            # close these extra descriptors
            os.close(prev_r)
            os.close(parent_w)
            os.close(null)
            os.close(r)
            os.close(w)
            # time to run the command
            try:
                os.execvp(cmd[0], cmd)
            except:
                pass
            sys.exit(127)

        # in the parent
        child_pids.append(pid)
        # we don't need these any more
        os.close(prev_r)
        os.close(w)
        # the read channel of this pipe will feed into to the next command
        prev_r = r

    # no longer needed
    os.close(null)

    # done with most of the commands. set up the last command to write to "out"
    if not hasattr(out, 'fileno'):
        r, w = os.pipe()

    pid = os.fork()
    if not pid:
        # in the child (the last command)

        # hook up stdin to the "read" channel
        os.dup2(prev_r, 0)
        # hook up stdout to "out"
        if hasattr(out, 'fileno'):
            if out.fileno() != 1:
                os.dup2(out.fileno(), 1)
                out.close()
        else:
            # "out" can't be hooked up directly, so use a pipe and a thread
            os.dup2(w, 1)
            os.close(r)
            os.close(w)
        # close these extra descriptors
        os.close(prev_r)
        os.close(parent_w)
        # run the last command
        try:
            os.execvp(cmds[-1][0], cmds[-1])
        except:
            pass
        sys.exit(127)

    child_pids.append(pid)
    # not needed any more
    os.close(prev_r)
    if not hasattr(out, 'fileno'):
        # pump the last command's output into out.write() from a thread
        os.close(w)
        thread = _copy(r, out)
        thread.start()
    else:
        thread = None

    # write into the first pipe, wait on the final process
    return _pipe(os.fdopen(parent_w, 'w'), child_pids, thread=thread)
class _copy(threading.Thread):
def __init__(self, srcfd, destfile):
self.srcfd = srcfd
self.destfile = destfile
threading.Thread.__init__(self)
def run(self):
try:
while 1:
s = os.read(self.srcfd, 1024)
if not s:
break
self.destfile.write(s)
finally:
os.close(self.srcfd)
class _pipe:
    "Wrapper for a file which can wait() on a child process at close time."

    def __init__(self, file, child_pid, done_event = None, thread = None):
        # file: the parent's end of the pipe (file object)
        # child_pid: a single pid/handle, or a list of pids (pipe_cmds)
        # done_event: win32 event signalled when the last spy pipe drains
        # thread: optional _copy thread pumping output into a file object
        self.file = file
        self.child_pid = child_pid
        if sys.platform == "win32":
            if done_event:
                self.wait_for = (child_pid, done_event)
            else:
                self.wait_for = (child_pid,)
        else:
            self.thread = thread

    def eof(self):
        """Non-blocking check: return the exit status if the child has
        finished (closing our end of the pipe), else None."""
        ### should be calling file.eof() here instead of file.close(), there
        ### may be data in the pipe or buffer after the process exits
        if sys.platform == "win32":
            r = win32event.WaitForMultipleObjects(self.wait_for, 1, 0)
            if r == win32event.WAIT_OBJECT_0:
                self.file.close()
                self.file = None
                return win32process.GetExitCodeProcess(self.child_pid)
            return None

        # NOTE(review): Thread.isAlive() is the Python 2 spelling; it was
        # removed in Python 3.9 (is_alive) -- confirm target interpreter.
        if self.thread and self.thread.isAlive():
            return None

        # WNOHANG: poll without blocking
        pid, status = os.waitpid(self.child_pid, os.WNOHANG)
        if pid:
            self.file.close()
            self.file = None
            return status
        return None

    def close(self):
        """Close our end of the pipe and reap the child(ren); returns the
        (last) child's exit status, or None if already closed."""
        if self.file:
            self.file.close()
            self.file = None
            if sys.platform == "win32":
                win32event.WaitForMultipleObjects(self.wait_for, 1, win32event.INFINITE)
                return win32process.GetExitCodeProcess(self.child_pid)
            else:
                if self.thread:
                    self.thread.join()
                if type(self.child_pid) == type([]):
                    # reap every child; only the last status is returned
                    for pid in self.child_pid:
                        exit = os.waitpid(pid, 0)[1]
                    return exit
                else:
                    return os.waitpid(self.child_pid, 0)[1]
        return None

    def __getattr__(self, name):
        # delegate everything else (read/write/...) to the wrapped file
        return getattr(self.file, name)

    def __del__(self):
        self.close()
| foresthz/fusion5.1 | www/scm/viewvc/lib/popen.py | Python | gpl-2.0 | 10,509 | [
"VisIt"
] | 3016bbc6f6deb931ef5fffc4d7f618155e1cb8973e602a70098d75cf5230217a |
'''
'''
from __future__ import print_function
import shutil
from os.path import dirname, exists, join, realpath, relpath
import os, re, subprocess, sys, time
import versioneer
# provide fallbacks for highlights in case colorama is not installed
try:
    import colorama
    from colorama import Fore, Style

    # wrap text in ANSI style/color escape sequences
    def bright(text): return "%s%s%s" % (Style.BRIGHT, text, Style.RESET_ALL)
    def dim(text): return "%s%s%s" % (Style.DIM, text, Style.RESET_ALL)
    def red(text): return "%s%s%s" % (Fore.RED, text, Style.RESET_ALL)
    def green(text): return "%s%s%s" % (Fore.GREEN, text, Style.RESET_ALL)
    def yellow(text): return "%s%s%s" % (Fore.YELLOW, text, Style.RESET_ALL)
    # Windows consoles need colorama's init() to interpret ANSI codes
    sys.platform == "win32" and colorama.init()
except ImportError:
    # no colorama: highlighting becomes a no-op
    def bright(text): return text
    def dim(text): return text
    def red(text): return text
    def green(text): return text
    def yellow(text): return text

# some functions prompt for user input, handle input vs raw_input (py2 vs py3)
if sys.version_info[0] < 3:
    input = raw_input # NOQA
# -----------------------------------------------------------------------------
# Module global variables
# -----------------------------------------------------------------------------
# repository root (directory containing this file)
ROOT = dirname(realpath(__file__))
# BokehJS source tree and its build outputs
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
# destination inside the python package where built assets are installed
SERVER = join(ROOT, 'bokeh/server')
# -----------------------------------------------------------------------------
# Helpers for command line operations
# -----------------------------------------------------------------------------
def show_bokehjs(bokehjs_action, develop=False):
    ''' Print a useful report after setuptools output describing where and how
    BokehJS is installed.

    Args:
        bokehjs_action (str) : one of 'built', 'installed', or 'packaged'
            how (or if) BokehJS was installed into the python source tree

        develop (bool, optional) :
            whether the command was for "develop" mode (default: False)

    Returns:
        None

    '''
    print()
    header = "Installed Bokeh for DEVELOPMENT:" if develop else "Installed Bokeh:"
    print(header)
    if bokehjs_action in ('built', 'installed'):
        state = bright(yellow("NEWLY")) if bokehjs_action == 'built' else bright(yellow("PREVIOUSLY"))
        print(" - using %s built BokehJS from bokehjs/build\n" % state)
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
    print()
def show_help(bokehjs_action):
    ''' Print information about extra Bokeh-specific command line options.

    Args:
        bokehjs_action (str) : one of 'built', 'installed', or 'packaged'
            how (or if) BokehJS was installed into the python source tree

    Returns:
        None

    '''
    print()
    if bokehjs_action not in ('built', 'installed'):
        # packaged BokehJS: no extra options apply
        print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
        print()
        print("No extra Bokeh-specific options are available.")
    else:
        print("Bokeh-specific options available with 'install' or 'develop':")
        print()
        print(" --build-js build and install a fresh BokehJS")
        print(" --install-js install only last previously built BokehJS")
    print()
# -----------------------------------------------------------------------------
# Other functions used directly by setup.py
# -----------------------------------------------------------------------------
def build_or_install_bokehjs():
    ''' Build a new BokehJS (and install it) or install a previously built
    BokehJS.

    If neither ``--build-js`` nor ``--install-js`` is on the command line,
    the user is prompted for what to do. If ``--existing-js`` is present,
    this setup.py is being run from a packaged sdist and nothing is done.

    ``--build-js`` is only compatible with the setup.py commands:
    install, develop, sdist, egg_info, build.

    Returns:
        str : one of 'built', 'installed', 'packaged'
            How (or if) BokehJS was installed into the python source tree

    '''
    # This happens when building from inside a published, pre-packaged sdist
    # The --existing-js option is not otherwise documented
    if '--existing-js' in sys.argv:
        sys.argv.remove('--existing-js')
        return "packaged"

    if '--build-js' in sys.argv:
        jsbuild = True
        sys.argv.remove('--build-js')
    elif '--install-js' in sys.argv:
        jsbuild = False
        sys.argv.remove('--install-js')
    else:
        # neither flag given: ask the user
        jsbuild = jsbuild_prompt()

    jsbuild_ok = ('install', 'develop', 'sdist', 'egg_info', 'build')
    if jsbuild and not any(arg in sys.argv for arg in jsbuild_ok):
        print("Error: Option '--build-js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
        sys.exit(1)

    if jsbuild:
        build_js()
        install_js()
        return "built"

    install_js()
    return "installed"
def fixup_building_sdist():
    ''' Check for 'sdist' and ensure we always build BokehJS when packaging.

    Source distributions do not ship with BokehJS source code, but must ship
    with a pre-built BokehJS library. This function modifies ``sys.argv`` as
    necessary so that ``--build-js`` IS present, and ``--install-js`` is NOT.

    Returns:
        None

    '''
    if "sdist" not in sys.argv:
        return
    if "--install-js" in sys.argv:
        print("Removing '--install-js' incompatible with 'sdist'")
        sys.argv.remove('--install-js')
    if "--build-js" not in sys.argv:
        print("Adding '--build-js' required for 'sdist'")
        sys.argv.append('--build-js')
def fixup_for_packaged():
    ''' If we are installing FROM an sdist, then a pre-built BokehJS is
    already installed in the python source tree.

    The command line options ``--build-js`` or ``--install-js`` are
    removed from ``sys.argv``, with a warning, and ``--existing-js`` is
    added to signal that BokehJS is already packaged.

    Returns:
        None

    '''
    # an sdist is detected by the presence of PKG-INFO at the repo root
    if not exists(join(ROOT, 'PKG-INFO')):
        return
    if "--build-js" in sys.argv or "--install-js" in sys.argv:
        print(SDIST_BUILD_WARNING)
        if "--build-js" in sys.argv:
            sys.argv.remove('--build-js')
        if "--install-js" in sys.argv:
            sys.argv.remove('--install-js')
    if "--existing-js" not in sys.argv:
        sys.argv.append('--existing-js')
def fixup_old_jsargs():
    ''' Fixup (and warn about) old style command line options with underscores.

    This function modifies ``sys.argv`` to make the replacements:

    * ``--build_js`` to --build-js
    * ``--install_js`` to --install-js

    and prints a warning about their deprecation.

    Returns:
        None

    '''
    for idx, arg in enumerate(sys.argv):
        if arg == '--build_js':
            print("WARNING: --build_js (with underscore) is deprecated, use --build-js")
            sys.argv[idx] = '--build-js'
        elif arg == '--install_js':
            print("WARNING: --install_js (with underscore) is deprecated, use --install-js")
            sys.argv[idx] = '--install-js'
# Horrible hack: workaround to allow creation of bdist_wheel on pip
# installation. Why, for God's sake, is pip forcing the generation of wheels
# when installing a package?
def get_cmdclass():
    ''' A ``cmdclass`` that works around a setuptools deficiency.

    There is no need to build wheels when installing a package, however some
    versions of setuptools seem to mandate this. This is a hacky workaround
    that modifies the ``cmdclass`` returned by versioneer so that not having
    wheel installed is not a fatal error.

    '''
    cmdclass = versioneer.get_cmdclass()
    try:
        from wheel.bdist_wheel import bdist_wheel
    except ImportError:
        # pip does not ask for bdist_wheel when wheel is not installed
        pass
    else:
        cmdclass["bdist_wheel"] = bdist_wheel
    return cmdclass
def get_package_data():
    ''' All of all of the "extra" package data files collected by the
    ``package_files`` and ``package_path`` functions in ``setup.py``.

    '''
    # paths in _PACKAGE_DATA are relative to the 'bokeh' package directory
    return { 'bokeh': _PACKAGE_DATA }
def get_version():
    ''' The version of Bokeh currently checked out

    Returns:
        str : the version string

    '''
    # delegate to versioneer, which derives the version from VCS metadata
    return versioneer.get_version()
# -----------------------------------------------------------------------------
# Helpers for operation in the bokehjs dir
# -----------------------------------------------------------------------------
def jsbuild_prompt():
    ''' Prompt users whether to build a new BokehJS or install an existing one.

    Returns:
        bool : True, if a new build is requested, False otherwise

    '''
    print(BOKEHJS_BUILD_PROMPT)
    choices = {"1": True, "2": False}
    while True:
        value = input("Choice? ")
        if value in choices:
            return choices[value]
        print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
# -----------------------------------------------------------------------------
# Helpers for operations in the bokehjs dir
# -----------------------------------------------------------------------------
def build_js():
    ''' Build BokehJS files (CSS, JS, etc) under the ``bokehjs`` source
    subdirectory.

    Also prints a table of statistics about the generated assets (file sizes,
    etc.) or any error messages if the build fails.

    Note this function only builds BokehJS assets, it does not install them
    into the python source tree. Exits the process with status 1 on any
    build failure.

    '''
    print("Building BokehJS... ", end="")
    sys.stdout.flush()
    os.chdir('bokehjs')

    # gulp has a platform-specific launcher name on Windows
    if sys.platform != "win32":
        cmd = [join('node_modules', '.bin', 'gulp'), 'build']
    else:
        cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']

    t0 = time.time()
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        # gulp binary missing (e.g. npm install not run)
        print(BUILD_EXEC_FAIL_MSG % (cmd, e))
        sys.exit(1)
    finally:
        # always restore the working directory, even on failure
        os.chdir('..')

    # NOTE(review): reading the PIPEs only after wait() can deadlock if the
    # child fills a pipe buffer; proc.communicate() would be safer -- confirm
    # before changing.
    result = proc.wait()
    t1 = time.time()

    if result != 0:
        # bug fix: removed a dead `indented_msg = ""` assignment here that was
        # never read before sys.exit()
        outmsg = proc.stdout.read().decode('ascii', errors='ignore')
        outmsg = "\n".join([" " + x for x in outmsg.split("\n")])
        errmsg = proc.stderr.read().decode('ascii', errors='ignore')
        errmsg = "\n".join([" " + x for x in errmsg.split("\n")])
        print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))
        sys.exit(1)

    # reformat gulp's "[stamp] text" lines for the success report
    indented_msg = ""
    msg = proc.stdout.read().decode('ascii', errors='ignore')
    pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
    for line in msg.strip().split("\n"):
        m = pat.match(line)
        if not m: continue # skip generate.py output lines
        stamp, txt = m.groups()
        indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
    # bug fix: removed a dead re-assignment of `msg` here; it was never read
    # after this point
    print(BUILD_SUCCESS_MSG % indented_msg)
    print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))

    print()
    print("Build artifact sizes:")
    try:
        def size(*path):
            # size of a build artifact in KiB
            return os.stat(join("bokehjs", "build", *path)).st_size / 2**10

        print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
        print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
        print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
        print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))

        print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
        print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
        print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
        print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))

        print(" - bokeh-api.js : %6.1f KB" % size("js", "bokeh-api.js"))
        print(" - bokeh-api.min.js : %6.1f KB" % size("js", "bokeh-api.min.js"))
    except Exception as e:
        print(BUILD_SIZE_FAIL_MSG % e)
        sys.exit(1)
def install_js():
    ''' Copy built BokehJS files into the Python source tree.

    Returns:
        None

    '''
    target_jsdir = join(SERVER, 'static', 'js')
    target_cssdir = join(SERVER, 'static', 'css')

    # refuse to install unless a complete build is present
    required_assets = (
        join(JS, 'bokeh.js'),
        join(JS, 'bokeh.min.js'),
        join(CSS, 'bokeh.css'),
        join(CSS, 'bokeh.min.css'),
    )
    if not all(exists(asset) for asset in required_assets):
        print(BOKEHJS_INSTALL_FAIL)
        sys.exit(1)

    # replace any previously installed assets wholesale
    for source_dir, target_dir in ((JS, target_jsdir), (CSS, target_cssdir)):
        if exists(target_dir):
            shutil.rmtree(target_dir)
        shutil.copytree(source_dir, target_dir)
# -----------------------------------------------------------------------------
# Helpers for collecting package data
# -----------------------------------------------------------------------------
_PACKAGE_DATA = []
def package_files(*paths):
'''
'''
_PACKAGE_DATA.extend(paths)
def package_path(path, filters=()):
'''
'''
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
_PACKAGE_DATA.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
_PACKAGE_DATA.append(join(path, f))
# -----------------------------------------------------------------------------
# Status and error message strings
# -----------------------------------------------------------------------------
BOKEHJS_BUILD_PROMPT = """
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
"""
BOKEHJS_INSTALL_FAIL = """
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build-js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
"""
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned the following
---- on stdout:
%s
---- on stderr:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
SDIST_BUILD_WARNING = """
Source distribution (sdist) packages come with PRE-BUILT BokehJS files.
Building/installing from the bokehjs source directory of sdist packages is
disabled, and the options --build-js and --install-js will be IGNORED.
To build or develop BokehJS yourself, you must clone the full Bokeh GitHub
repository from https://github.com/bokeh/bokeh
"""
| percyfal/bokeh | _setup_support.py | Python | bsd-3-clause | 15,547 | [
"GULP"
] | 106e01b11f649f0f0e0b231dd65eef0d83e33b26e5908871aab946cfc459ea35 |
""" DIRAC JobDB class is a front-end to the main WMS database containing
job definitions and status information. It is used in most of the WMS
components
The following methods are provided for public usage:
getJobAttribute()
getJobAttributes()
getAllJobAttributes()
getDistinctJobAttributes()
getAttributesForJobList()
getJobParameter()
getJobParameters()
getAllJobParameters()
getInputData()
getJobJDL()
selectJobs()
selectJobsWithStatus()
setJobAttribute()
setJobAttributes()
setJobParameter()
setJobParameters()
setJobJDL()
setJobStatus()
setInputData()
insertNewJobIntoDB()
removeJobFromDB()
rescheduleJob()
rescheduleJobs()
getMask()
setMask()
allowSiteInMask()
banSiteInMask()
getCounters()
"""
from __future__ import print_function, absolute_import, division
from six.moves import range
__RCSID__ = "$Id$"
import operator
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.DErrno import EWMSSUBM
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Base.DB import DB
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
#############################################################################
# All job status values recognized by the WMS, in lifecycle order.
JOB_STATES = ['Submitting', 'Received', 'Checking', 'Staging', 'Waiting', 'Matched',
              'Running', 'Stalled', 'Done', 'Completed', 'Failed']
# Terminal states: jobs in these states will not change status again.
JOB_FINAL_STATES = ['Done', 'Completed', 'Failed']
class JobDB(DB):
""" Interface to MySQL-based JobDB
"""
def __init__(self):
""" Standard Constructor
"""
DB.__init__(self, 'JobDB', 'WorkloadManagement/JobDB')
# data member to check if __init__ went through without error
self.__initialized = False
self.maxRescheduling = self.getCSOption('MaxRescheduling', 3)
# loading the function that will be used to determine the platform (it can be VO specific)
res = ObjectLoader().loadObject("ConfigurationSystem.Client.Helpers.Resources", 'getDIRACPlatform')
if not res['OK']:
self.log.fatal(res['Message'])
self.getDIRACPlatform = res['Value']
self.jobAttributeNames = []
self.siteClient = SiteStatus()
result = self.__getAttributeNames()
if not result['OK']:
self.log.fatal('JobDB: Can not retrieve job Attributes')
return
self.jdl2DBParameters = ['JobName', 'JobType', 'JobGroup']
self.log.info("MaxReschedule", self.maxRescheduling)
self.log.info("==================================================")
self.__initialized = True
    def isValid(self):
        """ Check if correctly initialised """
        # True only when __init__ completed every step without error
        return self.__initialized
def __getAttributeNames(self):
""" get Name of Job Attributes defined in DB
set self.jobAttributeNames to the list of Names
return S_OK()
return S_ERROR upon error
"""
res = self._query('DESCRIBE Jobs')
if not res['OK']:
return res
self.jobAttributeNames = [row[0] for row in res['Value']]
return S_OK()
#############################################################################
def getAttributesForJobList(self, jobIDList, attrList=None):
""" Get attributes for the jobs in the the jobIDList.
Returns an S_OK structure with a dictionary of dictionaries as its Value:
ValueDict[jobID][attribute_name] = attribute_value
"""
if not jobIDList:
return S_OK({})
if attrList:
missingAttr = [repr(x) for x in attrList if x not in self.jobAttributeNames]
if missingAttr:
return S_ERROR("JobDB.getAttributesForJobList: Unknown Attribute(s): %s" % ", ".join(missingAttr))
attrNames = ','.join(str(x) for x in attrList if x in self.jobAttributeNames)
attr_tmp_list = attrList
else:
attrNames = ','.join(self.jobAttributeNames)
attr_tmp_list = self.jobAttributeNames
jobList = ','.join([str(x) for x in jobIDList])
cmd = 'SELECT JobID,%s FROM Jobs WHERE JobID in ( %s )' % (attrNames, jobList)
res = self._query(cmd)
if not res['OK']:
return res
try:
retDict = {}
for retValues in res['Value']:
jobID = retValues[0]
jobDict = {'JobID': jobID}
# Make a dict from the list of attributes names and values
for name, value in zip(attr_tmp_list, retValues[1:]):
try:
value = value.tostring()
except BaseException:
value = str(value)
jobDict[name] = value
retDict[int(jobID)] = jobDict
return S_OK(retDict)
except BaseException as e:
return S_ERROR('JobDB.getAttributesForJobList: Failed\n%s' % repr(e))
#############################################################################
def getDistinctJobAttributes(self, attribute, condDict=None, older=None,
newer=None, timeStamp='LastUpdateTime'):
""" Get distinct values of the job attribute under specified conditions
"""
return self.getDistinctAttributeValues('Jobs', attribute, condDict=condDict,
older=older, newer=newer, timeStamp=timeStamp)
#############################################################################
def traceJobParameter(self, site, localID, parameter, date=None, until=None):
ret = self.traceJobParameters(site, localID, [parameter], None, date, until)
if not ret['OK']:
return ret
returnDict = {}
for jobID in ret['Value']:
returnDict[jobID] = ret['Value'][jobID].get(parameter)
return S_OK(returnDict)
#############################################################################
  def traceJobParameters(self, site, localIDs, paramList=None, attributeList=None, date=None, until=None):
    """ Trace parameters and selected attributes of jobs that ran at a given
        site under given local (batch-system) job IDs, inside a time window.

        :param str site: DIRAC site name the jobs ran at
        :param localIDs: one or several local job IDs (int, or list/dict of ints)
        :param list paramList: job parameter names to fetch (all when None)
        :param list attributeList: extra job attributes to fetch; a fixed set
               of timing/ownership attributes is always added
        :param str date: window start as '%Y-%m-%d' (optionally with a time
               part, which switches to exact-time matching); defaults to the
               24 hours preceding 'until'
        :param str until: window end as '%Y-%m-%d' or the literal 'now'
        :return: S_OK({'Successful': {localID: {jobID: attributes}},
                 'Failed': {localID: reason}}) or S_ERROR
    """
    import datetime
    exactTime = False
    # these attributes are always needed for the exact-time check below
    if not attributeList:
      attributeList = []
    attributeList = list(set(attributeList) | set(['StartExecTime', 'SubmissionTime', 'HeartBeatTime',
                                                   'EndExecTime', 'JobName', 'OwnerDN', 'OwnerGroup']))
    try:
      if isinstance(localIDs, (list, dict)):
        localIDs = [int(localID) for localID in localIDs]
      else:
        localIDs = [int(localIDs)]
    except BaseException:
      return S_ERROR("localIDs must be integers")
    now = datetime.datetime.utcnow()
    if until:
      if until.lower() == 'now':
        until = now
      else:
        try:
          until = datetime.datetime.strptime(until, '%Y-%m-%d')
        except BaseException:
          return S_ERROR("Error in format for 'until', expected '%Y-%m-%d'")
    if not date:
      # no start date: take the 24 hours preceding now
      # NOTE(review): this overrides a user-supplied 'until' - confirm intended
      until = now
      since = until - datetime.timedelta(hours=24)
    else:
      since = None
      # try date-only, then date+minutes, then date+seconds; any failed parse
      # flags exact-time mode (a time part implies an exact time stamp)
      for dFormat in ('%Y-%m-%d', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S'):
        try:
          since = datetime.datetime.strptime(date, dFormat)
          break
        except BaseException:
          exactTime = True
      if not since:
        return S_ERROR('Error in date format')
      if exactTime:
        # jobs must have been running at this precise moment
        exactTime = since
        if not until:
          until = now
      else:
        if not until:
          until = since + datetime.timedelta(hours=24)
    if since > now:
      return S_ERROR('Cannot find jobs in the future')
    if until > now:
      until = now
    result = self.selectJobs({'Site': site}, older=str(until), newer=str(since))
    if not result['OK']:
      return result
    if not result['Value']:
      return S_ERROR('No jobs found at %s for date %s' % (site, date))
    resultDict = {'Successful': {}, 'Failed': {}}
    for jobID in result['Value']:
      if jobID:
        ret = self.getJobParameter(jobID, 'LocalJobID')
        if not ret['OK']:
          return ret
        localID = ret['Value']
        if localID and int(localID) in localIDs:
          attributes = self.getJobAttributes(jobID, attributeList)
          if not attributes['OK']:
            return attributes
          attributes = attributes['Value']
          if exactTime:
            # start boundary: StartExecTime, falling back to SubmissionTime
            # (attributes come back stringified, so 'None' means unset)
            for att in ('StartExecTime', 'SubmissionTime'):
              startTime = attributes.get(att)
              if startTime == 'None':
                startTime = None
              if startTime:
                break
            startTime = datetime.datetime.strptime(startTime, '%Y-%m-%d %H:%M:%S') if startTime else now
            # end boundary: EndExecTime, falling back to last HeartBeatTime
            for att in ('EndExecTime', 'HeartBeatTime'):
              lastTime = attributes.get(att)
              if lastTime == 'None':
                lastTime = None
              if lastTime:
                break
            lastTime = datetime.datetime.strptime(lastTime, '%Y-%m-%d %H:%M:%S') if lastTime else now
            # keep the job only if it was alive at the requested instant
            okTime = (exactTime >= startTime and exactTime <= lastTime)
          else:
            okTime = True
          if okTime:
            ret = self.getJobParameters(jobID, paramList=paramList)
            if not ret['OK']:
              return ret
            attributes.update(ret['Value'].get(jobID, {}))
            resultDict['Successful'].setdefault(int(localID), {})[int(jobID)] = attributes
    for localID in localIDs:
      if localID not in resultDict['Successful']:
        resultDict['Failed'][localID] = 'localID not found'
    return S_OK(resultDict)
#############################################################################
def getJobParameters(self, jobID, paramList=None):
""" Get Job Parameters defined for jobID.
Returns a dictionary with the Job Parameters.
If parameterList is empty - all the parameters are returned.
"""
if isinstance(jobID, (basestring, int, long)):
jobID = [jobID]
jobIDList = []
for jID in jobID:
ret = self._escapeString(str(jID))
if not ret['OK']:
return ret
jobIDList.append(ret['Value'])
self.log.debug('JobDB.getParameters: Getting Parameters for jobs %s' % ','.join(jobIDList))
resultDict = {}
if paramList:
if isinstance(paramList, basestring):
paramList = paramList.split(',')
paramNameList = []
for pn in paramList:
ret = self._escapeString(pn)
if not ret['OK']:
return ret
paramNameList.append(ret['Value'])
cmd = "SELECT JobID, Name, Value FROM JobParameters WHERE JobID IN (%s) AND Name IN (%s)" % \
(','.join(jobIDList), ','.join(paramNameList))
result = self._query(cmd)
if result['OK']:
if result['Value']:
for res_jobID, res_name, res_value in result['Value']:
try:
res_value = res_value.tostring()
except BaseException:
pass
resultDict.setdefault(res_jobID, {})[res_name] = res_value
return S_OK(resultDict) # there's a slim chance that this is an empty dictionary
else:
return S_ERROR('JobDB.getJobParameters: failed to retrieve parameters')
else:
result = self.getFields('JobParameters', ['JobID', 'Name', 'Value'], {'JobID': jobID})
if not result['OK']:
return result
for res_jobID, res_name, res_value in result['Value']:
try:
res_value = res_value.tostring()
except BaseException:
pass
resultDict.setdefault(res_jobID, {})[res_name] = res_value
return S_OK(resultDict) # there's a slim chance that this is an empty dictionary
#############################################################################
def getAtticJobParameters(self, jobID, paramList=None, rescheduleCounter=-1):
""" Get Attic Job Parameters defined for a job with jobID.
Returns a dictionary with the Attic Job Parameters per each rescheduling cycle.
If parameterList is empty - all the parameters are returned.
If recheduleCounter = -1, all cycles are returned.
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
self.log.debug('JobDB.getAtticJobParameters: Getting Attic Parameters for job %s' % jobID)
resultDict = {}
paramCondition = ''
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString(x)
if not ret['OK']:
return ret
paramNameList.append(x)
paramNames = ','.join(paramNameList)
paramCondition = " AND Name in (%s)" % paramNames
rCounter = ''
if rescheduleCounter != -1:
rCounter = ' AND RescheduleCycle=%d' % int(rescheduleCounter)
cmd = "SELECT Name, Value, RescheduleCycle from AtticJobParameters"
cmd += " WHERE JobID=%s %s %s" % (jobID, paramCondition, rCounter)
result = self._query(cmd)
if result['OK']:
if result['Value']:
for name, value, counter in result['Value']:
try:
value = value.tostring()
except BaseException:
pass
resultDict.setdefault(counter, {})[name] = value
return S_OK(resultDict)
else:
return S_ERROR('JobDB.getAtticJobParameters: failed to retrieve parameters')
#############################################################################
def getJobAttributes(self, jobID, attrList=None):
""" Get all Job Attributes for a given jobID.
Return a dictionary with all Job Attributes,
return an empty dictionary if matching job found
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
attrNameList = []
for x in attrList if attrList else self.jobAttributeNames:
ret = self._escapeString(x)
if not ret['OK']:
return ret
x = "`" + ret['Value'][1:-1] + "`"
attrNameList.append(x)
attrNames = ','.join(attrNameList)
self.log.debug('JobDB.getAllJobAttributes: Getting Attributes for job = %s.' % jobID)
cmd = 'SELECT %s FROM Jobs WHERE JobID=%s' % (attrNames, jobID)
res = self._query(cmd)
if not res['OK']:
return res
if not res['Value']:
return S_OK({})
values = res['Value'][0]
attributes = {}
for name, value in zip(attrList if attrList else self.jobAttributeNames, values):
attributes[name] = str(value)
return S_OK(attributes)
#############################################################################
def getJobAttribute(self, jobID, attribute):
""" Get the given attribute of a job specified by its jobID
"""
result = self.getJobAttributes(jobID, [attribute])
if result['OK']:
value = result['Value'][attribute]
return S_OK(value)
return result
#############################################################################
def getJobParameter(self, jobID, parameter):
""" Get the given parameter of a job specified by its jobID
"""
result = self.getJobParameters(jobID, [parameter])
if not result['OK']:
return result
return S_OK(result.get('Value', {}).get(jobID, {}).get(parameter))
#############################################################################
def getJobOptParameter(self, jobID, parameter):
""" Get optimizer parameters for the given job.
"""
result = self.getFields('OptimizerParameters', ['Value'], {'JobID': jobID, 'Name': parameter})
if result['OK']:
if result['Value']:
return S_OK(result['Value'][0][0])
return S_ERROR('Parameter not found')
return S_ERROR('Failed to access database')
#############################################################################
def getJobOptParameters(self, jobID, paramList=None):
""" Get optimizer parameters for the given job. If the list of parameter names is
empty, get all the parameters then
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
resultDict = {}
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString(x)
if not ret['OK']:
return ret
paramNameList.append(ret['Value'])
paramNames = ','.join(paramNameList)
cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s and Name in (%s)" % (jobID, paramNames)
else:
cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s" % jobID
result = self._query(cmd)
if result['OK']:
if result['Value']:
for name, value in result['Value']:
try:
value = value.tostring()
except BaseException:
pass
resultDict[name] = value
return S_OK(resultDict)
else:
return S_ERROR('JobDB.getJobOptParameters: failed to retrieve parameters')
#############################################################################
def getInputData(self, jobID):
"""Get input data for the given job
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
cmd = 'SELECT LFN FROM InputData WHERE JobID=%s' % jobID
res = self._query(cmd)
if not res['OK']:
return res
inputData = [i[0] for i in res['Value'] if i[0].strip()]
for index, lfn in enumerate(inputData):
if lfn.lower().startswith('lfn:'):
inputData[index] = lfn[4:]
return S_OK(inputData)
#############################################################################
def setInputData(self, jobID, inputData):
"""Inserts input data for the given job
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
cmd = 'DELETE FROM InputData WHERE JobID=%s' % (jobID)
result = self._update(cmd)
if not result['OK']:
result = S_ERROR('JobDB.setInputData: operation failed.')
for lfn in inputData:
# some jobs are setting empty string as InputData
if not lfn:
continue
ret = self._escapeString(lfn.strip())
if not ret['OK']:
return ret
lfn = ret['Value']
cmd = 'INSERT INTO InputData (JobID,LFN) VALUES (%s, %s )' % (jobID, lfn)
res = self._update(cmd)
if not res['OK']:
return res
return S_OK('Files added')
#############################################################################
  def setOptimizerChain(self, jobID, optimizerList):
    """ Store the optimizer chain for the given job as the 'OptimizerChain'
        optimizer parameter (a comma-separated string).

        NOTE(review): contrary to what an earlier comment claimed, the
        'TaskQueue' optimizer is NOT appended here when missing from
        optimizerList - callers must supply the complete chain.
    """
    optString = ','.join(optimizerList)

    return self.setJobOptParameter(jobID, 'OptimizerChain', optString)
#############################################################################
def setNextOptimizer(self, jobID, currentOptimizer):
""" Set the job status to be processed by the next optimizer in the
chain
"""
result = self.getJobOptParameter(jobID, 'OptimizerChain')
if not result['OK']:
return result
optList = result['Value'].split(',')
if currentOptimizer not in optList:
return S_ERROR('Could not find ' + currentOptimizer + ' in chain')
try:
# Append None to get a list of (opt,nextOpt)
optList.append(None)
nextOptimizer = None
for opt, nextOptimizer in zip(optList[:-1], optList[1:]):
if opt == currentOptimizer:
break
if nextOptimizer is None:
return S_ERROR('Unexpected end of the Optimizer Chain')
except ValueError:
return S_ERROR('The ' + currentOptimizer + ' not found in the chain')
result = self.setJobStatus(jobID, status="Checking", minor=nextOptimizer)
if not result['OK']:
return result
return S_OK(nextOptimizer)
############################################################################
def selectJobs(self, condDict, older=None, newer=None, timeStamp='LastUpdateTime',
orderAttribute=None, limit=None):
""" Select jobs matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by JobID if requested, the result is limited to a given
number of jobs if requested.
"""
self.log.debug('JobDB.selectJobs: retrieving jobs.')
res = self.getFields('Jobs', ['JobID'], condDict=condDict, limit=limit,
older=older, newer=newer, timeStamp=timeStamp, orderAttribute=orderAttribute)
if not res['OK']:
return res
if not res['Value']:
return S_OK([])
return S_OK([self._to_value(i) for i in res['Value']])
#############################################################################
def setJobAttribute(self, jobID, attrName, attrValue, update=False, myDate=None):
""" Set an attribute value for job specified by jobID.
The LastUpdate time stamp is refreshed if explicitly requested
:param jobID: job ID
:type jobID: int or str
:param str attrName: attribute name
:param str attrValue: attribute value
:param bool update: optional flag to update the job LastUpdateTime stamp
:param str myDate: optional time stamp for the LastUpdateTime attribute
:return: S_OK/S_ERROR
"""
if attrName not in self.jobAttributeNames:
return S_ERROR(EWMSSUBM, 'Request to set non-existing job attribute')
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString(attrValue)
if not ret['OK']:
return ret
value = ret['Value']
if update:
cmd = "UPDATE Jobs SET %s=%s,LastUpdateTime=UTC_TIMESTAMP() WHERE JobID=%s" % (attrName, value, jobID)
else:
cmd = "UPDATE Jobs SET %s=%s WHERE JobID=%s" % (attrName, value, jobID)
if myDate:
cmd += ' AND LastUpdateTime < %s' % myDate
res = self._update(cmd)
if res['OK']:
return res
return S_ERROR('JobDB.setAttribute: failed to set attribute')
#############################################################################
def setJobAttributes(self, jobID, attrNames, attrValues, update=False, myDate=None):
""" Set one or more attribute values for one or more jobs specified by jobID.
The LastUpdate time stamp is refreshed if explicitly requested with the update flag
:param jobID: one or more job IDs
:type jobID: int or str or python:list
:param list attrNames: names of attributes to update
:param list attrValues: corresponding values of attributes to update
:param bool update: optional flag to update the job LastUpdateTime stamp
:param str myDate: optional time stamp for the LastUpdateTime attribute
:return: S_OK/S_ERROR
"""
jobIDList = jobID
if not isinstance(jobID, (list, tuple)):
jobIDList = [jobID]
jIDList = []
for jID in jobIDList:
ret = self._escapeString(jID)
if not ret['OK']:
return ret
jIDList.append(ret['Value'])
if len(attrNames) != len(attrValues):
return S_ERROR('JobDB.setAttributes: incompatible Argument length')
for attrName in attrNames:
if attrName not in self.jobAttributeNames:
return S_ERROR(EWMSSUBM, 'Request to set non-existing job attribute')
attr = []
for name, value in zip(attrNames, attrValues):
ret = self._escapeString(value)
if not ret['OK']:
return ret
attr.append("%s=%s" % (name, ret['Value']))
if update:
attr.append("LastUpdateTime=UTC_TIMESTAMP()")
if not attr:
return S_ERROR('JobDB.setAttributes: Nothing to do')
cmd = 'UPDATE Jobs SET %s WHERE JobID in ( %s )' % (', '.join(attr), ', '.join(jIDList))
if myDate:
cmd += ' AND LastUpdateTime < %s' % myDate
return self._transaction([cmd])
#############################################################################
def setJobStatus(self, jobID, status='', minor='', application=''):
""" Set status of the job specified by its jobID
"""
# Do not update the LastUpdate time stamp if setting the Stalled status
update_flag = True
if status == "Stalled":
update_flag = False
attrNames = []
attrValues = []
if status:
attrNames.append('Status')
attrValues.append(status)
if minor:
attrNames.append('MinorStatus')
attrValues.append(minor)
if application:
attrNames.append('ApplicationStatus')
attrValues.append(application[:255])
result = self.setJobAttributes(jobID, attrNames, attrValues, update=update_flag)
if not result['OK']:
return result
return S_OK()
#############################################################################
def setEndExecTime(self, jobID, endDate=None):
""" Set EndExecTime time stamp
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
if endDate:
ret = self._escapeString(endDate)
if not ret['OK']:
return ret
endDate = ret['Value']
req = "UPDATE Jobs SET EndExecTime=%s WHERE JobID=%s AND EndExecTime IS NULL" % (endDate, jobID)
else:
req = "UPDATE Jobs SET EndExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND EndExecTime IS NULL" % jobID
return self._update(req)
#############################################################################
def setStartExecTime(self, jobID, startDate=None):
""" Set StartExecTime time stamp
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
if startDate:
ret = self._escapeString(startDate)
if not ret['OK']:
return ret
startDate = ret['Value']
req = "UPDATE Jobs SET StartExecTime=%s WHERE JobID=%s AND StartExecTime IS NULL" % (startDate, jobID)
else:
req = "UPDATE Jobs SET StartExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND StartExecTime IS NULL" % jobID
return self._update(req)
#############################################################################
def setJobParameter(self, jobID, key, value):
""" Set a parameter specified by name,value pair for the job JobID
"""
ret = self._escapeString(key)
if not ret['OK']:
return ret
e_key = ret['Value']
ret = self._escapeString(value)
if not ret['OK']:
return ret
e_value = ret['Value']
cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES (%d,%s,%s)' % (int(jobID), e_key, e_value)
return self._update(cmd)
#############################################################################
def setJobParameters(self, jobID, parameters):
""" Set parameters specified by a list of name/value pairs for the job JobID
"""
if not parameters:
return S_OK()
insertValueList = []
for name, value in parameters:
ret = self._escapeString(name)
if not ret['OK']:
return ret
e_name = ret['Value']
ret = self._escapeString(value)
if not ret['OK']:
return ret
e_value = ret['Value']
insertValueList.append('(%s,%s,%s)' % (jobID, e_name, e_value))
cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES %s' % ', '.join(insertValueList)
return self._update(cmd)
#############################################################################
def setJobOptParameter(self, jobID, name, value):
""" Set an optimzer parameter specified by name,value pair for the job JobID
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
e_jobID = ret['Value']
ret = self._escapeString(name)
if not ret['OK']:
return ret
e_name = ret['Value']
cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % (e_jobID, e_name)
res = self._update(cmd)
if not res['OK']:
return res
return self.insertFields('OptimizerParameters', ['JobID', 'Name', 'Value'], [jobID, name, value])
#############################################################################
def removeJobOptParameter(self, jobID, name):
""" Remove the specified optimizer parameter for jobID
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString(name)
if not ret['OK']:
return ret
name = ret['Value']
cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % (jobID, name)
return self._update(cmd)
#############################################################################
def setAtticJobParameter(self, jobID, key, value, rescheduleCounter):
""" Set attic parameter for job specified by its jobID when job rescheduling
for later debugging
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString(key)
if not ret['OK']:
return ret
key = ret['Value']
ret = self._escapeString(value)
if not ret['OK']:
return ret
value = ret['Value']
ret = self._escapeString(rescheduleCounter)
if not ret['OK']:
return ret
rescheduleCounter = ret['Value']
cmd = 'INSERT INTO AtticJobParameters (JobID,RescheduleCycle,Name,Value) VALUES(%s,%s,%s,%s)' % \
(jobID, rescheduleCounter, key, value)
return self._update(cmd)
#############################################################################
def __setInitialJobParameters(self, classadJob, jobID):
""" Set initial job parameters as was defined in the Classad
"""
# Extract initital job parameters
parameters = {}
if classadJob.lookupAttribute("Parameters"):
parameters = classadJob.getDictionaryFromSubJDL("Parameters")
return self.setJobParameters(jobID, list(parameters.items()))
#############################################################################
def setJobJDL(self, jobID, jdl=None, originalJDL=None):
""" Insert JDL's for job specified by jobID
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString(jdl)
if not ret['OK']:
return ret
e_JDL = ret['Value']
ret = self._escapeString(originalJDL)
if not ret['OK']:
return ret
e_originalJDL = ret['Value']
req = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID
result = self._query(req)
updateFlag = False
if result['OK']:
if result['Value']:
updateFlag = True
if jdl:
if updateFlag:
cmd = "UPDATE JobJDLs Set JDL=%s WHERE JobID=%s" % (e_JDL, jobID)
else:
cmd = "INSERT INTO JobJDLs (JobID,JDL) VALUES (%s,%s)" % (jobID, e_JDL)
result = self._update(cmd)
if not result['OK']:
return result
if originalJDL:
if updateFlag:
cmd = "UPDATE JobJDLs Set OriginalJDL=%s WHERE JobID=%s" % (e_originalJDL, jobID)
else:
cmd = "INSERT INTO JobJDLs (JobID,OriginalJDL) VALUES (%s,%s)" % (jobID, e_originalJDL)
result = self._update(cmd)
return result
#############################################################################
def __insertNewJDL(self, jdl):
"""Insert a new JDL in the system, this produces a new JobID
"""
err = 'JobDB.__insertNewJDL: Failed to retrieve a new Id.'
result = self.insertFields('JobJDLs',
['JDL', 'JobRequirements', 'OriginalJDL'],
['', '', jdl])
if not result['OK']:
self.log.error('Can not insert New JDL', result['Message'])
return result
if 'lastRowId' not in result:
return S_ERROR('%s' % err)
jobID = int(result['lastRowId'])
self.log.info('JobDB: New JobID served', "%s" % jobID)
return S_OK(jobID)
#############################################################################
def getJobJDL(self, jobID, original=False, status=''):
""" Get JDL for job specified by its jobID. By default the current job JDL
is returned. If 'original' argument is True, original JDL is returned
"""
ret = self._escapeString(jobID)
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString(status)
if not ret['OK']:
return ret
e_status = ret['Value']
if original:
cmd = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID
else:
cmd = "SELECT JDL FROM JobJDLs WHERE JobID=%s" % jobID
if status:
cmd = cmd + " AND Status=%s" % e_status
result = self._query(cmd)
if result['OK']:
jdl = result['Value']
if not jdl:
return S_OK(jdl)
return S_OK(result['Value'][0][0])
return result
#############################################################################
  def insertNewJobIntoDB(self, jdl, owner, ownerDN, ownerGroup, diracSetup,
                         initialStatus="Received",
                         initialMinorStatus="Job accepted"):
    """ Insert the initial JDL into the Job database,
        Do initial JDL crosscheck,
        Set Initial job Attributes and Status

    :param str jdl: job description JDL
    :param str owner: job owner user name
    :param str ownerDN: job owner DN
    :param str ownerGroup: job owner group
    :param str diracSetup: setup in which context the job is submitted
    :param str initialStatus: optional initial job status (Received by default)
    :param str initialMinorStatus: optional initial minor job status
    :return: S_OK(jobID) carrying extra 'JobID', 'Status' and 'MinorStatus'
             keys, or S_ERROR
    """
    jobManifest = JobManifest()
    result = jobManifest.load(jdl)
    if not result['OK']:
      return result
    # the submitter identity always overrides whatever the JDL claims
    jobManifest.setOptionsFromDict({'OwnerName': owner,
                                    'OwnerDN': ownerDN,
                                    'OwnerGroup': ownerGroup,
                                    'DIRACSetup': diracSetup})
    result = jobManifest.check()
    if not result['OK']:
      return result
    jobAttrNames = []
    jobAttrValues = []
    # 1.- insert original JDL on DB and get new JobID
    # Fix the possible lack of the brackets in the JDL
    if jdl.strip()[0].find('[') != 0:
      jdl = '[' + jdl + ']'
    result = self.__insertNewJDL(jdl)
    if not result['OK']:
      return S_ERROR(EWMSSUBM, 'Failed to insert JDL in to DB')
    jobID = result['Value']
    jobManifest.setOption('JobID', jobID)
    jobAttrNames.append('JobID')
    jobAttrValues.append(jobID)
    jobAttrNames.append('LastUpdateTime')
    jobAttrValues.append(Time.toString())
    jobAttrNames.append('SubmissionTime')
    jobAttrValues.append(Time.toString())
    jobAttrNames.append('Owner')
    jobAttrValues.append(owner)
    jobAttrNames.append('OwnerDN')
    jobAttrValues.append(ownerDN)
    jobAttrNames.append('OwnerGroup')
    jobAttrValues.append(ownerGroup)
    jobAttrNames.append('DIRACSetup')
    jobAttrValues.append(diracSetup)
    # 2.- Check JDL and Prepare DIRAC JDL
    jobJDL = jobManifest.dumpAsJDL()
    # Replace the JobID placeholder if any
    if jobJDL.find('%j') != -1:
      jobJDL = jobJDL.replace('%j', str(jobID))
    classAdJob = ClassAd(jobJDL)
    classAdReq = ClassAd('[]')
    retVal = S_OK(jobID)
    retVal['JobID'] = jobID
    if not classAdJob.isOK():
      # syntactically broken JDL: record the job as Failed and report back
      jobAttrNames.append('Status')
      jobAttrValues.append('Failed')
      jobAttrNames.append('MinorStatus')
      jobAttrValues.append('Error in JDL syntax')
      result = self.insertFields('Jobs', jobAttrNames, jobAttrValues)
      if not result['OK']:
        return result
      retVal['Status'] = 'Failed'
      retVal['MinorStatus'] = 'Error in JDL syntax'
      return retVal
    classAdJob.insertAttributeInt('JobID', jobID)
    # consistency checks and requirement preparation (appends to the
    # jobAttrNames/jobAttrValues lists in place)
    result = self.__checkAndPrepareJob(jobID, classAdJob, classAdReq,
                                       owner, ownerDN,
                                       ownerGroup, diracSetup,
                                       jobAttrNames, jobAttrValues)
    if not result['OK']:
      return result
    priority = classAdJob.getAttributeInt('Priority')
    if priority is None:
      priority = 0
    jobAttrNames.append('UserPriority')
    jobAttrValues.append(priority)
    for jdlName in self.jdl2DBParameters:
      # Defaults are set by the DB.
      jdlValue = classAdJob.getAttributeString(jdlName)
      if jdlValue:
        jobAttrNames.append(jdlName)
        jobAttrValues.append(jdlValue)
    jdlValue = classAdJob.getAttributeString('Site')
    if jdlValue:
      jobAttrNames.append('Site')
      # a comma in the Site value means several candidate sites were given
      if jdlValue.find(',') != -1:
        jobAttrValues.append('Multiple')
      else:
        jobAttrValues.append(jdlValue)
    jobAttrNames.append('VerifiedFlag')
    jobAttrValues.append('True')
    jobAttrNames.append('Status')
    jobAttrValues.append(initialStatus)
    jobAttrNames.append('MinorStatus')
    jobAttrValues.append(initialMinorStatus)
    reqJDL = classAdReq.asJDL()
    classAdJob.insertAttributeInt('JobRequirements', reqJDL)
    jobJDL = classAdJob.asJDL()
    result = self.setJobJDL(jobID, jobJDL)
    if not result['OK']:
      return result
    # Adding the job in the Jobs table
    result = self.insertFields('Jobs', jobAttrNames, jobAttrValues)
    if not result['OK']:
      return result
    # Setting the Job parameters
    result = self.__setInitialJobParameters(classAdJob, jobID)
    if not result['OK']:
      return result
    # Looking for the Input Data
    inputData = []
    if classAdJob.lookupAttribute('InputData'):
      inputData = classAdJob.getListFromExpression('InputData')
    values = []
    ret = self._escapeString(jobID)
    if not ret['OK']:
      return ret
    e_jobID = ret['Value']
    for lfn in inputData:
      # some jobs are setting empty string as InputData
      if not lfn:
        continue
      ret = self._escapeString(lfn.strip())
      if not ret['OK']:
        return ret
      lfn = ret['Value']
      values.append('(%s, %s )' % (e_jobID, lfn))
    if values:
      cmd = 'INSERT INTO InputData (JobID,LFN) VALUES %s' % ', '.join(values)
      result = self._update(cmd)
      if not result['OK']:
        return result
    retVal['Status'] = initialStatus
    retVal['MinorStatus'] = initialMinorStatus
    return retVal
  def __checkAndPrepareJob(self, jobID, classAdJob, classAdReq, owner, ownerDN,
                           ownerGroup, diracSetup, jobAttrNames, jobAttrValues):
    """
    Check Consistency of Submitted JDL and set some defaults
    Prepare subJDL with Job Requirements

    :param jobID: job ID of the freshly inserted job
    :param classAdJob: ClassAd object holding the job description (modified in place)
    :param classAdReq: ClassAd object to be filled with the job requirements
    :param str owner: submitter user name (authoritative)
    :param str ownerDN: submitter DN (authoritative)
    :param str ownerGroup: submitter group (authoritative)
    :param str diracSetup: setup of the submission
    :param list jobAttrNames: job attribute names, appended to in place on failure
    :param list jobAttrValues: job attribute values, appended to in place on failure
    :return: S_OK() or S_ERROR carrying 'JobId'/'Status'/'MinorStatus' entries
    """
    error = ''
    vo = getVOForGroup(ownerGroup)
    jdlDiracSetup = classAdJob.getAttributeString('DIRACSetup')
    jdlOwner = classAdJob.getAttributeString('Owner')
    jdlOwnerDN = classAdJob.getAttributeString('OwnerDN')
    jdlOwnerGroup = classAdJob.getAttributeString('OwnerGroup')
    jdlVO = classAdJob.getAttributeString('VirtualOrganization')
    # The below is commented out since this is always overwritten by the submitter IDs
    # but the check allows to findout inconsistent client environments
    # NOTE(review): the first check is a plain 'if', so its error message can
    # be overwritten by the following if/elif chain - confirm intended
    if jdlDiracSetup and jdlDiracSetup != diracSetup:
      error = 'Wrong DIRAC Setup in JDL'
    if jdlOwner and jdlOwner != owner:
      error = 'Wrong Owner in JDL'
    elif jdlOwnerDN and jdlOwnerDN != ownerDN:
      error = 'Wrong Owner DN in JDL'
    elif jdlOwnerGroup and jdlOwnerGroup != ownerGroup:
      error = 'Wrong Owner Group in JDL'
    elif jdlVO and jdlVO != vo:
      error = 'Wrong Virtual Organization in JDL'
    # the submitter identity always wins over what the JDL declared
    classAdJob.insertAttributeString('Owner', owner)
    classAdJob.insertAttributeString('OwnerDN', ownerDN)
    classAdJob.insertAttributeString('OwnerGroup', ownerGroup)
    if vo:
      classAdJob.insertAttributeString('VirtualOrganization', vo)
    classAdReq.insertAttributeString('Setup', diracSetup)
    classAdReq.insertAttributeString('OwnerDN', ownerDN)
    classAdReq.insertAttributeString('OwnerGroup', ownerGroup)
    if vo:
      classAdReq.insertAttributeString('VirtualOrganization', vo)
    # inject VO policy defaults for attributes the JDL did not set itself
    setup = gConfig.getValue('/DIRAC/Setup', '')
    voPolicyDict = gConfig.getOptionsDict('/DIRAC/VOPolicy/%s/%s' % (vo, setup))
    # voPolicyDict = gConfig.getOptionsDict('/DIRAC/VOPolicy')
    if voPolicyDict['OK']:
      voPolicy = voPolicyDict['Value']
      for param, val in voPolicy.items():
        if not classAdJob.lookupAttribute(param):
          classAdJob.insertAttributeString(param, val)
    # priority
    priority = classAdJob.getAttributeInt('Priority')
    if priority is None:
      priority = 0
    classAdReq.insertAttributeInt('UserPriority', priority)
    # CPU time
    cpuTime = classAdJob.getAttributeInt('CPUTime')
    if cpuTime is None:
      # Just in case check for MaxCPUTime for backward compatibility
      cpuTime = classAdJob.getAttributeInt('MaxCPUTime')
      if cpuTime is not None:
        classAdJob.insertAttributeInt('CPUTime', cpuTime)
      else:
        # neither set: fall back to the Operations default
        opsHelper = Operations(group=ownerGroup,
                               setup=diracSetup)
        cpuTime = opsHelper.getValue('JobDescription/DefaultCPUTime', 86400)
    classAdReq.insertAttributeInt('CPUTime', cpuTime)
    # platform(s)
    platformList = classAdJob.getListFromExpression('Platform')
    if platformList:
      result = self.getDIRACPlatform(platformList)
      if not result['OK']:
        return result
      if result['Value']:
        classAdReq.insertAttributeVectorString('Platforms', result['Value'])
      else:
        error = "OS compatibility info not found"
    if error:
      retVal = S_ERROR(EWMSSUBM, error)
      retVal['JobId'] = jobID
      retVal['Status'] = 'Failed'
      retVal['MinorStatus'] = error
      jobAttrNames.append('Status')
      jobAttrValues.append('Failed')
      jobAttrNames.append('MinorStatus')
      jobAttrValues.append(error)
      # best effort: record the failure in the DB; if that also fails,
      # append its message to the one reported to the caller
      resultInsert = self.setJobAttributes(jobID, jobAttrNames, jobAttrValues)
      if not resultInsert['OK']:
        retVal['MinorStatus'] += '; %s' % resultInsert['Message']
      return retVal
    return S_OK()
#############################################################################
def removeJobFromDB(self, jobIDs):
    """Remove the given job(s) from the Job DB and clean up all related
    records in the auxiliary tables.

    :param jobIDs: a single job ID or a list of job IDs
    :return: S_OK, or S_ERROR carrying a 'FailedTables' list naming the
             tables whose DELETE statement failed
    """
    jobList = jobIDs if isinstance(jobIDs, list) else [jobIDs]
    idCSV = ','.join(str(jobID) for jobID in jobList)
    tables = ('InputData',
              'JobParameters',
              'AtticJobParameters',
              'HeartBeatLoggingInfo',
              'OptimizerParameters',
              'JobCommands',
              'Jobs',
              'JobJDLs')
    # Attempt the cleanup on every table, remembering which ones failed
    problemTables = []
    for table in tables:
        delCmd = 'DELETE FROM %s WHERE JobID in (%s)' % (table, idCSV)
        if not self._update(delCmd)['OK']:
            problemTables.append(table)
    if not problemTables:
        return S_OK()
    ret = S_ERROR('Errors while job removal')
    ret['FailedTables'] = problemTables
    return ret
#################################################################
def rescheduleJobs(self, jobIDs):
    """Reschedule every job in the given list.

    :param jobIDs: list of job IDs
    :return: the last rescheduleJob result when all succeed (S_OK() for an
             empty list), otherwise S_ERROR with a 'FailedJobs' list
    """
    outcome = S_OK()
    notRescheduled = []
    for jobID in jobIDs:
        outcome = self.rescheduleJob(jobID)
        if not outcome['OK']:
            notRescheduled.append(jobID)
    if notRescheduled:
        outcome = S_ERROR('JobDB.rescheduleJobs: Not all the jobs were rescheduled')
        outcome['FailedJobs'] = notRescheduled
    return outcome
#############################################################################
def rescheduleJob(self, jobID):
    """ Reschedule the given job to run again from scratch. Retain the already
        defined parameters in the parameter Attic

    :param jobID: job ID to reschedule
    :return: S_OK-like dict extended with JobID/InputData/RescheduleCounter/
             Status/MinorStatus keys, or S_ERROR
    """
    # Check Verified Flag
    result = self.getJobAttributes(jobID, ['Status', 'MinorStatus', 'VerifiedFlag', 'RescheduleCounter',
                                           'Owner', 'OwnerDN', 'OwnerGroup', 'DIRACSetup'])
    if result['OK']:
        resultDict = result['Value']
    else:
        return S_ERROR('JobDB.getJobAttributes: can not retrieve job attributes')
    if 'VerifiedFlag' not in resultDict:
        return S_ERROR('Job ' + str(jobID) + ' not found in the system')
    if not resultDict['VerifiedFlag']:
        return S_ERROR('Job %s not Verified: Status = %s, MinorStatus = %s' % (
            jobID,
            resultDict['Status'],
            resultDict['MinorStatus']))
    # Check the Reschedule counter first
    rescheduleCounter = int(resultDict['RescheduleCounter']) + 1
    # Re-read the limit from CS on every call so operators can change it live
    self.maxRescheduling = self.getCSOption('MaxRescheduling', self.maxRescheduling)
    # Exit if the limit of the reschedulings is reached
    if rescheduleCounter > self.maxRescheduling:
        self.log.warn('Maximum number of reschedulings is reached', 'Job %s' % jobID)
        res = self.setJobStatus(jobID, status='Failed', minor='Maximum of reschedulings reached')
        if not res['OK']:
            return res
        return S_ERROR('Maximum number of reschedulings is reached: %s' % self.maxRescheduling)
    jobAttrNames = []
    jobAttrValues = []
    jobAttrNames.append('RescheduleCounter')
    jobAttrValues.append(rescheduleCounter)
    # Save the job parameters for later debugging
    result = self.getJobParameters(jobID)
    if result['OK']:
        parDict = result['Value']
        # NOTE(review): .iteritems() is Python 2 only — confirm interpreter before porting
        for key, value in parDict.get(jobID, {}).iteritems():
            result = self.setAtticJobParameter(jobID, key, value, rescheduleCounter - 1)
            if not result['OK']:
                break
    ret = self._escapeString(jobID)
    if not ret['OK']:
        return ret
    e_jobID = ret['Value']
    cmd = 'DELETE FROM JobParameters WHERE JobID=%s' % e_jobID
    res = self._update(cmd)
    if not res['OK']:
        return res
    # Delete optimizer parameters
    cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s' % (e_jobID)
    if not self._update(cmd)['OK']:
        return S_ERROR('JobDB.removeJobOptParameter: operation failed.')
    # the JobManager needs to know if there is InputData ??? to decide which optimizer to call
    # proposal: - use the getInputData method
    res = self.getJobJDL(jobID, original=True)
    if not res['OK']:
        return res
    jdl = res['Value']
    # Fix the possible lack of the brackets in the JDL
    # NOTE(review): jdl.strip()[0] raises IndexError on an empty JDL — verify upstream guarantees
    if jdl.strip()[0].find('[') != 0:
        jdl = '[' + jdl + ']'
    classAdJob = ClassAd(jdl)
    classAdReq = ClassAd('[]')
    retVal = S_OK(jobID)
    retVal['JobID'] = jobID
    classAdJob.insertAttributeInt('JobID', jobID)
    # Re-run the standard submission checks; may append to jobAttrNames/Values
    result = self.__checkAndPrepareJob(jobID, classAdJob, classAdReq, resultDict['Owner'],
                                       resultDict['OwnerDN'], resultDict['OwnerGroup'],
                                       resultDict['DIRACSetup'],
                                       jobAttrNames, jobAttrValues)
    if not result['OK']:
        return result
    priority = classAdJob.getAttributeInt('Priority')
    if priority is None:
        priority = 0
    jobAttrNames.append('UserPriority')
    jobAttrValues.append(priority)
    siteList = classAdJob.getListFromExpression('Site')
    if not siteList:
        site = 'ANY'
    elif len(siteList) > 1:
        site = "Multiple"
    else:
        site = siteList[0]
    jobAttrNames.append('Site')
    jobAttrValues.append(site)
    jobAttrNames.append('Status')
    jobAttrValues.append('Received')
    jobAttrNames.append('MinorStatus')
    jobAttrValues.append('Job Rescheduled')
    jobAttrNames.append('ApplicationStatus')
    jobAttrValues.append('Unknown')
    jobAttrNames.append('ApplicationNumStatus')
    jobAttrValues.append(0)
    jobAttrNames.append('LastUpdateTime')
    jobAttrValues.append(Time.toString())
    jobAttrNames.append('RescheduleTime')
    jobAttrValues.append(Time.toString())
    reqJDL = classAdReq.asJDL()
    classAdJob.insertAttributeInt('JobRequirements', reqJDL)
    jobJDL = classAdJob.asJDL()
    # Replace the JobID placeholder if any
    if jobJDL.find('%j') != -1:
        jobJDL = jobJDL.replace('%j', str(jobID))
    result = self.setJobJDL(jobID, jobJDL)
    if not result['OK']:
        return result
    result = self.__setInitialJobParameters(classAdJob, jobID)
    if not result['OK']:
        return result
    result = self.setJobAttributes(jobID, jobAttrNames, jobAttrValues)
    if not result['OK']:
        return result
    retVal['InputData'] = classAdJob.lookupAttribute("InputData")
    retVal['RescheduleCounter'] = rescheduleCounter
    retVal['Status'] = 'Received'
    retVal['MinorStatus'] = 'Job Rescheduled'
    return retVal
#############################################################################
def getUserSitesTuple(self, sites):
    """Return a tuple of (active, banned, invalid) site sets from a user
    provided list.

    :param sites: list of site names to classify
    :return: S_OK((activeSites, bannedSites, invalidSites)) or S_ERROR
    """
    ret = self._escapeValues(sites)
    if not ret['OK']:
        return ret
    sites = set(sites)
    sitesSql = ret['Value']
    sitesSql[0] = 'SELECT %s AS Site' % sitesSql[0]
    sitesSql = ' UNION SELECT '.join(sitesSql)
    cmd = "SELECT Site FROM (%s) " % sitesSql
    cmd += "AS tmptable WHERE Site NOT IN (SELECT Site FROM SiteMask WHERE Status='Active')"
    result = self._query(cmd)
    if not result['OK']:
        return result
    nonActiveSites = set(x[0] for x in result['Value'])
    activeSites = sites.difference(nonActiveSites)
    # Bug fix: getSiteMask returns an S_OK structure; the original passed the
    # whole dict to set(), which iterated its keys ('OK', 'Value') instead of
    # the banned site names, so bannedSites was effectively always empty.
    resultMask = self.getSiteMask('Banned')
    if not resultMask['OK']:
        return resultMask
    bannedSites = nonActiveSites.intersection(set(resultMask['Value']))
    invalidSites = nonActiveSites.difference(bannedSites)
    return S_OK((activeSites, bannedSites, invalidSites))
#############################################################################
def getSiteMask(self, siteState='Active'):
    """ Get the list of sites in the given mask state.

    :param siteState: mask status to select, or "All" for every site
    :return: S_OK(list of site names) or S_ERROR
    """
    # Bug fix: the original escaped siteState BEFORE comparing it to "All";
    # after escaping the value carries SQL quoting and can never equal the
    # bare string "All", so the "All" branch was unreachable.
    if siteState == "All":
        cmd = "SELECT Site FROM SiteMask"
    else:
        ret = self._escapeString(siteState)
        if not ret['OK']:
            return ret
        cmd = "SELECT Site FROM SiteMask WHERE Status=%s" % ret['Value']
    result = self._query(cmd)
    if not result['OK']:
        return S_ERROR(DErrno.EMYSQL, "SQL query failed: %s" % cmd)
    return S_OK([x[0] for x in result['Value']])
#############################################################################
def getSiteMaskStatus(self, sites=None):
    """ Get the current site mask status
    :param sites: A string for a single site to check, or a list to check multiple sites.
    :returns: If input was a list, a dictionary of sites, keys are site
              names and values are the site statuses. Unknown sites are
              not included in the output dictionary.
              If input was a string, then a single value with that site's
              status, or S_ERROR if the site does not exist in the DB.
    """
    if isinstance(sites, list):
        # Escape each site name individually before building the IN (...) clause
        safeSites = []
        for site in sites:
            res = self._escapeString(site)
            if not res['OK']:
                return res
            safeSites.append(res['Value'])
        sitesString = ",".join(safeSites)
        cmd = "SELECT Site, Status FROM SiteMask WHERE Site in (%s)" % sitesString
        result = self._query(cmd)
        # NOTE(review): result['OK'] is not checked here; a failed query would
        # raise a KeyError/TypeError on result['Value'] — confirm intended
        return S_OK(dict(result['Value']))
    elif isinstance(sites, str):
        ret = self._escapeString(sites)
        if not ret['OK']:
            return ret
        cmd = "SELECT Status FROM SiteMask WHERE Site=%s" % ret['Value']
        result = self._query(cmd)
        if result['Value']:
            return S_OK(result['Value'][0][0])
        return S_ERROR("Unknown site %s" % sites)
    else:
        # No site filter: return the status of every site in the mask
        cmd = "SELECT Site,Status FROM SiteMask"
        result = self._query(cmd)
        siteDict = {}
        if result['OK']:
            for site, status in result['Value']:
                siteDict[site] = status
        else:
            return S_ERROR(DErrno.EMYSQL, "SQL query failed: %s" % cmd)
        return S_OK(siteDict)
#############################################################################
def getAllSiteMaskStatus(self):
    """ Get everything from the site mask status table.

    :return: S_OK({site: (status, lastUpdateTime, author, comment)}) or S_ERROR
    """
    cmd = "SELECT Site,Status,LastUpdateTime,Author,Comment FROM SiteMask"
    result = self._query(cmd)
    if not result['OK']:
        # Bug fix: the original returned the bare result['Message'] string,
        # breaking the S_OK/S_ERROR return convention callers rely on.
        return result
    siteDict = {}
    for site, status, lastUpdateTime, author, comment in result['Value']:
        siteDict[site] = status, lastUpdateTime, author, comment
    return S_OK(siteDict)
#############################################################################
def setSiteMask(self, siteMaskList, authorDN='Unknown', comment='No comment'):
    """Apply a site mask given as a list of (site, status) tuples.

    Stops and returns the error of the first site that fails to update.
    """
    for siteName, siteStatus in siteMaskList:
        outcome = self.__setSiteStatusInMask(siteName, siteStatus, authorDN, comment)
        if not outcome['OK']:
            return outcome
    return S_OK()
#############################################################################
def __setSiteStatusInMask(self, site, status, authorDN='Unknown', comment='No comment'):
    """ Set the given site status to 'status' or add a new active site

    Updates (or inserts) the SiteMask row and appends a SiteMaskLogging
    record; a logging failure is only warned about, not fatal.
    """
    # Escape all four values for safe inclusion in the SQL statements below
    result = self._escapeString(site)
    if not result['OK']:
        return result
    site = result['Value']
    result = self._escapeString(status)
    if not result['OK']:
        return result
    status = result['Value']
    result = self._escapeString(authorDN)
    if not result['OK']:
        return result
    authorDN = result['Value']
    result = self._escapeString(comment)
    if not result['OK']:
        return result
    comment = result['Value']
    req = "SELECT Status FROM SiteMask WHERE Site=%s" % site
    result = self._query(req)
    if result['OK']:
        if result['Value']:
            current_status = result['Value'][0][0]
            if current_status == status:
                # No change needed — avoid a spurious logging record
                return S_OK()
            else:
                req = "UPDATE SiteMask SET Status=%s,LastUpdateTime=UTC_TIMESTAMP()," \
                      "Author=%s, Comment=%s WHERE Site=%s"
                req = req % (status, authorDN, comment, site)
        else:
            # Site not yet in the mask: insert a fresh row
            req = "INSERT INTO SiteMask VALUES (%s,%s,UTC_TIMESTAMP(),%s,%s)" % (site, status, authorDN, comment)
        result = self._update(req)
        if not result['OK']:
            return S_ERROR('Failed to update the Site Mask')
        # update the site mask logging record
        req = "INSERT INTO SiteMaskLogging VALUES (%s,%s,UTC_TIMESTAMP(),%s,%s)" % (site, status, authorDN, comment)
        result = self._update(req)
        if not result['OK']:
            # Best-effort: logging failure does not invalidate the mask update
            self.log.warn('Failed to update site mask logging', 'for %s' % site)
    else:
        return S_ERROR('Failed to get the Site Status from the Mask')
    return S_OK()
#############################################################################
def banSiteInMask(self, site, authorDN='Unknown', comment='No comment'):
    """Set the given site to 'Banned' in the Site Mask."""
    return self.__setSiteStatusInMask(site, 'Banned', authorDN, comment)
#############################################################################
def allowSiteInMask(self, site, authorDN='Unknown', comment='No comment'):
    """ Allow the given site in the Site Mask (set its status to 'Active').
    """
    return self.__setSiteStatusInMask(site, 'Active', authorDN, comment)
#############################################################################
def removeSiteFromMask(self, site=None):
    """Delete the given site from the mask; with no site, clear the whole mask."""
    if site:
        escaped = self._escapeString(site)
        if not escaped['OK']:
            return escaped
        req = "DELETE FROM SiteMask WHERE Site=%s" % escaped['Value']
    else:
        req = "DELETE FROM SiteMask"
    return self._update(req)
#############################################################################
def getSiteMaskLogging(self, siteList):
    """ Get the site mask logging history for the given list of site names.

    :param siteList: list of site names; a falsy value selects all sites
    :return: S_OK({site: [(status, timeString, author, comment), ...]})
    """
    if siteList:
        siteString = ','.join(["'" + x + "'" for x in siteList])
        req = "SELECT Site,Status,UpdateTime,Author,Comment FROM SiteMaskLogging WHERE Site in (%s)" % siteString
    else:
        req = "SELECT Site,Status,UpdateTime,Author,Comment FROM SiteMaskLogging"
    req += " ORDER BY UpdateTime ASC"
    result = self._query(req)
    if not result['OK']:
        return result
    availableSiteList = []
    for row in result['Value']:
        site, status, utime, author, comment = row
        availableSiteList.append(site)
    resultDict = {}
    # Bug fix: the original iterated siteList unconditionally and crashed with
    # a TypeError when siteList was None (the "all sites" case).
    for site in (siteList or []):
        if not result['Value'] or site not in availableSiteList:
            # No logging entries for this site: fall back to the current mask row
            ret = self._escapeString(site)
            if not ret['OK']:
                continue
            e_site = ret['Value']
            # Bug fix: the original selected "Status Site,Status,...", which
            # aliased Status AS Site, so the dict key became the status value.
            req = "SELECT Site,Status,LastUpdateTime,Author,Comment FROM SiteMask WHERE Site=%s" % e_site
            resSite = self._query(req)
            if resSite['OK']:
                if resSite['Value']:
                    site, status, lastUpdate, author, comment = resSite['Value'][0]
                    resultDict[site] = [(status, str(lastUpdate), author, comment)]
                else:
                    resultDict[site] = [('Unknown', '', '', 'Site not present in logging table')]
    for row in result['Value']:
        site, status, utime, author, comment = row
        if site not in resultDict:
            resultDict[site] = []
        resultDict[site].append((status, str(utime), author, comment))
    return S_OK(resultDict)
#############################################################################
def getSiteSummary(self):
    """ Get the summary of jobs in a given status on all the sites

    :return: S_OK({site: {state: count}, 'Total': {state: count}}) or S_ERROR
    """
    # "Waiting" aggregates several pre-execution states
    waitingList = ['"Submitted"', '"Assigned"', '"Waiting"', '"Matched"']
    waitingString = ','.join(waitingList)
    result = self.getDistinctJobAttributes('Site')
    if not result['OK']:
        return result
    siteList = result['Value']
    siteDict = {}
    totalDict = {'Waiting': 0, 'Running': 0, 'Stalled': 0, 'Done': 0, 'Failed': 0}
    for site in siteList:
        if site == "ANY":
            # "ANY" is a placeholder, not a real site
            continue
        # Waiting
        siteDict[site] = {}
        ret = self._escapeString(site)
        if not ret['OK']:
            return ret
        e_site = ret['Value']
        req = "SELECT COUNT(JobID) FROM Jobs WHERE Status IN (%s) AND Site=%s" % (waitingString, e_site)
        result = self._query(req)
        if result['OK']:
            count = result['Value'][0][0]
        else:
            return S_ERROR('Failed to get Site data from the JobDB')
        siteDict[site]['Waiting'] = count
        totalDict['Waiting'] += count
        # Running,Stalled,Done,Failed
        for status in ['"Running"', '"Stalled"', '"Done"', '"Failed"']:
            req = "SELECT COUNT(JobID) FROM Jobs WHERE Status=%s AND Site=%s" % (status, e_site)
            result = self._query(req)
            if result['OK']:
                count = result['Value'][0][0]
            else:
                return S_ERROR('Failed to get Site data from the JobDB')
            # Strip the SQL quoting from the status for use as a dict key
            siteDict[site][status.replace('"', '')] = count
            totalDict[status.replace('"', '')] += count
    siteDict['Total'] = totalDict
    return S_OK(siteDict)
#################################################################################
def getSiteSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """ Get the summary of jobs in a given status on all the sites in the standard Web form

    :param selectDict: column -> value(s) filters; may contain 'LastUpdateTime'
    :param sortList: list of (column, order) pairs; only the first is used
    :param startItem: index of the first record to return
    :param maxItems: maximum number of records to return (0 means all)
    :return: S_OK({'ParameterNames', 'Records', 'TotalRecords', 'Extras'})
    """
    paramNames = ['Site', 'GridType', 'Country', 'Tier', 'MaskStatus']
    paramNames += JOB_STATES
    paramNames += ['Efficiency', 'Status']
    # FIXME: hack!!!
    siteT1List = ['CERN', 'IN2P3', 'NIKHEF', 'SARA', 'PIC', 'CNAF', 'RAL', 'GRIDKA', 'RRCKI']
    # Sort out records as requested
    sortItem = -1
    sortOrder = "ASC"
    if sortList:
        item = sortList[0][0]  # only one item for the moment
        sortItem = paramNames.index(item)
        sortOrder = sortList[0][1]
    last_update = None
    if 'LastUpdateTime' in selectDict:
        last_update = selectDict['LastUpdateTime']
        del selectDict['LastUpdateTime']
    # Counters for non-final states (since last_update if given)
    result = self.getCounters('Jobs', ['Site', 'Status'],
                              {}, newer=last_update,
                              timeStamp='LastUpdateTime')
    # Counters for final states over the last day
    last_day = Time.dateTime() - Time.day
    resultDay = self.getCounters('Jobs', ['Site', 'Status'],
                                 {}, newer=last_day,
                                 timeStamp='EndExecTime')
    # Get the site mask status
    siteMask = {}
    resultMask = self.siteClient.getSites('All')
    if resultMask['OK']:
        for site in resultMask['Value']:
            siteMask[site] = 'NoMask'
    resultMask = self.siteClient.getSites('Active')
    if resultMask['OK']:
        for site in resultMask['Value']:
            siteMask[site] = 'Active'
    resultMask = self.siteClient.getSites('Banned')
    if resultMask['OK']:
        for site in resultMask['Value']:
            siteMask[site] = 'Banned'
    # Sort out different counters
    resultDict = {}
    if result['OK']:
        for attDict, count in result['Value']:
            siteFullName = attDict['Site']
            status = attDict['Status']
            if siteFullName not in resultDict:
                resultDict[siteFullName] = {}
                for state in JOB_STATES:
                    resultDict[siteFullName][state] = 0
            if status not in JOB_FINAL_STATES:
                resultDict[siteFullName][status] = count
    if resultDay['OK']:
        for attDict, count in resultDay['Value']:
            siteFullName = attDict['Site']
            if siteFullName not in resultDict:
                resultDict[siteFullName] = {}
                for state in JOB_STATES:
                    resultDict[siteFullName][state] = 0
            status = attDict['Status']
            if status in JOB_FINAL_STATES:
                resultDict[siteFullName][status] = count
    # Collect records now
    records = []
    countryCounts = {}
    for siteFullName in resultDict:
        siteDict = resultDict[siteFullName]
        # Site names follow the Grid.Site.Country convention
        if siteFullName.count('.') == 2:
            grid, site, country = siteFullName.split('.')
        else:
            grid, site, country = 'Unknown', 'Unknown', 'Unknown'
        tier = 'Tier-2'
        if site in siteT1List:
            tier = 'Tier-1'
        if country not in countryCounts:
            countryCounts[country] = {}
            for state in JOB_STATES:
                countryCounts[country][state] = 0
        rList = [siteFullName, grid, country, tier]
        if siteFullName in siteMask:
            rList.append(siteMask[siteFullName])
        else:
            rList.append('NoMask')
        for status in JOB_STATES:
            rList.append(siteDict[status])
            countryCounts[country][status] += siteDict[status]
        # Efficiency over the final states collected for the last day
        efficiency = 0
        total_finished = 0
        for state in JOB_FINAL_STATES:
            total_finished += resultDict[siteFullName][state]
        if total_finished > 0:
            efficiency = float(siteDict['Done'] + siteDict['Completed']) / float(total_finished)
        rList.append('%.1f' % (efficiency * 100.))
        # Estimate the site verbose status
        if efficiency > 0.95:
            rList.append('Good')
        elif efficiency > 0.80:
            rList.append('Fair')
        elif efficiency > 0.60:
            rList.append('Poor')
        elif total_finished == 0:
            rList.append('Idle')
        else:
            rList.append('Bad')
        records.append(rList)
    # Select records as requested
    if selectDict:
        for item in selectDict:
            selectItem = paramNames.index(item)
            values = selectDict[item]
            if not isinstance(values, list):
                values = [values]
            # Iterate indices in reverse so deletions do not shift later items
            indices = list(range(len(records)))
            indices.reverse()
            for ind in indices:
                if records[ind][selectItem] not in values:
                    del records[ind]
    # Sort records as requested
    if sortItem != -1:
        if sortOrder.lower() == "asc":
            records.sort(key=operator.itemgetter(sortItem))
        else:
            records.sort(key=operator.itemgetter(sortItem), reverse=True)
    # Collect the final result
    finalDict = {}
    finalDict['ParameterNames'] = paramNames
    # Return all the records if maxItems == 0 or the specified number otherwise
    if maxItems:
        if startItem + maxItems > len(records):
            finalDict['Records'] = records[startItem:]
        else:
            finalDict['Records'] = records[startItem:startItem + maxItems]
    else:
        finalDict['Records'] = records
    finalDict['TotalRecords'] = len(records)
    finalDict['Extras'] = countryCounts
    return S_OK(finalDict)
#####################################################################################
def setHeartBeatData(self, jobID, staticDataDict, dynamicDataDict):
    """ Add the job's heart beat data to the database

    :param jobID: job the heartbeat belongs to
    :param staticDataDict: slow-changing values stored as job parameters
    :param dynamicDataDict: per-beat values appended to HeartBeatLoggingInfo
    :return: S_OK, or S_ERROR if any item could not be stored
    """
    # Set the time stamp first
    ret = self._escapeString(jobID)
    if not ret['OK']:
        return ret
    e_jobID = ret['Value']
    # A heartbeat implies the job is running
    req = "UPDATE Jobs SET HeartBeatTime=UTC_TIMESTAMP(), Status='Running' WHERE JobID=%s" % e_jobID
    result = self._update(req)
    if not result['OK']:
        return S_ERROR('Failed to set the heart beat time: ' + result['Message'])
    ok = True
    # FIXME: It is rather not optimal to use parameters to store the heartbeat info, must find a proper solution
    # Add static data items as job parameters
    result = self.setJobParameters(jobID, list(staticDataDict.items()))
    if not result['OK']:
        ok = False
        self.log.warn(result['Message'])
    # Add dynamic data to the job heart beat log
    # start = time.time()
    valueList = []
    for key, value in dynamicDataDict.items():
        result = self._escapeString(key)
        if not result['OK']:
            # Skip the unescapable item but keep processing the rest
            self.log.warn('Failed to escape string ', key)
            continue
        e_key = result['Value']
        result = self._escapeString(value)
        if not result['OK']:
            self.log.warn('Failed to escape string ', value)
            continue
        e_value = result['Value']
        valueList.append("( %s, %s,%s,UTC_TIMESTAMP())" % (e_jobID, e_key, e_value))
    if valueList:
        # Single multi-row INSERT for all dynamic items
        valueString = ','.join(valueList)
        req = "INSERT INTO HeartBeatLoggingInfo (JobID,Name,Value,HeartBeatTime) VALUES "
        req += valueString
        result = self._update(req)
        if not result['OK']:
            ok = False
            self.log.warn(result['Message'])
    if ok:
        return S_OK()
    return S_ERROR('Failed to store some or all the parameters')
#####################################################################################
def getHeartBeatData(self, jobID):
    """Retrieve all heart beat records logged for the given job.

    :return: S_OK(list of (name, value, timestamp) string tuples); the value
             is rendered with one decimal place
    """
    escaped = self._escapeString(jobID)
    if not escaped['OK']:
        return escaped
    cmd = 'SELECT Name,Value,HeartBeatTime from HeartBeatLoggingInfo WHERE JobID=%s' % escaped['Value']
    res = self._query(cmd)
    if not res['OK']:
        return res
    rows = res['Value']
    if not rows:
        return S_OK([])
    formatted = [(str(name), '%.01f' % (float(value.replace('"', ''))), str(stamp))
                 for name, value, stamp in rows]
    return S_OK(formatted)
#####################################################################################
def setJobCommand(self, jobID, command, arguments=None):
    """Store a command to be delivered to the job with its next heart beat.

    :param jobID: target job
    :param command: command name
    :param arguments: optional command arguments (empty string when omitted)
    """
    escapedJobID = self._escapeString(jobID)
    if not escapedJobID['OK']:
        return escapedJobID
    escapedCommand = self._escapeString(command)
    if not escapedCommand['OK']:
        return escapedCommand
    if arguments:
        escapedArgs = self._escapeString(arguments)
        if not escapedArgs['OK']:
            return escapedArgs
        argValue = escapedArgs['Value']
    else:
        argValue = "''"
    req = ("INSERT INTO JobCommands (JobID,Command,Arguments,ReceptionTime) "
           "VALUES (%s,%s,%s,UTC_TIMESTAMP())"
           % (escapedJobID['Value'], escapedCommand['Value'], argValue))
    return self._update(req)
#####################################################################################
def getJobCommand(self, jobID, status='Received'):
    """Get the pending command(s) for a job, to be passed with the next
    heart beat.

    :return: S_OK({command: arguments}) or S_ERROR
    """
    escapedJobID = self._escapeString(jobID)
    if not escapedJobID['OK']:
        return escapedJobID
    escapedStatus = self._escapeString(status)
    if not escapedStatus['OK']:
        return escapedStatus
    req = ("SELECT Command, Arguments FROM JobCommands WHERE JobID=%s AND Status=%s"
           % (escapedJobID['Value'], escapedStatus['Value']))
    result = self._query(req)
    if not result['OK']:
        return result
    commands = {}
    for command, arguments in result['Value'] or []:
        commands[command] = arguments
    return S_OK(commands)
#####################################################################################
def setJobCommandStatus(self, jobID, command, status):
    """Update the delivery status of a previously stored job command."""
    escapedJobID = self._escapeString(jobID)
    if not escapedJobID['OK']:
        return escapedJobID
    escapedCommand = self._escapeString(command)
    if not escapedCommand['OK']:
        return escapedCommand
    escapedStatus = self._escapeString(status)
    if not escapedStatus['OK']:
        return escapedStatus
    req = ("UPDATE JobCommands SET Status=%s WHERE JobID=%s AND Command=%s"
           % (escapedStatus['Value'], escapedJobID['Value'], escapedCommand['Value']))
    return self._update(req)
#####################################################################################
def getSummarySnapshot(self, requestedFields=False):
    """Get a grouped snapshot of the Jobs table.

    :param requestedFields: attribute names to group by (a default set is
                            used when falsy); DIRACSetup is always prepended
    :return: S_OK((fieldNames, rows)) where each row carries the grouping
             values plus COUNT(JobID) and SUM(RescheduleCounter)
    """
    if not requestedFields:
        requestedFields = ['Status', 'MinorStatus',
                           'Site', 'Owner', 'OwnerGroup',
                           'JobGroup', 'JobSplitType']
    groupFields = ['DIRACSetup'] + requestedFields
    aggregates = ['COUNT(JobID)', 'SUM(RescheduleCounter)']
    groupClause = ", ".join(groupFields)
    aggClause = ", ".join(aggregates)
    sqlCmd = "SELECT %s, %s From Jobs GROUP BY %s" % (groupClause, aggClause, groupClause)
    result = self._query(sqlCmd)
    if not result['OK']:
        return result
    return S_OK(((groupFields + aggregates), result['Value']))
| chaen/DIRAC | WorkloadManagementSystem/DB/JobDB.py | Python | gpl-3.0 | 70,102 | [
"DIRAC"
] | 867aa07137d16a5d5b1a9608b1c227a9e1681effce870b61697f62a6339b9a11 |
import logging
import re
from . import utils
#######################################################################################################################
# PUBLIC FUNCTIONS
#######################################################################################################################
def nifti2db(file_path, file_type, is_copy, step_id, db_conn, sid_by_patient=False, pid_in_vid=False):
    """Extract some meta-data from NIFTI files (actually mostly from their paths) and stores it in a DB.
    Arguments:
    :param file_path: File path.
    :param file_type: File type.
    :param is_copy: Indicate if this file is a copy.
    :param step_id: Step ID.
    :param db_conn: Database connection.
    :param sid_by_patient: Rarely, a data set might use study IDs which are unique by patient
    (not for the whole study).
    E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID.
    :param pid_in_vid: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a case, you
    to enable this flag. This will try to split PatientID into VisitID and PatientID.
    :return:
    """
    logging.info("Processing '%s'" % file_path)
    # Look up an existing DataFile record for this path, if any
    df = db_conn.db_session.query(db_conn.DataFile).filter_by(path=file_path).one_or_none()
    dataset = db_conn.get_dataset(step_id)
    # Build/refresh the participant -> visit -> session -> sequence -> repetition chain
    _extract_participant(db_conn, file_path, pid_in_vid, dataset)
    visit_id = _extract_visit(db_conn, file_path, pid_in_vid, sid_by_patient, dataset)
    session_id = _extract_session(db_conn, file_path, visit_id)
    sequence_id = _extract_sequence(db_conn, file_path, session_id)
    repetition_id = _extract_repetition(db_conn, file_path, sequence_id)
    if not df:
        # New file: create the DataFile record
        df = db_conn.DataFile(
            path=file_path,
            type=file_type,
            is_copy=is_copy,
            processing_step_id=step_id,
            repetition_id=repetition_id
        )
        db_conn.db_session.merge(df)
        db_conn.db_session.commit()
    else:
        # Existing file: update only the fields that actually changed,
        # committing after each change
        if file_type not in [None, '', df.type]:
            df.type = file_type
            db_conn.db_session.commit()
        if is_copy not in [None, df.is_copy]:
            df.is_copy = is_copy
            db_conn.db_session.commit()
        if step_id not in [None, df.processing_step_id]:
            df.processing_step_id = step_id
            db_conn.db_session.commit()
        if repetition_id not in [None, df.repetition_id]:
            df.repetition_id = repetition_id
            db_conn.db_session.commit()
#######################################################################################################################
# PRIVATE FUNCTIONS
#######################################################################################################################
def _extract_participant(db_conn, file_path, pid_in_vid, dataset):
    """Resolve the participant from the path (5th segment from the end) and
    make sure a matching Participant row exists; return the participant id."""
    name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
    if pid_in_vid:
        # PatientID may embed a visit prefix; keep only the patient part
        try:
            name = utils.split_patient_id(name)[1]
        except TypeError:
            pass
    participant_id = db_conn.get_participant_id(name, dataset)
    # Sync participant table with participant_mapping table
    existing = db_conn.db_session.query(db_conn.Participant).filter_by(id=participant_id).one_or_none()
    if existing is None:
        db_conn.db_session.merge(db_conn.Participant(id=participant_id))
    return participant_id
def _extract_session(db_conn, file_path, visit_id):
try:
session = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
except AttributeError:
logging.debug("Field StudyID was not found")
session = None
return db_conn.get_session_id(session, visit_id)
def _extract_sequence(db_conn, file_path, session_id):
sequence_name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?\.nii', file_path)[0])
return db_conn.get_sequence_id(sequence_name, session_id)
def _extract_visit(db_conn, file_path, pid_in_vid, by_patient, dataset):
    """Extract the visit from the file path, sync the visit table with the
    visit_mapping table, and return the visit DB id.

    :param pid_in_vid: PatientID field mixes patient and visit IDs (LREN data)
    :param by_patient: StudyID is only unique per patient; prefix it with the
                       patient name to form a globally unique visit name
    """
    participant_name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
    visit_name = None
    if pid_in_vid:  # If the patient ID and the visit ID are mixed into the PatientID field (e.g. LREN data)
        try:
            visit_name = utils.split_patient_id(participant_name)[0]
        except TypeError:
            visit_name = None
    if not pid_in_vid or not visit_name:  # Otherwise, we use the StudyID (also used as a session ID) (e.g. PPMI data)
        try:
            visit_name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
            if by_patient:  # If the Study ID is given at the patient level (e.g. LREN data), here is a little trick
                visit_name = participant_name + "_" + visit_name
        except (AttributeError, IndexError):
            # Bug fix: re.findall()[0] raises IndexError on a failed match; the
            # original caught only AttributeError and crashed instead of
            # falling back to a None visit name.
            logging.debug("Field StudyID was not found")
            visit_name = None
    visit_id = db_conn.get_visit_id(visit_name, dataset)
    # Sync visit table with visit_mapping table
    participant_id = db_conn.get_participant_id(participant_name, dataset)
    visit = db_conn.db_session.query(db_conn.Visit).filter_by(id=visit_id).one_or_none()
    if not visit:
        visit = db_conn.Visit(
            id=visit_id,
            participant_id=participant_id,
        )
        db_conn.db_session.merge(visit)
    return visit_id
def _extract_repetition(db_conn, file_path, sequence_id):
repetition_name = str(re.findall('/([^/]+?)/[^/]+?\.nii', file_path)[0])
return db_conn.get_repetition_id(repetition_name, sequence_id)
| LREN-CHUV/mri-meta-extract | data_tracking/nifti_import.py | Python | apache-2.0 | 5,700 | [
"VisIt"
] | c210210abe4ebe51ff824cb14ed7011be5401b8d57aac1e0766c88d0c072af31 |
../../../../share/pyshared/orca/outline.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/outline.py | Python | gpl-3.0 | 42 | [
"ORCA"
] | 4d562f0659105e59fcf111ca891246be8f38b3a7597c3d05d913a5b52a8d5614 |
#! /usr/bin/env python
# Formatting note: this file's maximum line length is 128 characters.
# TODO: Python 2.x/3.x compatibility
from multiprocessing import Process, Queue as mpQueue
from itertools import count, takewhile
from functools import reduce
from random import randrange
from math import log
def isqrt(n):
    """Integer square root: the largest x with x*x <= n, for n >= 0.

    Uses Newton's method on exact integers. Floor division (//) is used so the
    result is identical on Python 2 and Python 3 (the file's TODO asks for
    2.x/3.x compatibility; the original '/' breaks under Python 3).
    """
    if n == 0:
        return 0
    x, y = n, (n + 1) // 2
    while y < x:
        x, y = y, (y + n // y) // 2
    return x
def introot(n, r=2):
    """Integer r-th root: the largest x with x**r <= n.

    Negative n is allowed only for odd r (returns -introot(-n, r)); for even
    r it returns None. Uses binary search; // keeps the arithmetic exact on
    both Python 2 and Python 3 (per the file's 2.x/3.x compatibility TODO).
    """
    if n < 0:
        return None if r % 2 == 0 else -introot(-n, r)
    if n < 2:
        return n
    if r == 2:
        return isqrt(n)  # dedicated fast path
    lower, upper = 0, n
    # Invariant: lower**r <= n < upper**r
    while lower != upper - 1:
        mid = (lower + upper) // 2
        m = mid ** r
        if m == n:
            return mid
        elif m < n:
            lower = mid
        elif m > n:
            upper = mid
    return lower
# Recursive sieve of Eratosthenes
def primegen():
    """Infinite prime generator: a recursive sieve of Eratosthenes.

    The first few primes are yielded literally; afterwards a recursive
    sub-generator supplies the sieving primes. Uses the builtin next()
    (Python 2.6+ and Python 3) instead of the Python-2-only .next() method,
    per the file's 2.x/3.x compatibility TODO. The original also shadowed
    the builtin `next` with a local variable; it is renamed here.
    """
    yield 2; yield 3; yield 5; yield 7; yield 11; yield 13;
    ps = primegen()  # yay recursion
    p = next(ps) and next(ps)  # skip 2; sieve on odd primes starting at 3
    q, sieve, n = p**2, {}, 13
    while True:
        if n not in sieve:
            if n < q:
                yield n
            else:
                # n == p**2: start sieving multiples of p (odd ones only)
                nxt, step = q + 2*p, 2*p
                while nxt in sieve:
                    nxt += step
                sieve[nxt] = step
                p = next(ps)
                q = p**2
        else:
            # Composite: move this prime's marker to its next free multiple
            step = sieve.pop(n)
            nxt = n + step
            while nxt in sieve:
                nxt += step
            sieve[nxt] = step
        n += 2
def primes(n):
    """Return all primes STRICTLY LESS than n, as a list."""
    found = []
    for p in primegen():
        if p >= n:
            break
        found.append(p)
    return found
def listprod(a):
    """Product of all elements of the sequence a; 1 for an empty sequence."""
    total = 1
    for factor in a:
        total *= factor
    return total
def nextprime(n):
    """Return the smallest prime strictly greater than n.

    Works mod 6: after the small cases, candidates are restricted to the
    residues 1 and 5 (mod 6), the only classes that can contain primes > 3.
    """
    if n < 2: return 2
    if n == 2: return 3
    n = (n + 1) | 1 # first odd larger than n
    m = n % 6
    if m == 3:
        # n ≡ 3 (mod 6): only n+2 (≡ 5) can be prime; then jump to n+4 ≡ 1
        if isprime(n+2): return n+2
        n += 4
    elif m == 5:
        # n ≡ 5 (mod 6) is a valid candidate itself; then jump to n+2 ≡ 1
        if isprime(n): return n
        n += 2
    # Here n ≡ 1 (mod 6): test the 6k+1 and 6k+5 members of each block
    for m in count(n, 6):
        if isprime(m ): return m
        if isprime(m+4): return m+4
def pfactor(n):
    """Write n-1 as 2**s * d with d odd and return (s, d).

    Helper for the strong-probable-prime (Miller-Rabin) test. Floor division
    (//) keeps the (exact) arithmetic correct on both Python 2 and Python 3,
    per the file's compatibility TODO.
    """
    s, d, q = 0, n-1, 2
    # While q (a power of two) still divides d: d & (q-1) == 0
    while not d & q - 1:
        s, q = s+1, q*2
    return s, d // (q // 2)
def sprp(n, a, s=None, d=None):
    """Strong probable-prime test of odd n to base a (one Miller-Rabin round).

    :param s, d: optional precomputed decomposition n-1 = 2**s * d (d odd);
                 computed via pfactor(n) when omitted
    :return: True if n is a strong probable prime to base a, else False

    range() replaces the Python-2-only xrange(), per the file's 2.x/3.x
    compatibility TODO; s is small so there is no memory concern.
    """
    if n % 2 == 0:
        return False
    if (s is None) or (d is None):
        s, d = pfactor(n)
    x = pow(a, d, n)
    if x == 1:
        return True
    for _ in range(s):
        if x == n - 1:
            return True
        x = pow(x, 2, n)
    return False
if mpzv == 2:
from gmpy2 import is_strong_prp
def sprp(n, a, s=None, d=None): return is_strong_prp(n, a)
def jacobi(a, p):
    """Jacobi symbol (a|p) for positive odd p; None if p is even or negative.

    Standard binary algorithm using quadratic reciprocity. a //= 2 replaces
    the Python-2-only a /= 2 (exact halving of an even int), keeping behavior
    identical on Python 2 and fixing Python 3, per the file's TODO.
    """
    if (p % 2 == 0) or (p < 0):
        return None  # p must be a positive odd number
    if (a == 0) or (a == 1):
        return a
    a, t = a % p, 1
    while a != 0:
        while not a & 1:
            a //= 2
            # (2|p) = -1 iff p ≡ 3 or 5 (mod 8)
            if p & 7 in (3, 5):
                t *= -1
        a, p = p, a
        # Reciprocity: flip the sign when both are ≡ 3 (mod 4)
        if (a & 3 == 3) and (p & 3) == 3:
            t *= -1
        a %= p
    return t if p == 1 else 0
if mpzv == 1: from gmpy import jacobi
if mpzv == 2: from gmpy2 import jacobi
def chain(n, u1, v1, u2, v2, d, q, m):
    """Lucas-sequence doubling chain used by the SLPRP stage of isprime.

    Advances the Lucas pair (u1, v1) by m doubling/addition steps modulo n,
    using (u2, v2) as the running doubled pair; returns (u1, v1, k) with k
    the accumulated power of q. NOTE(review): semantics inherited from the
    original (its own TODO admits the intent is under-documented); the only
    change is // for the exact halvings and m //= 2, which is identical to
    Python 2's integer '/' and additionally correct on Python 3.
    """
    k = q
    while m > 0:
        u2, v2, q = (u2*v2) % n, (v2*v2 - 2*q) % n, (q*q) % n
        if m % 2 == 1:
            u1, v1 = u2*v1 + u1*v2, v2*v1 + u2*u1*d
            # Make the sums even before the exact halving below
            if u1 % 2 == 1:
                u1 = u1 + n
            if v1 % 2 == 1:
                v1 = v1 + n
            u1, v1, k = (u1 // 2) % n, (v1 // 2) % n, (q*k) % n
        m //= 2
    return u1, v1, k
def isprime(n, tb=(3,5,7,11), eb=(2,), mrb=()): # TODO: more streamlining
# tb: trial division basis
# eb: Euler's test basis
# mrb: Miller-Rabin basis
# This test suite's first false positve is unknown but has been shown to be greater than 2**64.
# Infinitely many are thought to exist.
if n%2 == 0 or n < 13 or n == isqrt(n)**2: return n in (2, 3, 5, 7, 11) # Remove evens, squares, and numbers less than 13
if any(n%p == 0 for p in tb): return n in tb # Trial division
for b in eb: # Euler's test
if b >= n: continue
if not pow(b, n-1, n) == 1: return False
r = n - 1
while r%2 == 0: r /= 2
c = pow(b, r, n)
if c == 1: continue
while c != 1 and c != n-1: c = pow(c, 2, n)
if c == 1: return False
s, d = pfactor(n)
if not sprp(n, 2, s, d): return False
if n < 2047: return True
if n >= 3825123056546413051: # BPSW has two phases: SPRP with base 2 and SLPRP. We just did the SPRP; now we do the SLPRP:
d = 5
while True:
if gcd(d, n) > 1:
p, q = 0, 0
break
if jacobi(d, n) == -1:
p, q = 1, (1 - d) / 4
break
d = -d - 2*d/abs(d)
if p == 0: return n == d
s, t = pfactor(n + 2)
u, v, u2, v2, m = 1, p, 1, p, t/2
k = q
while m > 0:
u2, v2, q = (u2*v2)%n, (v2*v2-2*q)%n, (q*q)%n
if m%2 == 1:
u, v = u2*v+u*v2, v2*v+u2*u*d
if u%2 == 1: u += n
if v%2 == 1: v += n
u, v, k = (u/2)%n, (v/2)%n, (q*k)%n
m /= 2
if (u == 0) or (v == 0): return True
for i in xrange(1, s):
v, k = (v*v-2*k)%n, (k*k)%n
if v == 0: return True
return False
if not mrb:
if n < 1373653: mrb = [3]
elif n < 25326001: mrb = [3,5]
elif n < 3215031751: mrb = [3,5,7]
elif n < 2152302898747: mrb = [3,5,7,11]
elif n < 3474749660383: mrb = [3,5,6,11,13]
elif n < 341550071728321: mrb = [3,5,7,11,13,17] # This number is also a false positive for primes(19+1).
elif n < 3825123056546413051: mrb = [3,5,7,11,13,17,19,23] # Also a false positive for primes(31+1).
return all(sprp(n, b, s, d) for b in mrb) # Miller-Rabin
if mpzv == 2: from gmpy2 import is_bpsw_prp as isprime
def ilog(x, b): # greatest integer l such that b**l <= x.
l = 0
while x >= b:
x /= b
l += 1
return l
# Returns the largest integer that, when squared/cubed/etc, yields n, or 0 if no such integer exists.
# Note that the power to which this number is raised will be prime.
def ispower(n):
for p in primegen():
r = introot(n, p)
if r is None: continue
if r ** p == n: return r
if r == 1: return 0
def pollardRho_brent(n):
if isprime(n): return n
g = n
while g == n:
y, c, m, g, r, q = randrange(1, n), randrange(1, n), randrange(1, n), 1, 1, 1
while g==1:
x, k = y, 0
for i in xrange(r): y = (y**2 + c) % n
while k < r and g == 1:
ys = y
for i in xrange(min(m, r-k)):
y = (y**2 + c) % n
q = q * abs(x-y) % n
g, k = gcd(q, n), k+m
r *= 2
if g==n:
while True:
ys = (ys**2+c)%n
g = gcd(abs(x-ys), n)
if g > 1: break
return g
# http://programmingpraxis.com/2010/04/27/modern-elliptic-curve-factorization-part-2/
def pollard_pm1(n, B1=100, B2=1000): # TODO: What are the best default bounds and way to increment them?
if isprime(n): return n
m = ispower(n)
if m: return m
while True:
pg = primegen()
q = 2 # TODO: what about other initial values of q?
p = pg.next()
while p <= B1: q, p = pow(q, p**ilog(B1, p), n), pg.next()
g = gcd(q-1, n)
if 1 < g < n: return g
while p <= B2: q, p = pow(q, p, n), pg.next()
g = gcd(q-1, n)
if 1 < g < n: return g
# These bounds failed. Increase and try again.
B1 *= 10
B2 *= 10
def mlucas(v, a, n):
""" Helper function for williams_pp1(). Multiplies along a Lucas sequence modulo n. """
v1, v2 = v, (v**2 - 2) % n
for bit in bin(a)[3:]: v1, v2 = ((v1**2 - 2) % n, (v1*v2 - v) % n) if bit == "0" else ((v1*v2 - v) % n, (v2**2 - 2) % n)
return v1
def williams_pp1(n):
if isprime(n): return n
m = ispower(n)
if m: return m
for v in count(1):
for p in primegen():
e = ilog(isqrt(n), p)
if e == 0: break
for _ in xrange(e): v = mlucas(v, p, n)
g = gcd(v - 2, n)
if 1 < g < n: return g
if g == n: break
# http://programmingpraxis.com/2010/04/23/modern-elliptic-curve-factorization-part-1/
# http://programmingpraxis.com/2010/04/27/modern-elliptic-curve-factorization-part-2/
def ecadd(p1, p2, p0, n): # Add two points p1 and p2 given point P0 = P1-P2 modulo n
x1,z1 = p1; x2,z2 = p2; x0,z0 = p0
t1, t2 = (x1-z1)*(x2+z2), (x1+z1)*(x2-z2)
return (z0*pow(t1+t2,2,n) % n, x0*pow(t1-t2,2,n) % n)
def ecdub(p, A, n): # double point p on A modulo n
x, z = p; An, Ad = A
t1, t2 = pow(x+z,2,n), pow(x-z,2,n)
t = t1 - t2
return (t1*t2*4*Ad % n, (4*Ad*t2 + t*An)*t % n)
def ecmul(m, p, A, n): # multiply point p by m on curve A modulo n
if m == 0: return (0, 0)
elif m == 1: return p
else:
q = ecdub(p, A, n)
if m == 2: return q
b = 1
while b < m: b *= 2
b /= 4
r = p
while b:
if m&b: q, r = ecdub(q, A, n), ecadd(q, r, p, n)
else: q, r = ecadd(r, q, p, n), ecdub(r, A, n)
b /= 2
return r
def ecm(n, B1=10, B2=20): # TODO: Determine the best defaults for B1 and B2 and the best way to increment them and iters
# "Modern" ECM using Montgomery curves and an algorithm analogous to the two-phase variant of Pollard's p-1 method
# TODO: We currently compute the prime lists from the sieve as we need them, but this means that we recompute them at every
# iteration. While it would not be particularly efficient memory-wise, we might be able to increase time-efficiency
# by computing the primes we need ahead of time (say once at the beginning and then once each time we increase the
# bounds) and saving them in lists, and then iterate the inner while loops over those lists.
if isprime(n): return n
m = ispower(n)
if m: return m
iters = 1
while True:
for _ in xrange(iters): # TODO: multiprocessing?
# TODO: Do we really want to call the randomizer? Why not have seed be a function of B1, B2, and iters?
# TODO: Are some seeds better than others?
seed = randrange(6, n)
u, v = (seed**2 - 5) % n, 4*seed % n
p = pow(u, 3, n)
Q, C = (pow(v-u,3,n)*(3*u+v) % n, 4*p*v % n), (p, pow(v,3,n))
pg = primegen()
p = pg.next()
while p <= B1: Q, p = ecmul(p**ilog(B1, p), Q, C, n), pg.next()
g = gcd(Q[1], n)
if 1 < g < n: return g
while p <= B2:
# "There is a simple coding trick that can speed up the second stage. Instead of multiplying each prime times Q,
# we iterate over i from B1 + 1 to B2, adding 2Q at each step; when i is prime, the current Q can be accumulated
# into the running solution. Again, we defer the calculation of the greatest common divisor until the end of the
# iteration." TODO: Implement this trick and compare performance.
Q = ecmul(p, Q, C, n)
g *= Q[1]
g %= n
p = pg.next()
g = gcd(g, n)
if 1 < g < n: return g
# This seed failed. Try again with a new one.
# These bounds failed. Increase and try again.
B1 *= 3
B2 *= 3
iters *= 2
# legendre symbol (a|m)
# TODO: which is faster?
def legendre1(a, p): return ((pow(a, (p-1) >> 1, p) + 1) % p) - 1
def legendre2(a, p): # TODO: pretty sure this computes the Jacobi symbol
if a == 0: return 0
x, y, L = a, p, 1
while 1:
if x > (y >> 1):
x = y - x
if y & 3 == 3: L = -L
while x & 3 == 0: x >>= 2
if x & 1 == 0:
x >>= 1
if y & 7 == 3 or y & 7 == 5: L = -L
if x == 1: return ((L+1) % p) - 1
if x & 3 == 3 and y & 3 == 3: L = -L
x, y = y % x, x
if mpzv == 0: legendre = legendre1
else:
if mpzv == 1: from gmpy import legendre as legendre0
if mpzv == 2: from gmpy2 import legendre as legendre0
def legendre(n, p): return legendre0(n, p) if (n > 0) and (p % 2 == 1) else legendre1(n, p)
def legendre(n, p): return legendre0(n, p) if (n > 0) and (p % 2 == 1) else legendre1(n, p)
# modular sqrt(n) mod p
# p must be prime
def mod_sqrt(n, p):
a = n%p
if p%4 == 3: return pow(a, (p+1) >> 2, p)
elif p%8 == 5:
v = pow(a << 1, (p-5) >> 3, p)
i = ((a*v*v << 1) % p) - 1
return (a*v*i)%p
elif p%8 == 1: # Shank's method
q, e = p-1, 0
while q&1 == 0:
e += 1
q >>= 1
n = 2
while legendre(n, p) != -1: n += 1
w, x, y, r = pow(a, q, p), pow(a, (q+1) >> 1, p), pow(n, q, p), e
while True:
if w == 1: return x
v, k = w, 0
while v != 1 and k+1 < r:
v = (v*v)%p
k += 1
if k == 0: return x
d = pow(y, 1 << (r-k-1), p)
x, y = (x*d)%p, (d*d)%p
w, r = (w*y)%p, k
else: return a # p == 2
# modular inverse of a mod m
def modinv(a, m):
a, x, u = a%m, 0, 1
while a: x, u, m, a = u, x - (m/a)*u, a, m%a
return x
# Multiple Polynomial Quadratic Sieve
# Most of this function is copied verbatim from https://codegolf.stackexchange.com/questions/8629/9088#9088
def mpqs(n):
# When the bound proves insufficiently large, we throw out all our work and start over.
# TODO: When this happens, get more data, but don't trash what we already have.
# TODO: Rewrite to get a few more relations before proceeding to the linear algebra.
# TODO: When we need to increase the bound, what is the optimal increment?
# Special cases: this function poorly handles primes and perfect powers:
m = ispower(n)
if m: return m
if isprime(n): return n
root_n, root_2n = isqrt(n), isqrt(2*n)
bound = ilog(n**6, 10)**2 # formula chosen by experiment
while True:
try:
prime, mod_root, log_p, num_prime = [], [], [], 0
# find a number of small primes for which n is a quadratic residue
p = 2
while p < bound or num_prime < 3:
leg = legendre(n%p, p)
if leg == 1:
prime += [p]
mod_root += [mod_sqrt(n, p)] # the rhs was [int(mod_sqrt(n, p))]. If we get errors, put it back.
log_p += [log(p, 10)]
num_prime += 1
elif leg == 0: return p
p = nextprime(p)
x_max = len(prime)*60 # size of the sieve
m_val = (x_max * root_2n) >> 1 # maximum value on the sieved range
# fudging the threshold down a bit makes it easier to find powers of primes as factors
# as well as partial-partial relationships, but it also makes the smoothness check slower.
# there's a happy medium somewhere, depending on how efficient the smoothness check is
thresh = log(m_val, 10) * 0.735
# skip small primes. they contribute very little to the log sum
# and add a lot of unnecessary entries to the table
# instead, fudge the threshold down a bit, assuming ~1/4 of them pass
min_prime = mpz(thresh*3)
fudge = sum(log_p[i] for i,p in enumerate(prime) if p < min_prime)/4
thresh -= fudge
smooth, used_prime, partial = [], set(), {}
num_smooth, num_used_prime, num_partial, num_poly, root_A = 0, 0, 0, 0, isqrt(root_2n / x_max)
while num_smooth <= num_used_prime:
# find an integer value A such that:
# A is =~ sqrt(2*n) / x_max
# A is a perfect square
# sqrt(A) is prime, and n is a quadratic residue mod sqrt(A)
while True:
root_A = nextprime(root_A)
leg = legendre(n, root_A)
if leg == 1: break
elif leg == 0: return root_A
A = root_A**2
# solve for an adequate B
# B*B is a quadratic residue mod n, such that B*B-A*C = n
# this is unsolvable if n is not a quadratic residue mod sqrt(A)
b = mod_sqrt(n, root_A)
B = (b + (n - b*b) * modinv(b + b, root_A))%A
C = (B*B - n) / A # B*B-A*C = n <=> C = (B*B-n)/A
num_poly += 1
# sieve for prime factors
sums, i = [0.0]*(2*x_max), 0
for p in prime:
if p < min_prime:
i += 1
continue
logp = log_p[i]
inv_A = modinv(A, p)
# modular root of the quadratic
a, b, k = mpz(((mod_root[i] - B) * inv_A)%p), mpz(((p - mod_root[i] - B) * inv_A)%p), 0
while k < x_max:
if k+a < x_max: sums[k+a] += logp
if k+b < x_max: sums[k+b] += logp
if k:
sums[k-a+x_max] += logp
sums[k-b+x_max] += logp
k += p
i += 1
# check for smooths
i = 0
for v in sums:
if v > thresh:
x, vec, sqr = x_max-i if i > x_max else i, set(), []
# because B*B-n = A*C
# (A*x+B)^2 - n = A*A*x*x+2*A*B*x + B*B - n
# = A*(A*x*x+2*B*x+C)
# gives the congruency
# (A*x+B)^2 = A*(A*x*x+2*B*x+C) (mod n)
# because A is chosen to be square, it doesn't need to be sieved
val = sieve_val = (A*x + 2*B)*x + C
if sieve_val < 0: vec, sieve_val = {-1}, -sieve_val
for p in prime:
while sieve_val%p == 0:
if p in vec: sqr += [p] # track perfect sqr facs to avoid sqrting something huge at the end
vec ^= {p}
sieve_val = mpz(sieve_val / p)
if sieve_val == 1: # smooth
smooth += [(vec, (sqr, (A*x+B), root_A))]
used_prime |= vec
elif sieve_val in partial:
# combine two partials to make a (xor) smooth
# that is, every prime factor with an odd power is in our factor base
pair_vec, pair_vals = partial[sieve_val]
sqr += list(vec & pair_vec) + [sieve_val]
vec ^= pair_vec
smooth += [(vec, (sqr + pair_vals[0], (A*x+B)*pair_vals[1], root_A*pair_vals[2]))]
used_prime |= vec
num_partial += 1
else: partial[sieve_val] = (vec, (sqr, A*x+B, root_A)) # save partial for later pairing
i += 1
num_smooth, num_used_prime = len(smooth), len(used_prime)
used_prime = sorted(list(used_prime))
# set up bit fields for gaussian elimination
masks, mask, bitfields = [], 1, [0]*num_used_prime
for vec, vals in smooth:
masks += [mask]
i = 0
for p in used_prime:
if p in vec: bitfields[i] |= mask
i += 1
mask <<= 1
# row echelon form
offset = 0
null_cols = []
for col in xrange(num_smooth):
pivot = bitfields[col-offset] & masks[col] == 0 # This occasionally throws IndexErrors.
# TODO: figure out why it throws errors and fix it.
for row in xrange(col+1-offset, num_used_prime):
if bitfields[row] & masks[col]:
if pivot: bitfields[col-offset], bitfields[row], pivot = bitfields[row], bitfields[col-offset], False
else: bitfields[row] ^= bitfields[col-offset]
if pivot:
null_cols += [col]
offset += 1
# reduced row echelon form
for row in xrange(num_used_prime):
mask = bitfields[row] & -bitfields[row] # lowest set bit
for up_row in xrange(row):
if bitfields[up_row] & mask: bitfields[up_row] ^= bitfields[row]
# check for non-trivial congruencies
# TODO: if none exist, check combinations of null space columns...
# if _still_ none exist, sieve more values
for col in null_cols:
all_vec, (lh, rh, rA) = smooth[col]
lhs = lh # sieved values (left hand side)
rhs = [rh] # sieved values - n (right hand side)
rAs = [rA] # root_As (cofactor of lhs)
i = 0
for field in bitfields:
if field & masks[col]:
vec, (lh, rh, rA) = smooth[i]
lhs += list(all_vec & vec) + lh
all_vec ^= vec
rhs += [rh]
rAs += [rA]
i += 1
factor = gcd(listprod(rAs)*listprod(lhs) - listprod(rhs), n)
if 1 < factor < n: return factor
except IndexError: pass
bound *= 1.2
def multifactor(n, methods=(pollardRho_brent, pollard_pm1, williams_pp1, ecm, mpqs), verbose=False):
# Note that the multiprocing incurs relatively significant overhead. Only call this if n is proving difficult to factor.
def factory(method, n, output): output.put((method(n), str(method).split()[1]))
factors = mpQueue()
procs = [Process(target=factory, args=(m, n, factors)) for m in methods]
for p in procs: p.start()
(f, g) = factors.get()
for p in procs: p.terminate()
if verbose:
names = {"pollardRho_brent":"prb", "pollard_pm1":"p-1", "williams_pp1":"p+1"}
print "\033[1;31m" + (names[g] if g in names else g) + "\033[;m",
stdout.flush()
return f
def primefac(n, trial_limit=1000, rho_rounds=42000, verbose=False,
methods=(pollardRho_brent, pollard_pm1, williams_pp1, ecm, mpqs)):
# Obtains a complete factorization of n, yielding the prime factors as they are obtained.
# If the user explicitly specifies a splitting method, use that method. Otherwise,
# 1. Pull out small factors with trial division.
# TODO: a few rounds of Fermat's method?
# 2. Do a few rounds of Pollard's Rho algorithm.
# TODO: a few rounds of ECM by itself?
# TODO: a certain amount of P-1?
# 3. Launch multifactor on the remainder. Multifactor has enough overhead that we want to be fairly sure that rho isn't
# likely to yield new factors soon. The default value of rho_rounds=42000 seems good for that but is probably overkill.
if n < 2: return
if isprime(n): yield n; return
factors, nroot = [], isqrt(n)
for p in primegen(): # Note that we remove factors of 2 whether the user wants to or not.
if n%p == 0:
while n%p == 0:
yield p
n /= p
nroot = isqrt(n)
if isprime(n):
yield n
return
if p > nroot:
if n != 1: yield n
return
if p >= trial_limit: break
if isprime(n): yield n; return
if rho_rounds == "inf":
factors = [n]
while len(factors) != 0:
n = min(factors)
factors.remove(n)
f = pollardRho_brent(n)
if isprime(f): yield f
else: factors.append(f)
n /= f
if isprime(n): yield n
else: factors.append(n)
return
factors, difficult = [n], []
while len(factors) != 0:
rhocount = 0
n = factors.pop()
try:
g = n
while g == n:
x, c, g = randrange(1, n), randrange(1, n), 1
y = x
while g==1:
if rhocount >= rho_rounds: raise Exception
rhocount += 1
x = (x**2 + c) % n
y = (y**2 + c) % n
y = (y**2 + c) % n
g = gcd(x-y, n)
# We now have a nontrivial factor g of n. If we took too long to get here, we're actually at the except statement.
if isprime(g): yield g
else: factors.append(g)
n /= g
if isprime(n): yield n
else: factors.append(n)
except Exception: difficult.append(n) # Factoring n took too long. We'll have multifactor chug on it.
factors = difficult
while len(factors) != 0:
n = min(factors)
factors.remove(n)
f = multifactor(n, methods=methods, verbose=verbose)
if isprime(f): yield f
else: factors.append(f)
n /= f
if isprime(n): yield n
else: factors.append(n)
def factorint(n, trial_limit=1000, rho_rounds=42000, methods=(pollardRho_brent, pollard_pm1, williams_pp1, ecm, mpqs)):
out = {}
for p in primefac(n, trial_limit=trial_limit, rho_rounds=rho_rounds, methods=methods): out[p] = out.get(p, 0) + 1
return out
usage = """
This is primefac version 1.1.
USAGE:
primefac [-vs|-sv] [-v|--verbose] [-s|--summary] [-t=NUM] [-r=NUM]
[-m=[prb][,p-1][,p+1][,ecm][,mpqs]] rpn
"rpn" is evaluated using integer arithmetic. Each number that remains on
the stack after evaluation is then factored.
"-t" is the trial division limit. Default == 1000. Use "-t=inf" to use
trial division exclusively.
"-r" is the number of rounds of Pollard's rho algorithm to try before
calling a factor "difficult". Default == 42,000. Use "-r=inf" to use
Pollard's rho exclusively once the trial division is completed.
If verbosity is invoked, we indicate in the output which algorithm produced
which factors during the multifactor phase.
If the summary flag is absent, then output is identical to the output of the
GNU factor command, except possibly for the order of the factors and, if
verbosity has been turned on, the annotations indicating which algorithm
produced which factors.
If the summary flag is present, then output is modified by adding a single
newline between each item's output, before the first, and after the last.
Each item's output is also modified by printing a second line of data
summarizing the results by describing the number of decimal digits in the
input, the number of decimal digits in each prime factor, and the factors'
multiplicities. For example:
>>> user@computer:~$ primefac -s 24 ! 1 - 7 !
>>>
>>> 620448401733239439359999: 991459181683 625793187653
>>> Z24 = P12 x P12 = 625793187653 x 991459181683
>>>
>>> 5040: 2 2 2 2 3 3 5 7
>>> Z4 = P1^4 x P1^2 x P1 x P1 = 2^4 x 3^2 x 5 x 7
>>>
>>> user@computer:~$
Note that the primes in the summary lines are listed in strictly-increasing
order, regardless of the order in which they were found.
The single-character versions of the verbosity and summary flags may be
combined into a single flag, "-vs" or "-sv".
The "-m" flag controls what methods are run during the multifactor phase.
prb and ecm can be listed repeatedly to run multiple instances of these
methods; running multiple instances of p-1, p+1, or mpqs confers no benefit,
so repeated listings of those methods are ignored.
This program can also be imported into your Python scripts as a module.
DETAILS:
Factoring: 1. Trial divide using the primes <= the specified limit.
2. Run Pollard's rho algorithm on the remainder. Declare a
cofactor "difficult" if it survives more than the specified
number of rounds of rho.
3. Subject each remaining cofactor to five splitting methods in
parallel: Pollard's rho algorithm with Brent's improvement,
Pollard's p-1 method,
Williams' p+1 method,
the elliptic curve method,
and the multiple-polynomial quadratic sieve.
Using the "verbose" option will cause primefac to report which of
the various splitting methods separated which factors in stage 3.
RPN: The acceptable binary operators are + - * / % **.
They all have the same meaning as they do in Python source code
--- i.e., they are addition, subtraction, multiplication, integer
division, remainder, and exponentiation.
The acceptable unary operators are ! #. They are the factorial
and primorial, respectively.
There are three aliases: x for *, xx for **, and p! for #.
You may enclose the RPN expression in quotes if you so desire.
PERFORMANCE:
CREDITS:
Not much of this code was mine from the start.
* The MPQS code was copied mostly verbatim from
https://codegolf.stackexchange.com/questions/8629/9088#9088
* The functions to manipulate points in the elliptic curve method were
copied from a reply to the Programming Praxis post at
http://programmingpraxis.com/2010/04/23/
""" # TODO performance, credits
def rpn(instr):
stack = []
for token in instr.split():
if set(token).issubset("1234567890"): stack.append(int(token))
elif len(token) > 1 and token[0] == '-' and set(token[1:]).issubset("1234567890"): stack.append(int(token))
elif token in ('+', '-', '*', '/', '%', '**', 'x', 'xx'): # binary operators
b = stack.pop()
a = stack.pop()
if token == '+' : res = a + b
elif token == '-' : res = a - b
elif token == '*' : res = a * b
elif token == 'x' : res = a * b
elif token == '/' : res = a / b
elif token == '%' : res = a % b
elif token == '**': res = a ** b
elif token == 'xx': res = a ** b
stack.append(res)
elif token in ('!', '#', 'p!'): # unary operators
a = stack.pop()
if token == '!' : res = listprod(xrange(1, a+1))
elif token == '#' : res = listprod(primes(a+1))
elif token == 'p!': res = listprod(primes(a+1))
stack.append(res)
else: raise Exception, "Failed to evaluate RPN expression: not sure what to do with '{t}'.".format(t=token)
return map(mpz, stack)
# TODO timeout?
if __name__ == "__main__":
from sys import stdout, exit, argv
if len(argv) == 1: exit(usage)
start, rpx, tr, rr, veb, su = 1, [], 1000, 42000, False, False
ms = {"prb":pollardRho_brent, "p-1":pollard_pm1, "p+1":williams_pp1, "ecm":ecm, "mpqs":mpqs}
methods = (pollardRho_brent, pollard_pm1, williams_pp1, ecm, mpqs)
try:
for arg in argv[1:]:
if arg in ("-v", "--verbose"): veb = True
elif arg in ("-s", "--summary"): su = True
elif arg in ("-vs", "-sv"): veb, su = True, True
elif arg[:3] == "-t=": tr = "inf" if arg[3:] == "inf" else int(arg[3:]) # Maximum number for trial division
elif arg[:3] == "-r=": rr = "inf" if arg[3:] == "inf" else int(arg[3:]) # Number of rho rounds before multifactor
elif arg[:3] == "-m=": #methods = tuple(ms[x] for x in arg[3:].split(',') if x in ms)
methods = []
for x in arg[3:].split(','):
if x in ms:
if x in ("p-1", "p+1", "mpqs") and ms[x] in methods: continue
methods.append(ms[x])
else: rpx.append(arg)
nums = rpn(' '.join(rpx))
for x in nums: assert isinstance(x, inttypes)
except: exit("Error while parsing arguments")
if su: print
for n in nums:
print "%d:" % n,
f = {}
for p in primefac(n, trial_limit=(n if tr == "inf" else tr), rho_rounds=rr, verbose=veb, methods=methods):
f[p] = f.get(p, 0) + 1
print p,
stdout.flush()
assert isprime(p) and n%p == 0, (n, p)
print
if su:
print "Z%d = " % len(str(n)),
outstr = ""
for p in sorted(f):
if f[p] == 1: outstr += "P%d x " % len(str(p))
else: outstr += "P%d^%d x " % (len(str(p)), f[p])
outstr = outstr[:-2] + " = "
for p in sorted(f):
if f[p] == 1: outstr += " %d x" % p
else: outstr += " %d^%d x" % (p, f[p])
print outstr[:-2]
print
# Fun examples:
# primefac -v 1489576198567193874913874619387459183543154617315437135656
# On my system, the factor race is a bit unpredicatble on this number. prb, ecm, p-1, and mpqs all show up reasonably often.
# primefac -v 12956921851257164598146425167654345673426523793463
# Z50 = P14 x P17 x P20 = 24007127617807 x 28050585032291527 x 19240648901716863967. p-1 gets the P14 and p+1 gets the rest.
# primefac -v 38 ! 1 + --> Z45 = P23 x P23 = 14029308060317546154181 x 37280713718589679646221
# The MPQS (almost always) gets this one. Depending on the system running things, this can take from 10 seconds to 3 minutes.
| goulu/Goulib | Goulib/primefac.py | Python | lgpl-3.0 | 34,617 | [
"Gaussian"
] | ed020f1dd8315324aa776b6e0980f24a966ab4448c293324ba218c508e9b27eb |
""" BHMM: A toolkit for Bayesian hidden Markov model analysis of single-molecule trajectories.
This project provides tools for estimating the number of metastable states, rate
constants between the states, equilibrium populations, distributions
characterizing the states, and distributions of these quantities from
single-molecule data. This data could be FRET data, single-molecule pulling
data, or any data where one or more observables are recorded as a function of
time. A Hidden Markov Model (HMM) is used to interpret the observed dynamics,
and a distribution of models that fit the data is sampled using Bayesian
inference techniques and Markov chain Monte Carlo (MCMC), allowing for both the
characterization of uncertainties in the model and modeling of the expected
information gain by new experiments.
"""
from __future__ import print_function
import os
from os.path import relpath, join
import versioneer
from setuptools import setup, Extension, find_packages
DOCLINES = __doc__.split("\n")
########################
CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)
Programming Language :: Python
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
################################################################################
# USEFUL SUBROUTINES
################################################################################
def find_package_data(data_root, package_root):
files = []
for root, dirnames, filenames in os.walk(data_root):
for fn in filenames:
files.append(relpath(join(root, fn), package_root))
return files
################################################################################
# SETUP
################################################################################
def extensions():
from Cython.Build import cythonize
from numpy import get_include
np_inc = get_include()
extensions = [
Extension('bhmm.hidden.impl_c.hidden',
sources = ['./bhmm/hidden/impl_c/hidden.pyx',
'./bhmm/hidden/impl_c/_hidden.c'],
include_dirs = ['/bhmm/hidden/impl_c/', np_inc]),
Extension('bhmm.output_models.impl_c.discrete',
sources = ['./bhmm/output_models/impl_c/discrete.pyx',
'./bhmm/output_models/impl_c/_discrete.c'],
include_dirs = ['/bhmm/output_models/impl_c/', np_inc]),
Extension('bhmm.output_models.impl_c.gaussian',
sources = ['./bhmm/output_models/impl_c/gaussian.pyx',
'./bhmm/output_models/impl_c/_gaussian.c'],
include_dirs = ['/bhmm/output_models/impl_c/', np_inc]),
Extension('bhmm._external.clustering.kmeans_clustering_64',
sources=['./bhmm/_external/clustering/src/clustering.c',
'./bhmm/_external/clustering/src/kmeans.c'],
include_dirs=['./bhmm/_external/clustering/include',
np_inc],
extra_compile_args=['-std=c99','-O3', '-DCLUSTERING_64']),
Extension('bhmm._external.clustering.kmeans_clustering_32',
sources=['./bhmm/_external/clustering/src/clustering.c',
'./bhmm/_external/clustering/src/kmeans.c'],
include_dirs=['./bhmm/_external/clustering/include',
np_inc],
extra_compile_args=['-std=c99','-O3']),
]
return cythonize(extensions)
class lazy_cythonize(list):
"""evaluates extension list lazyly.
pattern taken from http://tinyurl.com/qb8478q"""
def __init__(self, callback):
self._list, self.callback = None, callback
def c_list(self):
if self._list is None: self._list = self.callback()
return self._list
def __iter__(self):
for e in self.c_list(): yield e
def __getitem__(self, ii): return self.c_list()[ii]
def __len__(self): return len(self.c_list())
setup(
name='bhmm',
author='John Chodera and Frank Noe',
author_email='john.chodera@choderalab.org',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='LGPL',
url='https://github.com/bhmm/bhmm',
platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
classifiers=CLASSIFIERS.splitlines(),
package_dir={'bhmm': 'bhmm'},
packages=find_packages(),
# NOTE: examples installs to bhmm.egg/examples/, NOT bhmm.egg/bhmm/examples/.
# You need to do utils.get_data_filename("../examples/*/setup/").
package_data={'bhmm': find_package_data('examples', 'bhmm') +
find_package_data('bhmm/tests/data', 'bhmm')},
zip_safe=False,
install_requires=[
'numpy',
'scipy',
'msmtools',
'six',
],
setup_requires=[
'cython',
'numpy',
],
ext_modules=lazy_cythonize(extensions),
)
| jchodera/bhmm | setup.py | Python | lgpl-3.0 | 5,373 | [
"Gaussian"
] | 156e14baa433f7c55e28445633647347c6f34c5275006c655f2f5b13f6afabaf |
# -*- coding: utf-8 -*-
#
# rate_neuron_dm.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
rate_neuron decision making
------------------
A binary decision is implemented in the form of two rate neurons
engaging in mutual inhibition.
Evidence for each decision is reflected by the mean input
experienced by the respective neuron.
The activity of each neuron is recorded using multimeter devices.
It can be observed how noise as well as the difference in evidence
affects which neuron exhibits larger activity and hence which
decision will be made.
'''
import nest
import pylab
import numpy
'''
First, the Function build_network is defined to build the network and
return the handles of two decision units and the mutimeter
'''
def build_network(sigma, dt):
nest.ResetKernel()
nest.SetKernelStatus({'resolution': dt, 'use_wfr': False})
Params = {'lambda': 0.1, 'sigma': sigma, 'tau': 1., 'rectify_output': True}
D1 = nest.Create('lin_rate_ipn', params=Params)
D2 = nest.Create('lin_rate_ipn', params=Params)
nest.Connect(D1, D2, 'all_to_all', {
'model': 'rate_connection_instantaneous', 'weight': -0.2})
nest.Connect(D2, D1, 'all_to_all', {
'model': 'rate_connection_instantaneous', 'weight': -0.2})
mm = nest.Create('multimeter')
nest.SetStatus(mm, {'interval': dt, 'record_from': ['rate']})
nest.Connect(mm, D1, syn_spec={'delay': dt})
nest.Connect(mm, D2, syn_spec={'delay': dt})
return D1, D2, mm
'''
The function build_network takes the noise parameter sigma
and the time resolution as arguments.
First the Kernel is reset and the use_wfr (waveform-relaxation) is set to
false while the resolution is set to the specified value dt.
Two rate neurons with linear activation functions are created and the
handle is stored in the variables D1 and D2. The output of both decision
units is rectified at zero.
The two decisions units are coupled via mutual inhibition.
Next the multimeter is created and the handle stored in mm and the option
'record_from' is set. The multimeter is then connected to the two units
in order to 'observe' them. The connect function takes the handles as input.
'''
'''
The decision making process is simulated for three different levels of noise
and three differences in evidence for a given decision. The activity of both
decision units is plotted for each scenario.
'''
fig_size = [14, 8]
fig_rows = 3
fig_cols = 3
fig_plots = fig_rows * fig_cols
face = 'white'
edge = 'white'
ax = [None] * fig_plots
fig = pylab.figure(facecolor=face, edgecolor=edge, figsize=fig_size)
dt = 1e-3
sigma = [0.0, 0.1, 0.2]
dE = [0.0, 0.004, 0.008]
T = numpy.linspace(0, 200, 200 / dt - 1)
for i in range(9):
c = i % 3
r = int(i / 3)
D1, D2, mm = build_network(sigma[r], dt)
'''
First using build_network the network is build and the handles of
the decision units and the multimeter are stored in D1, D2 and mm
'''
nest.Simulate(100.0)
nest.SetStatus(D1, {'mu': 1. + dE[c]})
nest.SetStatus(D2, {'mu': 1. - dE[c]})
nest.Simulate(100.0)
'''
The network is simulated using `Simulate`, which takes the desired
simulation time in milliseconds and advances the network state by
this amount of time. After an initial period in the absence of evidence
for either decision, evidence is given by changing the state of each
decision unit. Note that both units receive evidence.
'''
data = nest.GetStatus(mm)
senders = data[0]['events']['senders']
voltages = data[0]['events']['rate']
'''
The activity values ('voltages') are read out by the multimeter
'''
ax[i] = fig.add_subplot(fig_rows, fig_cols, i + 1)
ax[i].plot(T, voltages[numpy.where(senders == D1)],
'b', linewidth=2, label="D1")
ax[i].plot(T, voltages[numpy.where(senders == D2)],
'r', linewidth=2, label="D2")
ax[i].set_ylim([-.5, 12.])
ax[i].get_xaxis().set_ticks([])
ax[i].get_yaxis().set_ticks([])
if c == 0:
ax[i].set_ylabel("activity ($\sigma=%.1f$) " % (sigma[r]))
ax[i].get_yaxis().set_ticks([0, 3, 6, 9, 12])
if r == 0:
ax[i].set_title("$\Delta E=%.3f$ " % (dE[c]))
if c == 2:
pylab.legend(loc=0)
if r == 2:
ax[i].get_xaxis().set_ticks([0, 50, 100, 150, 200])
ax[i].set_xlabel('time (ms)')
'''
The activity of the two units is plottedin each scenario.
In the absence of noise, the network will not make a decision if evidence
for both choices is equal. With noise, this symmetry can be broken and a
decision wil be taken despite identical evidence.
As evidence for D1 relative to D2 increases, it becomes more likely that
the corresponding decision will be taken. For small differences in the
evidence for the two decisions, noise can lead to the 'wrong' decision.
'''
pylab.show()
| terhorstd/nest-simulator | pynest/examples/rate_neuron_dm.py | Python | gpl-2.0 | 5,547 | [
"NEURON"
] | ac5af906fc0ad06dfd8fd71035c3dbc6b95aeec48e1d158155e67f2bbef37e27 |
import sys
import os
import pytest
sys.path.insert(0, os.path.abspath('../../'))
from SeqFindr import blast
from SeqFindr import config
from SeqFindr import imaging
from SeqFindr import seqfindr
from SeqFindr import util
from SeqFindr import vfdb_to_seqfindr
| nbenzakour/SeqFindR | tests/unittests/context.py | Python | apache-2.0 | 261 | [
"BLAST"
] | f8446e2955dce477f813553dc6f3c9f82e41695565ecfcde3a6f9847034bb94e |
#!/usr/bin/env python
'''
Quantile regression model
Model parameters are estimated using iterated reweighted least squares. The
asymptotic covariance matrix estimated using kernel density estimation.
Author: Vincent Arel-Bundock
License: BSD-3
Created: 2013-03-19
The original IRLS function was written for Matlab by Shapour Mohammadi,
University of Tehran, 2008 (shmohammadi@gmail.com), with some lines based on
code written by James P. Lesage in Applied Econometrics Using MATLAB(1999).PP.
73-4. Translated to python with permission from original author by Christian
Prinoth (christian at prinoth dot name).
'''
from statsmodels.compat.python import range
import numpy as np
import warnings
import scipy.stats as stats
from scipy.linalg import pinv
from scipy.stats import norm
from statsmodels.tools.tools import chain_dot
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import (RegressionModel,
RegressionResults,
RegressionResultsWrapper)
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
class QuantReg(RegressionModel):
    '''Quantile Regression

    Estimate a quantile regression model using iterative reweighted least
    squares.

    Parameters
    ----------
    endog : array or dataframe
        endogenous/response variable
    exog : array or dataframe
        exogenous/explanatory variable(s)

    Notes
    -----
    The Least Absolute Deviation (LAD) estimator is a special case where
    quantile is set to 0.5 (q argument of the fit method).

    The asymptotic covariance matrix is estimated following the procedure in
    Greene (2008, p.407-408), using either the logistic or gaussian kernels
    (kernel argument of the fit method).

    References
    ----------
    General:

    * Birkes, D. and Y. Dodge(1993). Alternative Methods of Regression, John Wiley and Sons.
    * Green,W. H. (2008). Econometric Analysis. Sixth Edition. International Student Edition.
    * Koenker, R. (2005). Quantile Regression. New York: Cambridge University Press.
    * LeSage, J. P.(1999). Applied Econometrics Using MATLAB,

    Kernels (used by the fit method):

    * Green (2008) Table 14.2

    Bandwidth selection (used by the fit method):

    * Bofinger, E. (1975). Estimation of a density function using order statistics. Australian Journal of Statistics 17: 1-17.
    * Chamberlain, G. (1994). Quantile regression, censoring, and the structure of wages. In Advances in Econometrics, Vol. 1: Sixth World Congress, ed. C. A. Sims, 171-209. Cambridge: Cambridge University Press.
    * Hall, P., and S. Sheather. (1988). On the distribution of the Studentized quantile. Journal of the Royal Statistical Society, Series B 50: 381-391.

    Keywords: Least Absolute Deviation(LAD) Regression, Quantile Regression,
    Regression, Robust Estimation.
    '''

    def __init__(self, endog, exog, **kwargs):
        super(QuantReg, self).__init__(endog, exog, **kwargs)

    def whiten(self, data):
        """
        QuantReg model whitener does nothing: returns data.
        """
        return data

    def fit(self, q=.5, vcov='robust', kernel='epa', bandwidth='hsheather',
            max_iter=1000, p_tol=1e-6, **kwargs):
        '''Solve by Iterative Weighted Least Squares

        Parameters
        ----------
        q : float
            Quantile must be between 0 and 1
        vcov : string, method used to calculate the variance-covariance matrix
            of the parameters. Default is ``robust``:

            - robust : heteroskedasticity robust standard errors (as suggested
              in Greene 6th edition)
            - iid : iid errors (as in Stata 12)
        kernel : string, kernel to use in the kernel density estimation for the
            asymptotic covariance matrix:

            - epa: Epanechnikov
            - cos: Cosine
            - gau: Gaussian
            - par: Parzene
        bandwidth: string, Bandwidth selection method in kernel density
            estimation for asymptotic covariance estimate (full
            references in QuantReg docstring):

            - hsheather: Hall-Sheather (1988)
            - bofinger: Bofinger (1975)
            - chamberlain: Chamberlain (1994)
        '''
        if q < 0 or q > 1:
            # ValueError is a subclass of Exception, so callers catching the
            # old broad Exception keep working; the message now names the
            # actual argument (it previously said "p").
            raise ValueError('q must be between 0 and 1')

        kern_names = ['biw', 'cos', 'epa', 'gau', 'par']
        if kernel not in kern_names:
            raise ValueError("kernel must be one of " + ', '.join(kern_names))
        else:
            kernel = kernels[kernel]

        if bandwidth == 'hsheather':
            bandwidth = hall_sheather
        elif bandwidth == 'bofinger':
            bandwidth = bofinger
        elif bandwidth == 'chamberlain':
            bandwidth = chamberlain
        else:
            raise ValueError("bandwidth must be in 'hsheather', 'bofinger', "
                             "'chamberlain'")

        endog = self.endog
        exog = self.exog
        nobs = self.nobs
        exog_rank = np_matrix_rank(self.exog)
        self.rank = exog_rank
        self.df_model = float(self.rank - self.k_constant)
        self.df_resid = self.nobs - self.rank
        n_iter = 0
        xstar = exog

        beta = np.ones(exog_rank)
        # TODO: better start, initial beta is used only for convergence check

        # Note the following doesn't work yet,
        # the iteration loop always starts with OLS as initial beta
        # if start_params is not None:
        #     if len(start_params) != rank:
        #         raise ValueError('start_params has wrong length')
        #     beta = start_params
        # else:
        #     # start with OLS
        #     beta = np.dot(np.linalg.pinv(exog), endog)

        diff = 10
        cycle = False

        history = dict(params=[], mse=[])
        while n_iter < max_iter and diff > p_tol and not cycle:
            n_iter += 1
            beta0 = beta
            xtx = np.dot(xstar.T, exog)
            xty = np.dot(xstar.T, endog)
            beta = np.dot(pinv(xtx), xty)
            resid = endog - np.dot(exog, beta)

            # Clamp tiny residuals so the reweighting below never divides
            # by (near) zero.
            mask = np.abs(resid) < .000001
            resid[mask] = np.sign(resid[mask]) * .000001
            resid = np.where(resid < 0, q * resid, (1 - q) * resid)
            resid = np.abs(resid)
            xstar = exog / resid[:, np.newaxis]
            diff = np.max(np.abs(beta - beta0))
            history['params'].append(beta)
            history['mse'].append(np.mean(resid * resid))

            if (n_iter >= 300) and (n_iter % 100 == 0):
                # check for convergence circle, shouldn't happen
                for ii in range(2, 10):
                    if np.all(beta == history['params'][-ii]):
                        cycle = True
                        # Bug fix: only warn when a cycle is actually found.
                        # The original warned unconditionally every 100
                        # iterations once past 300.
                        warnings.warn("Convergence cycle detected",
                                      ConvergenceWarning)
                        break

        if n_iter == max_iter:
            # Bug fix: report the configured limit instead of the
            # hard-coded "(1000)".
            warnings.warn("Maximum number of iterations (%d) reached." %
                          max_iter, IterationLimitWarning)

        e = endog - np.dot(exog, beta)
        # Greene (2008, p.407) writes that Stata 6 uses this bandwidth:
        # h = 0.9 * np.std(e) / (nobs**0.2)
        # Instead, we calculate bandwidth as in Stata 12
        iqre = stats.scoreatpercentile(e, 75) - stats.scoreatpercentile(e, 25)
        h = bandwidth(nobs, q)
        h = min(np.std(endog),
                iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))

        # Kernel density estimate of the residual density at zero,
        # i.e. the reciprocal of the "sparsity".
        fhat0 = 1. / (nobs * h) * np.sum(kernel(e / h))

        if vcov == 'robust':
            d = np.where(e > 0, (q / fhat0)**2, ((1 - q) / fhat0)**2)
            xtxi = pinv(np.dot(exog.T, exog))
            xtdx = np.dot(exog.T * d[np.newaxis, :], exog)
            vcov = chain_dot(xtxi, xtdx, xtxi)
        elif vcov == 'iid':
            vcov = (1. / fhat0)**2 * q * (1 - q) * pinv(np.dot(exog.T, exog))
        else:
            raise ValueError("vcov must be 'robust' or 'iid'")

        lfit = QuantRegResults(self, beta, normalized_cov_params=vcov)

        lfit.q = q
        lfit.iterations = n_iter
        lfit.sparsity = 1. / fhat0
        lfit.bandwidth = h
        lfit.history = history

        return RegressionResultsWrapper(lfit)
def _parzen(u):
z = np.where(np.abs(u) <= .5, 4./3 - 8. * u**2 + 8. * np.abs(u)**3,
8. * (1 - np.abs(u))**3 / 3.)
z[np.abs(u) > 1] = 0
return z
# Kernel functions for the kernel density estimate used by QuantReg.fit
# when building the asymptotic covariance matrix (Greene 2008, Table 14.2).
# Keys match the `kernel` argument of QuantReg.fit; each maps an array u
# of scaled residuals to kernel weights.
kernels = {}
kernels['biw'] = lambda u: 15. / 16 * (1 - u**2)**2 * np.where(np.abs(u) <= 1, 1, 0)
kernels['cos'] = lambda u: np.where(np.abs(u) <= .5, 1 + np.cos(2 * np.pi * u), 0)
kernels['epa'] = lambda u: 3. / 4 * (1-u**2) * np.where(np.abs(u) <= 1, 1, 0)
kernels['gau'] = lambda u: norm.pdf(u)
kernels['par'] = _parzen
# Additional kernels kept for reference but not currently exposed through
# QuantReg.fit (not listed in its kern_names check):
#kernels['bet'] = lambda u: np.where(np.abs(u) <= 1, .75 * (1 - u) * (1 + u), 0)
#kernels['log'] = lambda u: logistic.pdf(u) * (1 - logistic.pdf(u))
#kernels['tri'] = lambda u: np.where(np.abs(u) <= 1, 1 - np.abs(u), 0)
#kernels['trw'] = lambda u: 35. / 32 * (1 - u**2)**3 * np.where(np.abs(u) <= 1, 1, 0)
#kernels['uni'] = lambda u: 1. / 2 * np.where(np.abs(u) <= 1, 1, 0)
def hall_sheather(n, q, alpha=.05):
    """Hall-Sheather (1988) bandwidth for the sparsity estimate.

    ``n`` is the number of observations, ``q`` the quantile and ``alpha``
    the significance level entering via the normal quantile.
    """
    z = norm.ppf(q)
    numerator = 1.5 * norm.pdf(z) ** 2.
    denominator = 2. * z ** 2. + 1.
    return (n ** (-1. / 3) * norm.ppf(1. - alpha / 2.) ** (2. / 3)
            * (numerator / denominator) ** (1. / 3))
def bofinger(n, q):
    """Bofinger (1975) bandwidth for the sparsity estimate."""
    z = norm.ppf(q)
    ratio = (9. / 2 * norm.pdf(2 * z) ** 4) / ((2 * z ** 2 + 1) ** 2)
    return n ** (-1. / 5) * ratio ** (1. / 5)
def chamberlain(n, q, alpha=.05):
    """Chamberlain (1994) bandwidth for the sparsity estimate."""
    std_err = np.sqrt(q * (1 - q) / n)
    return norm.ppf(1 - alpha / 2) * std_err
class QuantRegResults(RegressionResults):
    '''Results instance for the QuantReg model'''

    @cache_readonly
    def prsquared(self):
        # Koenker-Machado pseudo R-squared: 1 minus the ratio of the
        # weighted absolute residuals of the fit to those of the
        # unconditional q-th sample quantile.
        q = self.q
        endog = self.model.endog
        e = self.resid
        e = np.where(e < 0, (1 - q) * e, q * e)
        e = np.abs(e)
        ered = endog - stats.scoreatpercentile(endog, q * 100)
        ered = np.where(ered < 0, (1 - q) * ered, q * ered)
        ered = np.abs(ered)
        return 1 - np.sum(e) / np.sum(ered)

    #@cache_readonly
    def scale(self):
        # Unit scale; the caching decorator was deliberately commented out
        # in the original source, so this stays a plain method.
        return 1.

    # The classical likelihood-based and least-squares goodness-of-fit
    # statistics below are not defined for quantile regression, so they
    # all report NaN.

    @cache_readonly
    def bic(self):
        return np.nan

    @cache_readonly
    def aic(self):
        return np.nan

    @cache_readonly
    def llf(self):
        return np.nan

    @cache_readonly
    def rsquared(self):
        return np.nan

    @cache_readonly
    def rsquared_adj(self):
        return np.nan

    @cache_readonly
    def mse(self):
        return np.nan

    @cache_readonly
    def mse_model(self):
        return np.nan

    @cache_readonly
    def mse_total(self):
        return np.nan

    @cache_readonly
    def centered_tss(self):
        return np.nan

    @cache_readonly
    def uncentered_tss(self):
        return np.nan

    # Heteroskedasticity-consistent standard errors are not implemented
    # for quantile regression.

    @cache_readonly
    def HC0_se(self):
        raise NotImplementedError

    @cache_readonly
    def HC1_se(self):
        raise NotImplementedError

    @cache_readonly
    def HC2_se(self):
        raise NotImplementedError

    @cache_readonly
    def HC3_se(self):
        raise NotImplementedError

    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """Summarize the Regression Results

        Parameters
        ----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in p the number of regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        #TODO: import where we need it (for now), add as cached attributes
        from statsmodels.stats.stattools import (jarque_bera,
                                                 omni_normtest, durbin_watson)
        jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
        omni, omnipv = omni_normtest(self.wresid)

        eigvals = self.eigenvals
        condno = self.condition_number

        self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
                          omni=omni, omnipv=omnipv, condno=condno,
                          mineigval=eigvals[0])

        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Method:', ['Least Squares']),
                    ('Date:', None),
                    ('Time:', None)
                    ]

        top_right = [('Pseudo R-squared:', ["%#8.4g" % self.prsquared]),
                     ('Bandwidth:', ["%#8.4g" % self.bandwidth]),
                     ('Sparsity:', ["%#8.4g" % self.sparsity]),
                     ('No. Observations:', None),
                     ('Df Residuals:', None),  # [self.df_resid]), #TODO: spelling
                     ('Df Model:', None)  # [self.df_model])
                     ]

        # Diagnostics tables are currently disabled (see commented call
        # below) but kept so they can be re-enabled easily.
        diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
                      ('Prob(Omnibus):', ["%#6.3f" % omnipv]),
                      ('Skew:', ["%#6.3f" % skew]),
                      ('Kurtosis:', ["%#6.3f" % kurtosis])
                      ]

        diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
                       ('Jarque-Bera (JB):', ["%#8.3f" % jb]),
                       ('Prob(JB):', ["%#8.3g" % jbpv]),
                       ('Cond. No.', ["%#8.3g" % condno])
                       ]

        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Regression Results"

        # create summary table instance
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                             yname=yname, xname=xname, title=title)
        # Bug fix: honour the caller-supplied `alpha` instead of the
        # hard-coded .05 the original passed.
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                              use_t=True)

        # smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
        #                      yname=yname, xname=xname, title="")

        # add warnings/notes, added to text format only
        etext = []
        if eigvals[-1] < 1e-10:
            wstr = "The smallest eigenvalue is %6.3g. This might indicate "
            wstr += "that there are\n"
            wstr += "strong multicollinearity problems or that the design "
            wstr += "matrix is singular."
            wstr = wstr % eigvals[-1]
            etext.append(wstr)
        elif condno > 1000:  # TODO: what is recommended
            wstr = "The condition number is large, %6.3g. This might "
            wstr += "indicate that there are\n"
            wstr += "strong multicollinearity or other numerical "
            wstr += "problems."
            wstr = wstr % condno
            etext.append(wstr)

        if etext:
            smry.add_extra_txt(etext)

        return smry
| detrout/debian-statsmodels | statsmodels/regression/quantile_regression.py | Python | bsd-3-clause | 15,300 | [
"Gaussian"
] | a9f5a6ee62852ece7dfd414a1d280fc90d53fdf87a72133f7204581d28d4e60c |
import logging
import re
from importlib import import_module
import pycountry
from django.apps import apps
from django.conf import settings
from django.contrib.sessions.models import Session
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template.defaultfilters import slugify
from django.test.client import RequestFactory
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from modelcluster.fields import ParentalKey
from user_agents import parse
from wagtail.admin.edit_handlers import (
FieldPanel, FieldRowPanel, PageChooserPanel)
from wagtail_personalisation.utils import get_client_ip
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
logger = logging.getLogger(__name__)
def get_geoip_module():
    """Return Django's GeoIP2 class, or None (with a logged hint) when the
    optional GeoIP2 dependency is not installed."""
    try:
        from django.contrib.gis.geoip2 import GeoIP2
    except ImportError:
        logger.exception(
            'GeoIP module is disabled. To use GeoIP for the origin\n'
            'country personaliastion rule please set it up as per '
            'documentation:\n'
            'https://docs.djangoproject.com/en/stable/ref/contrib/gis/'
            'geoip2/.\n'
            'Wagtail-personalisation also works with Cloudflare and\n'
            'CloudFront country detection, so you should not see this\n'
            'warning if you use one of those.')
        return None
    return GeoIP2
class AbstractBaseRule(models.Model):
    """Base for creating rules to segment users with."""
    # Font Awesome icon class used when rendering the rule in the admin UI.
    icon = 'fa-circle-o'
    # NOTE(review): presumably marks rules whose outcome can be evaluated
    # once and persisted (VisitCountRule sets this True) — confirm against
    # the segment evaluation code before relying on it.
    static = False

    # Reverse accessor on Segment is "<app_label>_<ruleclass>s".
    segment = ParentalKey(
        'wagtail_personalisation.Segment',
        related_name="%(app_label)s_%(class)ss",
    )

    class Meta:
        abstract = True
        verbose_name = 'Abstract segmentation rule'

    def __str__(self):
        return str(self._meta.verbose_name)

    def test_user(self):
        """Test if the user matches this rule."""
        # Base implementation matches everyone; subclasses override this.
        return True

    def encoded_name(self):
        """Return a string with a slug for the rule."""
        return slugify(str(self).lower())

    def description(self):
        """Return a description explaining the functionality of the rule.
        Used in the segmentation dashboard.

        :returns: A dict containing a title and a value
        :rtype: dict
        """
        description = {
            'title': _('Abstract segmentation rule'),
            'value': '',
        }

        return description

    @classmethod
    def get_descendant_models(cls):
        """Return every installed model that subclasses AbstractBaseRule."""
        return [model for model in apps.get_models()
                if issubclass(model, AbstractBaseRule)]
class TimeRule(AbstractBaseRule):
    """Time rule to segment users based on a start and end time.

    Matches when the time a request is made falls between the
    set start time and end time.
    """
    icon = 'fa-clock-o'

    start_time = models.TimeField(_("Starting time"))
    end_time = models.TimeField(_("Ending time"))

    panels = [
        FieldRowPanel([
            FieldPanel('start_time'),
            FieldPanel('end_time'),
        ]),
    ]

    class Meta:
        verbose_name = _('Time Rule')

    def test_user(self, request=None):
        # NOTE(review): with USE_TZ enabled timezone.now() is UTC, so this
        # compares against the UTC clock time — confirm that is intended.
        # NOTE(review): a window that wraps midnight (start_time >
        # end_time) can never match with this chained comparison.
        return self.start_time <= timezone.now().time() <= self.end_time

    def description(self):
        """Return the title/value dict shown on the segment dashboard."""
        return {
            'title': _('These users visit between'),
            'value': _('{} and {}').format(
                self.start_time.strftime("%H:%M"),
                self.end_time.strftime("%H:%M")
            ),
        }
class DayRule(AbstractBaseRule):
    """Day rule to segment users based on the day(s) of a visit.

    Matches when the day a request is made matches with the days
    set in the rule.
    """
    icon = 'fa-calendar-check-o'

    mon = models.BooleanField(_("Monday"), default=False)
    tue = models.BooleanField(_("Tuesday"), default=False)
    wed = models.BooleanField(_("Wednesday"), default=False)
    thu = models.BooleanField(_("Thursday"), default=False)
    fri = models.BooleanField(_("Friday"), default=False)
    sat = models.BooleanField(_("Saturday"), default=False)
    sun = models.BooleanField(_("Sunday"), default=False)

    panels = [
        FieldPanel('mon'),
        FieldPanel('tue'),
        FieldPanel('wed'),
        FieldPanel('thu'),
        FieldPanel('fri'),
        FieldPanel('sat'),
        FieldPanel('sun'),
    ]

    class Meta:
        verbose_name = _('Day Rule')

    def test_user(self, request=None):
        """Return the flag for today's weekday (Monday == index 0)."""
        return [self.mon, self.tue, self.wed, self.thu,
                self.fri, self.sat, self.sun][timezone.now().date().weekday()]

    def description(self):
        """Return the title/value dict shown on the segment dashboard."""
        days = (
            ('mon', self.mon), ('tue', self.tue), ('wed', self.wed),
            ('thu', self.thu), ('fri', self.fri), ('sat', self.sat),
            ('sun', self.sun),
        )

        chosen_days = [day_name for day_name, chosen in days if chosen]

        return {
            'title': _('These users visit on'),
            # Join the list directly; the original wrapped it in a no-op
            # list comprehension.
            'value': ", ".join(chosen_days).title(),
        }
class ReferralRule(AbstractBaseRule):
    """Referral rule to segment users based on a regex test.

    Matches when the referral header in a request matches with
    the set regex test.
    """
    icon = 'fa-globe'

    regex_string = models.TextField(
        _("Regular expression to match the referrer"))

    panels = [
        FieldPanel('regex_string'),
    ]

    class Meta:
        verbose_name = _('Referral Rule')

    def test_user(self, request):
        """Match when the request's Referer header satisfies the regex."""
        referer = request.META.get('HTTP_REFERER')
        if referer is None:
            return False
        return bool(re.compile(self.regex_string).search(referer))

    def description(self):
        """Return the title/value dict shown on the segment dashboard."""
        return {
            'title': _('These visits originate from'),
            'value': self.regex_string,
            'code': True
        }
class VisitCountRule(AbstractBaseRule):
    """Visit count rule to segment users based on amount of visits to a
    specified page.

    Matches when the operator and count validate True
    when visiting the set page.
    """
    icon = 'fa-calculator'
    static = True

    OPERATOR_CHOICES = (
        ('more_than', _("More than")),
        ('less_than', _("Less than")),
        ('equal_to', _("Equal to")),
    )
    operator = models.CharField(max_length=20,
                                choices=OPERATOR_CHOICES, default="more_than")
    count = models.PositiveSmallIntegerField(default=0, null=True)
    counted_page = models.ForeignKey(
        'wagtailcore.Page',
        null=False,
        blank=False,
        on_delete=models.CASCADE,
        related_name='+',
    )

    panels = [
        PageChooserPanel('counted_page'),
        FieldRowPanel([
            FieldPanel('operator'),
            FieldPanel('count'),
        ]),
    ]

    class Meta:
        verbose_name = _('Visit count Rule')

    def _get_user_session(self, user):
        """Return the active SessionStore belonging to *user*, or a fresh
        empty store when none is found.

        NOTE(review): this decodes every session in the session store
        (O(total sessions)); may need optimisation on large sites.
        """
        sessions = Session.objects.iterator()
        for session in sessions:
            session_data = session.get_decoded()
            if session_data.get('_auth_user_id') == str(user.id):
                return SessionStore(session_key=session.session_key)
        return SessionStore()

    def test_user(self, request, user=None):
        """Return True when operator/count matches the recorded visit
        count for ``counted_page`` (for *user* or the current request)."""
        # Local import for cyclic import
        from wagtail_personalisation.adapters import (
            get_segment_adapter, SessionSegmentsAdapter, SEGMENT_ADAPTER_CLASS)

        # Django formsets don't honour 'required' fields so check rule is valid
        try:
            self.counted_page
        except ObjectDoesNotExist:
            return False

        if user:
            # Create a fake request so we can use the adapter
            request = RequestFactory().get('/')
            request.user = user

            # If we're using the session adapter check for an active session
            if SEGMENT_ADAPTER_CLASS == SessionSegmentsAdapter:
                request.session = self._get_user_session(user)
            else:
                request.session = SessionStore()
        elif not request:
            # Return false if we don't have a user or a request
            return False

        operator = self.operator
        segment_count = self.count

        adapter = get_segment_adapter(request)
        visit_count = adapter.get_visit_count(self.counted_page)
        # NOTE(review): because 0 is falsy, ``equal_to`` with count == 0
        # can never match a page with zero recorded visits.
        if visit_count and operator == "more_than":
            if visit_count > segment_count:
                return True
        elif visit_count and operator == "less_than":
            if visit_count < segment_count:
                return True
        elif visit_count and operator == "equal_to":
            if visit_count == segment_count:
                return True
        return False

    def description(self):
        """Return the title/value dict shown on the segment dashboard."""
        return {
            'title': _('These users visited {}').format(
                self.counted_page
            ),
            'value': _('{} {} times').format(
                self.get_operator_display(),
                self.count
            ),
        }

    def get_column_header(self):
        """Return the "Visit count - <page>" label for this rule."""
        return "Visit count - %s" % self.counted_page

    def get_user_info_string(self, user):
        """Return *user*'s visit count for ``counted_page`` as a string."""
        # Local import for cyclic import
        from wagtail_personalisation.adapters import (
            get_segment_adapter, SessionSegmentsAdapter, SEGMENT_ADAPTER_CLASS)

        # Create a fake request so we can use the adapter
        request = RequestFactory().get('/')
        request.user = user

        # If we're using the session adapter check for an active session
        if SEGMENT_ADAPTER_CLASS == SessionSegmentsAdapter:
            request.session = self._get_user_session(user)
        else:
            request.session = SessionStore()

        adapter = get_segment_adapter(request)
        visit_count = adapter.get_visit_count(self.counted_page)
        return str(visit_count)
class QueryRule(AbstractBaseRule):
    """Query rule to segment users based on matching queries.

    Matches when both the configured parameter and value appear in
    the request's query string.
    """
    icon = 'fa-link'

    parameter = models.SlugField(_("The query parameter to search for"),
                                 max_length=20)
    value = models.SlugField(_("The value of the parameter to match"),
                             max_length=20)

    panels = [
        FieldPanel('parameter'),
        FieldPanel('value'),
    ]

    class Meta:
        verbose_name = _('Query Rule')

    def test_user(self, request):
        """Match when the query string carries parameter=value."""
        supplied = request.GET.get(self.parameter, '')
        return supplied == self.value

    def description(self):
        """Return the title/value dict shown on the segment dashboard."""
        example_query = _('?{}={}').format(self.parameter, self.value)
        return {
            'title': _('These users used a URL with the query'),
            'value': example_query,
            'code': True
        }
class DeviceRule(AbstractBaseRule):
    """Device rule to segment users based on matching devices.

    Matches when the set device type matches with the one present
    in the request user agent headers.
    """
    icon = 'fa-tablet'

    mobile = models.BooleanField(_("Mobile phone"), default=False)
    tablet = models.BooleanField(_("Tablet"), default=False)
    desktop = models.BooleanField(_("Desktop"), default=False)

    panels = [
        FieldPanel('mobile'),
        FieldPanel('tablet'),
        FieldPanel('desktop'),
    ]

    class Meta:
        verbose_name = _('Device Rule')

    def test_user(self, request=None):
        """Match when the request's device class is enabled on this rule."""
        # Robustness fix: use .get so a request without a User-Agent header
        # (some bots, bare test clients) no longer raises KeyError; an
        # empty UA string should fall through to the final `return False`.
        ua_header = request.META.get('HTTP_USER_AGENT', '')
        user_agent = parse(ua_header)

        if user_agent.is_mobile:
            return self.mobile
        if user_agent.is_tablet:
            return self.tablet
        if user_agent.is_pc:
            return self.desktop
        return False
class UserIsLoggedInRule(AbstractBaseRule):
    """User is logged in rule to segment users based on their
    authentication status.

    Matches when the user's authentication state equals the stored flag.
    """
    icon = 'fa-user'

    is_logged_in = models.BooleanField(default=False)

    panels = [
        FieldPanel('is_logged_in'),
    ]

    class Meta:
        verbose_name = _('Logged in Rule')

    def test_user(self, request=None):
        """Match when request.user's auth state equals ``is_logged_in``."""
        return request.user.is_authenticated == self.is_logged_in

    def description(self):
        """Return the title/value dict shown on the segment dashboard."""
        if self.is_logged_in:
            status = _('Logged in')
        else:
            status = _('Not logged in')
        return {
            'title': _('These visitors are'),
            'value': status,
        }
# (alpha-2 code, country name) choices built from the pycountry database;
# used for OriginCountryRule.country.
COUNTRY_CHOICES = [(country.alpha_2.lower(), country.name)
                   for country in pycountry.countries]


class OriginCountryRule(AbstractBaseRule):
    """
    Test user against the country or origin of their request.

    Using this rule requires setting up GeoIP2 on Django or using
    CloudFlare or CloudFront geolocation detection.
    """
    country = models.CharField(
        max_length=2, choices=COUNTRY_CHOICES,
        help_text=_("Select origin country of the request that this rule will "
                    "match against. This rule will only work if you use "
                    "Cloudflare or CloudFront IP geolocation or if GeoIP2 "
                    "module is configured.")
    )

    class Meta:
        verbose_name = _("origin country rule")

    def get_cloudflare_country(self, request):
        """
        Get country code that has been detected by Cloudflare.

        Returns None when the header is absent.

        Guide to the functionality:
        https://support.cloudflare.com/hc/en-us/articles/200168236-What-does-Cloudflare-IP-Geolocation-do-
        """
        try:
            return request.META['HTTP_CF_IPCOUNTRY'].lower()
        except KeyError:
            pass

    def get_cloudfront_country(self, request):
        """Country code from CloudFront's viewer-country header, or None."""
        try:
            return request.META['HTTP_CLOUDFRONT_VIEWER_COUNTRY'].lower()
        except KeyError:
            pass

    def get_geoip_country(self, request):
        """Country code resolved via the GeoIP2 database.

        Returns False (not None) when GeoIP2 is unavailable; get_country
        treats both as falsy.
        NOTE(review): GeoIP2().country_code() may return None for an
        unknown IP, in which case .lower() would raise — confirm.
        """
        GeoIP2 = get_geoip_module()
        if GeoIP2 is None:
            return False
        return GeoIP2().country_code(get_client_ip(request)).lower()

    def get_country(self, request):
        """Return the first country code any detector reports, else None."""
        # Prioritise CloudFlare and CloudFront country detection over GeoIP.
        functions = (
            self.get_cloudflare_country,
            self.get_cloudfront_country,
            self.get_geoip_country,
        )

        for function in functions:
            result = function(request)

            if result:
                return result

    def test_user(self, request=None):
        """Match when the detected origin country equals the stored one."""
        return (self.get_country(request) or '') == self.country.lower()
| LabD/wagtail-personalisation | src/wagtail_personalisation/rules.py | Python | mit | 14,554 | [
"VisIt"
] | 5d3f85b4b719fdb92ad82542c60007e133adb25dc9cadcb3524b7bbfc4ec2f1b |
"""
Adapted from:
http://blog.macuyiko.com/post/2016/how-to-send-html-mails-with-oauth2-and-gmail-in-python.html
1. Generate and authorize an OAuth2 (generate_oauth2_token)
2. Generate a new access tokens using a refresh token(refresh_token)
3. Generate an OAuth2 string to use for login (access_token)
"""
import os
import base64
import json
import getpass
try:
from urllib.parse import urlencode, quote, unquote
from urllib.request import urlopen
except ImportError:
from urllib import urlencode, quote, unquote, urlopen
try:
input = raw_input
except NameError:
pass
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'


def command_to_url(command):
    """Build the full Google accounts URL for the given command path."""
    return '{}/{}'.format(GOOGLE_ACCOUNTS_BASE_URL, command)
def url_format_params(params):
    """Serialize *params* as a key-sorted, percent-encoded query string."""
    fragments = ['%s=%s' % (key, quote(value, safe='~-._'))
                 for key, value in sorted(params.items())]
    return '&'.join(fragments)
def generate_permission_url(client_id):
    """Return the consent-screen URL the user must visit to authorize."""
    params = {
        'client_id': client_id,
        'redirect_uri': REDIRECT_URI,
        'scope': 'https://mail.google.com/',
        'response_type': 'code',
    }
    return '%s?%s' % (command_to_url('o/oauth2/auth'),
                      url_format_params(params))
def call_authorize_tokens(client_id, client_secret, authorization_code):
    """Exchange an authorization code for access/refresh tokens (network)."""
    params = {
        'client_id': client_id,
        'client_secret': client_secret,
        'code': authorization_code,
        'redirect_uri': REDIRECT_URI,
        'grant_type': 'authorization_code',
    }
    encoded_params = urlencode(params).encode('UTF-8')
    raw = urlopen(command_to_url('o/oauth2/token'), encoded_params).read()
    return json.loads(raw.decode('UTF-8'))
def call_refresh_token(client_id, client_secret, refresh_token):
    """Trade a stored refresh token for a new access token (network)."""
    params = {
        'client_id': client_id,
        'client_secret': client_secret,
        'refresh_token': refresh_token,
        'grant_type': 'refresh_token',
    }
    encoded_params = urlencode(params).encode('UTF-8')
    raw = urlopen(command_to_url('o/oauth2/token'), encoded_params).read()
    return json.loads(raw.decode('UTF-8'))
def generate_oauth2_string(username, access_token, as_base64=False):
    """Build the SASL XOAUTH2 initial client response string.

    With ``as_base64`` the string is returned base64-encoded, as required
    by SMTP/IMAP AUTH commands.
    """
    auth = 'user=%s\1auth=Bearer %s\1\1' % (username, access_token)
    if not as_base64:
        return auth
    return base64.b64encode(auth.encode('ascii')).decode('ascii')
def get_authorization(google_client_id, google_client_secret):
    """Run the interactive OAuth2 consent flow once.

    Prints the consent URL, prompts for the verification code and returns
    (refresh_token, access_token, expires_in).
    """
    print('Navigate to the following URL to auth:\n' +
          generate_permission_url(google_client_id))
    verification_code = input('Enter verification code: ')
    response = call_authorize_tokens(google_client_id, google_client_secret,
                                     verification_code)
    return (response['refresh_token'], response['access_token'],
            response['expires_in'])
def refresh_authorization(google_client_id, google_client_secret, google_refresh_token):
    """Return (access_token, expires_in) from a stored refresh token."""
    tokens = call_refresh_token(google_client_id, google_client_secret,
                                google_refresh_token)
    return tokens['access_token'], tokens['expires_in']
def get_oauth_string(user, oauth2_info):
    """Return the base64 XOAUTH2 string for *user* using stored creds."""
    token, _expires = refresh_authorization(**oauth2_info)
    return generate_oauth2_string(user, token, as_base64=True)
def get_oauth2_info(oauth2_file):
    """Load OAuth2 credentials from *oauth2_file*, prompting for and
    caching them on first use.

    Three cases:
    - the file exists in yagmail's own flat format: return it directly;
    - the file is a Google "client secret" download (top-level
      ``installed`` key): extract the app credentials and run the consent
      flow;
    - no file: prompt for everything, then run the consent flow.
    In the latter two cases the resulting flat dict is written back to
    *oauth2_file* and returned.
    """
    oauth2_file = os.path.expanduser(oauth2_file)
    if os.path.isfile(oauth2_file):
        with open(oauth2_file) as f:
            oauth2_info = json.load(f)
        if "installed" not in oauth2_info:
            # Already cached in yagmail's own format.
            return oauth2_info
        installed = oauth2_info["installed"]
        email_addr = input("Your 'email address': ")
        google_client_id = installed["client_id"]
        google_client_secret = installed["client_secret"]
    else:
        print("If you do not have an app registered for your email sending purposes, visit:")
        print("https://console.developers.google.com")
        print("and create a new project.\n")
        email_addr = input("Your 'email address': ")
        google_client_id = input("Your 'google_client_id': ")
        google_client_secret = getpass.getpass("Your 'google_client_secret': ")

    # Shared tail (was duplicated verbatim in both branches): run the
    # consent flow once and cache the flat credential dict.
    google_refresh_token, _, _ = get_authorization(google_client_id,
                                                   google_client_secret)
    oauth2_info = {
        "email_address": email_addr,
        "google_client_id": google_client_id.strip(),
        "google_client_secret": google_client_secret.strip(),
        "google_refresh_token": google_refresh_token.strip(),
    }
    with open(oauth2_file, "w") as f:
        json.dump(oauth2_info, f)
    return oauth2_info
| kootenpv/yagmail | yagmail/oauth2.py | Python | mit | 5,292 | [
"VisIt"
] | e7856dc140646d29ab0134a75659c21bec4ab92b30eab8ff15040b04274662ef |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Module contains classes presenting Element and Specie (Element + oxidation
state) and PeriodicTable.
"""
__author__ = "Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import os
import re
import json
from io import open
from pymatgen.core.units import Mass, Length, unitized
from monty.design_patterns import singleton, cached_class
from pymatgen.util.string_utils import formula_double_format
from pymatgen.serializers.json_coders import PMGSONable
from functools import total_ordering
#Loads element data from json file
with open(os.path.join(os.path.dirname(__file__), "periodic_table.json"), "rt"
          ) as f:
    _pt_data = json.load(f)

# Number of elements in each row (period) of the periodic table.
_pt_row_sizes = (2, 8, 8, 18, 18, 32, 32)

# One past the largest atomic number indexable in _z2symbol below.
_MAXZ = 119

# List with the correspondence Z --> Symbol
# We use a list instead of a mapping so that we can select slices easily.
# Index 0 is unused and stays None; atomic numbers without data also
# remain None.
_z2symbol = _MAXZ * [None]
for (symbol, data) in _pt_data.items():
    _z2symbol[data["Atomic no"]] = symbol
def all_symbols():
    """tuple with element symbols ordered by Z."""
    # Collect symbols until the first missing entry so that no None ends
    # up in the returned tuple.
    symbols = []
    z = 1
    while z <= _MAXZ:
        sym = symbol_from_Z(z)
        if sym is None:
            break
        symbols.append(sym)
        z += 1
    return tuple(symbols)
def symbol_from_Z(z):
    """
    Return the symbol of the element from the atomic number.

    Args:
        z (int): Atomic number or slice object

    Returns:
        The element symbol (str), or None if no element has that atomic
        number.  Because the lookup table is a list, passing a slice
        returns a list of symbols.
    """
    return _z2symbol[z]
_CHARS2L = {
"s": 0,
"p": 1,
"d": 2,
"f": 3,
"g": 4,
"h": 5,
"i": 6,
}
def char2l(char):
"""Concert a character (s, p, d ..) into the angular momentum l (int)."""
return _CHARS2L[char]
# Every element symbol present in the bundled periodic_table.json.
ALL_ELEMENT_SYMBOLS = set(_pt_data.keys())
@cached_class
@total_ordering
class Element(object):
    """
    Basic immutable element object with all relevant properties.
    Only one instance of Element for each symbol is stored after creation,
    ensuring that a particular element behaves like a singleton. For all
    attributes, missing data (i.e., data for which is not available) is
    represented by a None unless otherwise stated.

    Args:
        symbol (str): Element symbol, e.g., "H", "Fe"

    .. attribute:: Z
        Atomic number
    .. attribute:: symbol
        Element symbol
    .. attribute:: X
        Pauling electronegativity. Elements without an electronegativity
        number are assigned a value of zero by default.
    .. attribute:: number
        Alternative attribute for atomic number
    .. attribute:: max_oxidation_state
        Maximum oxidation state for element
    .. attribute:: min_oxidation_state
        Minimum oxidation state for element
    .. attribute:: oxidation_states
        Tuple of all known oxidation states
    .. attribute:: common_oxidation_states
        Tuple of all common oxidation states
    .. attribute:: full_electronic_structure
        Full electronic structure as tuple.
        E.g., The electronic structure for Fe is represented as:
        [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
        (3, "d", 6), (4, "s", 2)]
    .. attribute:: row
        Returns the periodic table row of the element.
    .. attribute:: group
        Returns the periodic table group of the element.
    .. attribute:: block
        Return the block character "s,p,d,f"
    .. attribute:: is_noble_gas
        True if element is noble gas.
    .. attribute:: is_transition_metal
        True if element is a transition metal.
    .. attribute:: is_rare_earth_metal
        True if element is a rare earth metal.
    .. attribute:: is_metalloid
        True if element is a metalloid.
    .. attribute:: is_alkali
        True if element is an alkali metal.
    .. attribute:: is_alkaline
        True if element is an alkaline earth metal (group II).
    .. attribute:: is_halogen
        True if element is a halogen.
    .. attribute:: is_lanthanoid
        True if element is a lanthanoid.
    .. attribute:: is_actinoid
        True if element is an actinoid.
    .. attribute:: name
        Long name for element. E.g., "Hydrogen".
    .. attribute:: atomic_mass
        Atomic mass for the element.
    .. attribute:: atomic_radius
        Atomic radius for the element. This is the empirical value. Data is
        obtained from
        http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
    .. attribute:: atomic_radius_calculated
        Calculated atomic radius for the element. This is the empirical value.
        Data is obtained from
        http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
    .. attribute:: van_der_waals_radius
        Van der Waals radius for the element. This is the empirical
        value. Data is obtained from
        http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
    .. attribute:: mendeleev_no
        Mendeleev number
    .. attribute:: electrical_resistivity
        Electrical resistivity
    .. attribute:: velocity_of_sound
        Velocity of sound
    .. attribute:: reflectivity
        Reflectivity
    .. attribute:: refractive_index
        Refractive index
    .. attribute:: poissons_ratio
        Poisson's ratio
    .. attribute:: molar_volume
        Molar volume
    .. attribute:: electronic_structure
        Electronic structure. Simplified form with HTML formatting.
        E.g., The electronic structure for Fe is represented as
        [Ar].3d<sup>6</sup>.4s<sup>2</sup>
    .. attribute:: thermal_conductivity
        Thermal conductivity
    .. attribute:: boiling_point
        Boiling point
    .. attribute:: melting_point
        Melting point
    .. attribute:: critical_temperature
        Critical temperature
    .. attribute:: superconduction_temperature
        Superconduction temperature
    .. attribute:: liquid_range
        Liquid range
    .. attribute:: bulk_modulus
        Bulk modulus
    .. attribute:: youngs_modulus
        Young's modulus
    .. attribute:: brinell_hardness
        Brinell hardness
    .. attribute:: rigidity_modulus
        Rigidity modulus
    .. attribute:: mineral_hardness
        Mineral hardness
    .. attribute:: vickers_hardness
        Vicker's hardness
    .. attribute:: density_of_solid
        Density of solid phase
    .. attribute:: coefficient_of_linear_thermal_expansion
        Coefficient of linear thermal expansion
    .. attribute:: average_ionic_radius
        Average ionic radius for element in ang. The average is taken over all
        oxidation states of the element for which data is present.
    .. attribute:: ionic_radii
        All ionic radii of the element as a dict of
        {oxidation state: ionic radii}. Radii are given in ang.
    """

    def __init__(self, symbol):
        self._symbol = "%s" % symbol
        # Raises KeyError for invalid symbols (shared module-level data).
        self._data = _pt_data[symbol]
        # Store key variables for quick access.
        self._z = self._data["Atomic no"]
        self._x = self._data.get("X", 0)
        # Bulk-load optional scalar properties.  The JSON keys are the
        # capitalized, space-separated forms of these attribute names,
        # e.g. "mendeleev_no" -> "Mendeleev no".
        for a in ["name", "mendeleev_no", "electrical_resistivity",
                  "velocity_of_sound", "reflectivity",
                  "refractive_index", "poissons_ratio", "molar_volume",
                  "electronic_structure", "thermal_conductivity",
                  "boiling_point", "melting_point",
                  "critical_temperature", "superconduction_temperature",
                  "liquid_range", "bulk_modulus", "youngs_modulus",
                  "brinell_hardness", "rigidity_modulus",
                  "mineral_hardness", "vickers_hardness",
                  "density_of_solid", "atomic_radius_calculated",
                  "van_der_waals_radius",
                  "coefficient_of_linear_thermal_expansion"]:
            kstr = a.capitalize().replace("_", " ")
            val = self._data.get(kstr, None)
            # Entries recorded in the JSON as "no data ..." become None.
            if str(val).startswith("no data"):
                val = None
            self.__setattr__(a, val)
        # Radius and mass carry units (see pymatgen.core.units).
        if str(self._data.get("Atomic radius",
                              "no data")).startswith("no data"):
            self.atomic_radius = None
        else:
            self.atomic_radius = Length(self._data["Atomic radius"], "ang")
        self.atomic_mass = Mass(self._data["Atomic mass"], "amu")

    def __getnewargs__(self):
        # function used by pickle to recreate object
        return self._symbol,

    def __getinitargs__(self):
        # function used by pickle to recreate object
        return self._symbol,

    @property
    def data(self):
        """
        Returns dict of data for element.
        """
        # Returns a copy so callers cannot mutate the cached element's data.
        return self._data.copy()

    @property
    @unitized("ang")
    def average_ionic_radius(self):
        """
        Average ionic radius for element (with units). The average is taken
        over all oxidation states of the element for which data is present.
        """
        if "Ionic radii" in self._data:
            radii = self._data["Ionic radii"]
            return sum(radii.values()) / len(radii)
        else:
            return 0

    @property
    @unitized("ang")
    def ionic_radii(self):
        """
        All ionic radii of the element as a dict of
        {oxidation state: ionic radii}. Radii are given in ang.
        """
        if "Ionic radii" in self._data:
            # JSON object keys are strings; convert to int oxidation states.
            return {int(k): v for k, v in self._data["Ionic radii"].items()}
        else:
            return {}

    @property
    def Z(self):
        """Atomic number"""
        return self._z

    @property
    def symbol(self):
        """Element symbol"""
        return self._symbol

    @property
    def X(self):
        """Electronegativity (0 when no value is tabulated)."""
        return self._x

    @property
    def number(self):
        """Alternative attribute for atomic number"""
        return self.Z

    @property
    def max_oxidation_state(self):
        """Maximum oxidation state for element (0 if none tabulated)."""
        if "Oxidation states" in self._data:
            return max(self._data["Oxidation states"])
        return 0

    @property
    def min_oxidation_state(self):
        """Minimum oxidation state for element (0 if none tabulated)."""
        if "Oxidation states" in self._data:
            return min(self._data["Oxidation states"])
        return 0

    @property
    def oxidation_states(self):
        """Tuple of all known oxidation states"""
        return tuple(self._data.get("Oxidation states", list()))

    @property
    def common_oxidation_states(self):
        """Tuple of all common oxidation states"""
        return tuple(self._data.get("Common oxidation states", list()))

    @property
    def full_electronic_structure(self):
        """
        Full electronic structure as tuple.
        E.g., The electronic structure for Fe is represented as:
        [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
        (3, "d", 6), (4, "s", 2)]
        """
        estr = self._data["Electronic structure"]

        def parse_orbital(orbstr):
            # "3d<sup>6</sup>" -> (3, "d", 6).  Tokens that do not match
            # (the noble-gas core, e.g. "[Ar]") are returned unchanged.
            m = re.match("(\d+)([spdfg]+)<sup>(\d+)</sup>", orbstr)
            if m:
                return int(m.group(1)), m.group(2), int(m.group(3))
            return orbstr

        data = [parse_orbital(s) for s in estr.split(".")]
        # Expand a leading noble-gas core recursively, e.g. "[Ar]" is
        # replaced with argon's own full electronic structure.
        if data[0][0] == "[":
            sym = data[0].replace("[", "").replace("]", "")
            data = Element(sym).full_electronic_structure + data[1:]
        return data

    def __eq__(self, other):
        # Equality is by atomic number only; Z uniquely identifies an element.
        if not isinstance(other, Element):
            return False
        return self._z == other._z

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Z is unique per element, so it doubles as the hash.
        return self._z

    def __repr__(self):
        return "Element " + self.symbol

    def __str__(self):
        return self.symbol

    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity. Very
        useful for getting correct formulas. For example, FeO4PLi is
        automatically sorted into LiFePO4.
        """
        if self.X != other.X:
            return self.X < other.X
        else:
            # There are cases where the electronegativity are exactly equal.
            # We then sort by symbol.
            return self.symbol < other.symbol

    @staticmethod
    def from_Z(z):
        """
        Get an element from an atomic number.

        Args:
            z (int): Atomic number

        Returns:
            Element with atomic number z.

        Raises:
            ValueError: if no element has atomic number z.
        """
        # Linear scan over the data table; the table is small (~100 entries).
        for sym, data in _pt_data.items():
            if data["Atomic no"] == z:
                return Element(sym)
        raise ValueError("No element with this atomic number %s" % z)

    @staticmethod
    def from_row_and_group(row, group):
        """
        Returns an element from a row and group number, or None if no
        element occupies that position.

        .. note::
            The 18 group number system is used, i.e., Noble gases are group 18.
        """
        for sym in _pt_data.keys():
            el = Element(sym)
            if el.row == row and el.group == group:
                return el
        return None

    @staticmethod
    def is_valid_symbol(symbol):
        """
        Returns true if symbol is a valid element symbol.

        Args:
            symbol (str): Element symbol

        Returns:
            True if symbol is a valid element (e.g., "H"). False otherwise
            (e.g., "Zebra").
        """
        return symbol in ALL_ELEMENT_SYMBOLS

    @property
    def row(self):
        """
        Returns the periodic table row of the element.
        """
        Z = self._z
        total = 0
        # Lanthanoid and actinoid series are placed on their own rows
        # (8 and 9), matching the layout used by print_periodic_table.
        if 57 <= Z <= 70:
            return 8
        elif 89 <= Z <= 102:
            return 9
        # Otherwise walk the cumulative row sizes until Z is reached.
        for i in range(len(_pt_row_sizes)):
            total += _pt_row_sizes[i]
            if total >= Z:
                return i + 1
        return 8

    @property
    def group(self):
        """
        Returns the periodic table group of the element (18-column
        numbering, computed arithmetically from Z per row span).
        """
        Z = self._z
        if Z == 1:
            return 1
        if Z == 2:
            return 18
        if 3 <= Z <= 18:
            # Rows 2-3: 8 elements each; groups 1-2 then 13-18.
            if (Z - 2) % 8 == 0:
                return 18
            elif (Z - 2) % 8 <= 2:
                return (Z - 2) % 8
            else:
                return 10 + (Z - 2) % 8
        if 19 <= Z <= 54:
            # Rows 4-5: full 18-column rows.
            if (Z - 18) % 18 == 0:
                return 18
            else:
                return (Z - 18) % 18
        # Rows 6+: 32-element rows (including the f-block inset).
        if (Z - 54) % 32 == 0:
            return 18
        elif (Z - 54) % 32 >= 17:
            return (Z - 54) % 32 - 14
        else:
            return (Z - 54) % 32

    @property
    def block(self):
        """
        Return the block character "s,p,d,f"
        """
        block = ""
        if self.group in [1, 2]:
            block = "s"
        elif self.group in range(13, 19):
            block = "p"
        elif self.is_actinoid or self.is_lanthanoid:
            block = "f"
        elif self.group in range(3, 13):
            block = "d"
        else:
            # Falls through with an empty string for anything unclassified.
            print("unable to determine block")
        return block

    @property
    def is_noble_gas(self):
        """
        True if element is noble gas.
        """
        return self._z in (2, 10, 18, 36, 54, 86, 118)

    @property
    def is_transition_metal(self):
        """
        True if element is a transition metal.
        """
        ns = list(range(21, 31))
        ns.extend(list(range(39, 49)))
        ns.append(57)
        ns.extend(list(range(72, 81)))
        ns.append(89)
        ns.extend(list(range(104, 113)))
        return self._z in ns

    @property
    def is_rare_earth_metal(self):
        """
        True if element is a rare earth metal.
        """
        return self.is_lanthanoid or self.is_actinoid

    @property
    def is_metalloid(self):
        """
        True if element is a metalloid.
        """
        return self.symbol in ("B", "Si", "Ge", "As", "Sb", "Te", "Po")

    @property
    def is_alkali(self):
        """
        True if element is an alkali metal.
        """
        return self._z in (3, 11, 19, 37, 55, 87)

    @property
    def is_alkaline(self):
        """
        True if element is an alkaline earth metal (group II).
        """
        return self._z in (4, 12, 20, 38, 56, 88)

    @property
    def is_halogen(self):
        """
        True if element is a halogen.
        """
        return self._z in (9, 17, 35, 53, 85)

    @property
    def is_chalcogen(self):
        """
        True if element is a chalcogen.
        """
        return self._z in (8, 18, 34, 52, 84)

    @property
    def is_lanthanoid(self):
        """
        True if element is a lanthanoid.
        """
        return 56 < self._z < 72

    @property
    def is_actinoid(self):
        """
        True if element is an actinoid.
        """
        return 88 < self._z < 104

    def __deepcopy__(self, memo):
        # Elements are immutable singletons; "copying" just returns the
        # cached instance for this symbol.
        return Element(self.symbol)

    @staticmethod
    def from_dict(d):
        """
        Makes Element obey the general json interface used in pymatgen for
        easier serialization.
        """
        return Element(d["element"])

    def as_dict(self):
        """
        Makes Element obey the general json interface used in pymatgen for
        easier serialization.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "element": self.symbol}
@cached_class
@total_ordering
class Specie(PMGSONable):
    """
    An extension of Element with an oxidation state and other optional
    properties. Properties associated with Specie should be "idealized"
    values, not calculated values. For example, high-spin Fe2+ may be
    assigned an idealized spin of +5, but an actual Fe2+ site may be
    calculated to have a magmom of +4.5. Calculated properties should be
    assigned to Site objects, and not Specie.

    Args:
        symbol (str): Element symbol, e.g., Fe
        oxidation_state (float): Oxidation state of element, e.g., 2 or -2
        properties: Properties associated with the Specie, e.g.,
            {"spin": 5}. Defaults to None. Properties must be one of the
            Specie supported_properties.

    .. attribute:: oxi_state
        Oxidation state associated with Specie
    .. attribute:: ionic_radius
        Ionic radius of Specie (with specific oxidation state).

    .. versionchanged:: 2.6.7
        Properties are now checked when comparing two Species for equality.
    """

    # Whitelist of keys accepted in the ``properties`` dict.
    supported_properties = ("spin",)

    def __init__(self, symbol, oxidation_state, properties=None):
        self._el = Element(symbol)
        self._oxi_state = oxidation_state
        self._properties = properties if properties else {}
        for k in self._properties.keys():
            if k not in Specie.supported_properties:
                raise ValueError("{} is not a supported property".format(k))

    def __getnewargs__(self):
        # function used by pickle to recreate object
        return self._el.symbol, self._oxi_state, self._properties

    def __getinitargs__(self):
        # function used by pickle to recreate object
        return self._el.symbol, self._oxi_state, self._properties

    def __getattr__(self, a):
        # overriding getattr doesn't play nice with pickle, so we
        # can't use self._properties directly.  Lookup order: the
        # properties dict first, then delegate to the underlying Element.
        p = object.__getattribute__(self, '_properties')
        if a in p:
            return p[a]
        try:
            return getattr(self._el, a)
        except:
            # Any failure (including non-AttributeError ones from the
            # delegate) is surfaced as a plain AttributeError.
            raise AttributeError(a)

    def __eq__(self, other):
        """
        Specie is equal to other only if element and oxidation states are
        exactly the same.
        """
        if not isinstance(other, Specie):
            return False
        return self.symbol == other.symbol \
            and self._oxi_state == other._oxi_state \
            and self._properties == other._properties

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        """
        Given that all oxidation states are below 100 in absolute value, this
        should effectively ensure that no two unequal Specie have the same
        hash.
        """
        return self._el._z * 1000 + int(self._oxi_state)

    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity,
        followed by oxidation state.
        """
        if self.X != other.X:
            return self.X < other.X
        elif self.symbol != other.symbol:
            # There are cases where the electronegativity are exactly equal.
            # We then sort by symbol.
            return self.symbol < other.symbol
        else:
            # Plain Elements carry no oxidation state; treat them as 0.
            other_oxi = 0 if isinstance(other, Element) else other.oxi_state
            return self.oxi_state < other_oxi

    @property
    def element(self):
        """
        Underlying element object
        """
        return self._el

    @property
    def ionic_radius(self):
        """
        Ionic radius of specie. Returns None if data is not present.
        """
        return self.ionic_radii.get(self._oxi_state, None)

    @property
    def oxi_state(self):
        """
        Oxidation state of Specie.
        """
        return self._oxi_state

    @staticmethod
    def from_string(species_string):
        """
        Returns a Specie from a string representation.

        Args:
            species_string (str): A typical string representation of a
                species, e.g., "Mn2+", "Fe3+", "O2-".

        Returns:
            A Specie object.

        Raises:
            ValueError if species_string cannot be intepreted.
        """
        # Groups: symbol, optional magnitude, required sign, and an optional
        # trailing "key=value" property (e.g. "Mn2+spin=5").
        m = re.search("([A-Z][a-z]*)([0-9\.]*)([\+\-])(.*)", species_string)
        if m:
            sym = m.group(1)
            # A bare sign ("Na+") means an oxidation state of magnitude 1.
            oxi = 1 if m.group(2) == "" else float(m.group(2))
            oxi = -oxi if m.group(3) == "-" else oxi
            properties = None
            if m.group(4):
                toks = m.group(4).split("=")
                properties = {toks[0]: float(toks[1])}
            return Specie(sym, oxi, properties)
        else:
            raise ValueError("Invalid Species String")

    def __repr__(self):
        return "Specie " + self.__str__()

    def __str__(self):
        # E.g. "Fe2+" or "O2-"; any properties are appended as key=value.
        output = self.symbol
        if self._oxi_state >= 0:
            output += formula_double_format(self._oxi_state) + "+"
        else:
            output += formula_double_format(-self._oxi_state) + "-"
        for p, v in self._properties.items():
            output += "%s=%s" % (p, v)
        return output

    def get_crystal_field_spin(self, coordination="oct", spin_config="high"):
        """
        Calculate the crystal field spin based on coordination and spin
        configuration. Only works for transition metal species.

        Args:
            coordination (str): Only oct and tet are supported at the moment.
            spin_config (str): Supported keywords are "high" or "low".

        Returns:
            Crystal field spin in Bohr magneton.

        Raises:
            AttributeError if species is not a valid transition metal or has
                an invalid oxidation state.
            ValueError if invalid coordination or spin_config.
        """
        if coordination not in ("oct", "tet") or \
                spin_config not in ("high", "low"):
            raise ValueError("Invalid coordination or spin config.")
        # Require a ...(n-1)d^x ns^y configuration, i.e. a transition metal.
        elec = self.full_electronic_structure
        if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
            raise AttributeError(
                "Invalid element {} for crystal field calculation.".format(
                    self.symbol))
        # d-electron count after ionization: s + d electrons minus charge.
        nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
        if nelectrons < 0:
            raise AttributeError(
                "Invalid oxidation state {} for element {}"
                .format(self.oxi_state, self.symbol))
        if spin_config == "high":
            return nelectrons if nelectrons <= 5 else 10 - nelectrons
        elif spin_config == "low":
            # Fill the lower crystal-field levels first (t2g/eg for oct,
            # e/t2 for tet), pairing before promoting.
            if coordination == "oct":
                if nelectrons <= 3:
                    return nelectrons
                elif nelectrons <= 6:
                    return 6 - nelectrons
                elif nelectrons <= 8:
                    return nelectrons - 6
                else:
                    return 10 - nelectrons
            elif coordination == "tet":
                if nelectrons <= 2:
                    return nelectrons
                elif nelectrons <= 4:
                    return 4 - nelectrons
                elif nelectrons <= 7:
                    return nelectrons - 4
                else:
                    return 10 - nelectrons

    def __deepcopy__(self, memo):
        return Specie(self.symbol, self.oxi_state, self._properties)

    def as_dict(self):
        # JSON-serializable representation (PMGSONable interface).
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "element": self.symbol,
                "oxidation_state": self._oxi_state,
                "properties": self._properties}

    @classmethod
    def from_dict(cls, d):
        return cls(d["element"], d["oxidation_state"],
                   d.get("properties", None))
@cached_class
@total_ordering
class DummySpecie(PMGSONable):
    """
    A special specie for representing non-traditional elements or species. For
    example, representation of vacancies (charged or otherwise), or special
    sites, etc.

    Args:
        symbol (str): An assigned symbol for the dummy specie. Strict
            rules are applied to the choice of the symbol. The dummy
            symbol cannot have any part of first two letters that will
            constitute an Element symbol. Otherwise, a composition may
            be parsed wrongly. E.g., "X" is fine, but "Vac" is not
            because Vac contains V, a valid Element.
        oxidation_state (float): Oxidation state for dummy specie.
            Defaults to zero.

    .. attribute:: symbol
        Symbol for the DummySpecie.
    .. attribute:: oxi_state
        Oxidation state associated with Specie.
    .. attribute:: Z
        DummySpecie is always assigned an atomic number of 0.
    .. attribute:: X
        DummySpecie is always assigned an electronegativity of 0.
    """

    def __init__(self, symbol="X", oxidation_state=0, properties=None):
        # Reject symbols whose 1- or 2-letter prefix is a real element, so
        # that composition parsing cannot confuse a dummy with an element.
        for i in range(1, min(2, len(symbol)) + 1):
            if Element.is_valid_symbol(symbol[:i]):
                raise ValueError("{} contains {}, which is a valid element "
                                 "symbol.".format(symbol, symbol[:i]))
        # Set required attributes for DummySpecie to function like a Specie in
        # most instances.
        self._symbol = symbol
        self._oxi_state = oxidation_state
        self._properties = properties if properties else {}
        for k in self._properties.keys():
            if k not in Specie.supported_properties:
                raise ValueError("{} is not a supported property".format(k))

    def __getnewargs__(self):
        # function used by pickle to recreate object
        return self._symbol, self._oxi_state, self._properties

    def __getinitargs__(self):
        # function used by pickle to recreate object
        return self._symbol, self._oxi_state, self._properties

    def __getattr__(self, a):
        # overriding getattr doesn't play nice with pickle, so we
        # can't use self._properties directly.
        p = object.__getattribute__(self, '_properties')
        if a in p:
            return p[a]
        try:
            # NOTE(review): unlike Specie, __init__ never sets self._el, so
            # this delegation path cannot succeed; it always ends up raising
            # AttributeError below.  Confirm whether element delegation was
            # intended here.
            return getattr(self._el, a)
        except:
            raise AttributeError(a)

    def __hash__(self):
        # All DummySpecie instances hash identically; equality still
        # distinguishes them, so dict/set use works (with collisions).
        return 1

    def __eq__(self, other):
        """
        Specie is equal to other only if element and oxidation states are
        exactly the same.
        """
        if not isinstance(other, DummySpecie):
            return False
        return self.symbol == other.symbol \
            and self._oxi_state == other._oxi_state

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity,
        followed by oxidation state.
        """
        if self.X != other.X:
            return self.X < other.X
        elif self.symbol != other.symbol:
            # There are cases where the electronegativity are exactly equal.
            # We then sort by symbol.
            return self.symbol < other.symbol
        else:
            # Plain Elements carry no oxidation state; treat them as 0.
            other_oxi = 0 if isinstance(other, Element) else other.oxi_state
            return self.oxi_state < other_oxi

    @property
    def Z(self):
        """
        DummySpecie is always assigned an atomic number of 0.
        """
        return 0

    @property
    def oxi_state(self):
        """
        Oxidation state associated with DummySpecie
        """
        return self._oxi_state

    @property
    def X(self):
        """
        DummySpecie is always assigned an electronegativity of 0.
        """
        return 0

    @property
    def symbol(self):
        """Assigned symbol of the DummySpecie (e.g., "X")."""
        return self._symbol

    def __deepcopy__(self, memo):
        # Properties are intentionally not carried over here (matches the
        # original behavior; only symbol and oxidation state are copied).
        return DummySpecie(self.symbol, self._oxi_state)

    @staticmethod
    def from_string(species_string):
        """
        Returns a Dummy from a string representation.

        Args:
            species_string (str): A string representation of a dummy
                species, e.g., "X2+", "X3+".

        Returns:
            A DummySpecie object.

        Raises:
            ValueError if species_string cannot be intepreted.
        """
        # Unlike Specie.from_string, the sign is optional ("X" -> charge 0).
        m = re.search("([A-Z][a-z]*)([0-9\.]*)([\+\-]*)(.*)", species_string)
        if m:
            sym = m.group(1)
            if m.group(2) == "" and m.group(3) == "":
                oxi = 0
            else:
                oxi = 1 if m.group(2) == "" else float(m.group(2))
                oxi = -oxi if m.group(3) == "-" else oxi
            properties = None
            if m.group(4):
                toks = m.group(4).split("=")
                properties = {toks[0]: float(toks[1])}
            return DummySpecie(sym, oxi, properties)
        raise ValueError("Invalid DummySpecies String")

    @classmethod
    def safe_from_composition(cls, comp, oxidation_state=0):
        """
        Returns a DummySpecie object that can be safely used
        with (i.e. not present in) a given composition
        """
        # We don't want to add a DummySpecie with the same
        # symbol as anything in the composition, even if the
        # oxidation state is different.
        # Note the candidate alphabet skips 'e': "Xe" is a real element
        # symbol and would be rejected by DummySpecie.__init__.
        els = comp.element_composition.elements
        for c in 'abcdfghijklmnopqrstuvwxyz':
            if DummySpecie('X' + c) not in els:
                return DummySpecie('X' + c, oxidation_state)
        raise ValueError("All attempted DummySpecies already "
                         "present in {}".format(comp))

    def as_dict(self):
        # JSON-serializable representation (PMGSONable interface).
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "element": self.symbol,
                "oxidation_state": self._oxi_state,
                "properties": self._properties}

    @classmethod
    def from_dict(cls, d):
        return cls(d["element"], d["oxidation_state"],
                   d.get("properties", None))

    def __repr__(self):
        return "DummySpecie " + self.__str__()

    def __str__(self):
        # E.g. "X2+" or "Xa1-"; mirrors Specie.__str__ without properties.
        output = self.symbol
        if self._oxi_state >= 0:
            output += formula_double_format(self._oxi_state) + "+"
        else:
            output += formula_double_format(-self._oxi_state) + "-"
        return output
@singleton
class PeriodicTable(object):
    """
    A Periodic table singleton class. This class contains methods on the
    collection of all known elements. For example, printing all elements, etc.
    """

    def __init__(self):
        """ Implementation of the singleton interface """
        # Pre-instantiate every element, keyed by symbol.
        self._all_elements = dict()
        for sym in _pt_data.keys():
            self._all_elements[sym] = Element(sym)

    def __getattr__(self, name):
        # Attribute-style access by symbol, e.g. PeriodicTable().Fe.
        # Raises KeyError (not AttributeError) for unknown symbols.
        return self._all_elements[name]

    def __iter__(self):
        # Iterate elements in order of increasing Z, skipping the None
        # placeholders in _z2symbol.
        for sym in _z2symbol:
            if sym is not None:
                yield self._all_elements[sym]

    def __getitem__(self, Z_or_slice):
        """Index by atomic number, or take a slice of atomic numbers."""
        try:
            if isinstance(Z_or_slice, slice):
                return [self._all_elements[sym]
                        for sym in symbol_from_Z(Z_or_slice)]
            else:
                return self._all_elements[symbol_from_Z(Z_or_slice)]
        except:
            # Any lookup failure (bad Z, None hole) becomes an IndexError.
            raise IndexError("Z_or_slice: %s" % str(Z_or_slice))

    @property
    def all_elements(self):
        """
        List of all known elements as Element objects.
        """
        return self._all_elements.values()

    def print_periodic_table(self, filter_function=None):
        """
        A pretty ASCII printer for the periodic table, based on some
        filter_function.

        Args:
            filter_function: A filtering function taking an Element as input
                and returning a boolean. For example, setting
                filter_function = lambda el: el.X > 2 will print a periodic
                table containing only elements with electronegativity > 2.
        """
        # Rows 8 and 9 hold the lanthanoid/actinoid series (see Element.row).
        for row in range(1, 10):
            rowstr = []
            for group in range(1, 19):
                el = Element.from_row_and_group(row, group)
                if el and ((not filter_function) or filter_function(el)):
                    rowstr.append("{:3s}".format(el.symbol))
                else:
                    rowstr.append("   ")
            print(" ".join(rowstr))
def get_el_sp(obj):
    """
    Utility method to get an Element or Specie from an input obj.
    If obj is in itself an element or a specie, it is returned automatically.
    If obj is an int or a string representing an integer, the Element
    with the atomic number obj is returned.
    If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
    which Element parsing will be attempted (e.g., Mn), failing which
    DummySpecie parsing will be attempted.

    Args:
        obj (Element/Specie/str/int): An arbitrary object.  Supported objects
            are actual Element/Specie objects, integers (representing atomic
            numbers) or strings (element symbols or species strings).

    Returns:
        Specie or Element, with a bias for the maximum number of properties
        that can be determined.

    Raises:
        ValueError if obj cannot be converted into an Element or Specie.
    """
    # Already a species/element: pass through untouched.
    if isinstance(obj, (Element, Specie, DummySpecie)):
        return obj

    def is_integer(s):
        # True for ints and int-valued floats/strings (e.g. 26, "26", 26.0).
        try:
            c = float(s)
            return int(c) == c
        except (ValueError, TypeError):
            return False

    if is_integer(obj):
        return Element.from_Z(int(float(obj)))

    obj = str(obj)
    try:
        return Specie.from_string(obj)
    except (ValueError, KeyError):
        try:
            return Element(obj)
        except (ValueError, KeyError):
            try:
                return DummySpecie.from_string(obj)
            # Narrowed from a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit and masked programming errors.
            except (ValueError, KeyError):
                raise ValueError("Can't parse Element or String from type %s: %s."
                                 % (type(obj), obj))
| sonium0/pymatgen | pymatgen/core/periodic_table.py | Python | mit | 35,839 | [
"CRYSTAL",
"pymatgen"
] | e1eead9ecbb50562a83c7a82f665dbad8e7b8cd0068a5ca5050e6e56afa5e986 |
"""Example implementation of using a marshmallow Schema for both request input
and output with a `use_schema` decorator.
Run the app:
$ python examples/schema_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/users/
$ http GET :5001/users/42
$ http POST :5001/users/ username=brian first_name=Brian last_name=May
$ http PATCH :5001/users/42 username=freddie
$ http GET :5001/users/ limit==1
"""
import functools
from flask import Flask, request
import random
from marshmallow import Schema, fields, post_dump
from webargs.flaskparser import parser, use_kwargs
app = Flask(__name__)
##### Fake database and model #####
class Model:
    """Tiny in-memory record type: attributes come straight from kwargs."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def update(self, **kwargs):
        """Overwrite/add attributes from keyword arguments."""
        self.__dict__.update(kwargs)

    @classmethod
    def insert(cls, db, **kwargs):
        """Create a record in db[cls.collection], picking an id if absent.

        An explicit ``id`` kwarg is honored (used to seed fixtures);
        otherwise random ids are drawn until an unused one is found.
        """
        collection = db[cls.collection]
        if "id" in kwargs:  # fixture path: caller supplies the id
            record_id = kwargs.pop("id")
        else:
            record_id = random.randint(1, 9999)
            while record_id in collection:
                record_id = random.randint(1, 9999)
        record = cls(id=record_id, **kwargs)
        collection[record_id] = record
        return record
class User(Model):
    # Name of the collection (key into the db dict) backing this model.
    collection = "users"


# Fake in-memory "database": collection name -> {id: record}.
db = {"users": {}}
##### use_schema #####
def use_schema(schema_cls, list_view=False, locations=None):
    """View decorator for using a marshmallow schema to
    (1) parse a request's input and
    (2) serializing the view's output to a JSON response.

    Args:
        schema_cls: marshmallow Schema class used for both load and dump.
        list_view: dump the view's return value with many=True.
        locations: passed through to webargs' parser.use_args to control
            where request arguments are read from.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # Non-POST requests (e.g. PATCH) may send partial payloads;
            # only POST must provide all required fields.
            partial = request.method != "POST"
            schema = schema_cls(partial=partial)
            use_args_wrapper = parser.use_args(schema, locations=locations)
            # Function wrapped with use_args: the parsed payload is injected
            # as the view's first positional argument.
            func_with_args = use_args_wrapper(func)
            ret = func_with_args(*args, **kwargs)
            return schema.dump(ret, many=list_view)
        return wrapped
    return decorator
##### Schemas #####
class UserSchema(Schema):
    # id is server-assigned, so it is never accepted as input.
    id = fields.Int(dump_only=True)
    username = fields.Str(required=True)
    first_name = fields.Str()
    last_name = fields.Str()

    @post_dump(pass_many=True)
    def wrap_with_envelope(self, data, many, **kwargs):
        # Wrap every serialized response in a {"data": ...} envelope.
        return {"data": data}
##### Routes #####
@app.route("/users/<int:user_id>", methods=["GET", "PATCH"])
@use_schema(UserSchema)
def user_detail(reqargs, user_id):
    """Fetch (GET) or partially update (PATCH) a single user.

    ``reqargs`` is the parsed request payload injected by use_schema.
    """
    user = db["users"].get(user_id)
    if not user:
        # NOTE(review): this (dict, 404) tuple is also run through
        # schema.dump by the decorator -- verify the error payload is
        # rendered as intended.
        return {"message": "User not found"}, 404
    if request.method == "PATCH" and reqargs:
        user.update(**reqargs)
    return user
# You can add additional arguments with use_kwargs
@app.route("/users/", methods=["GET", "POST"])
@use_kwargs({"limit": fields.Int(missing=10, location="query")})
@use_schema(UserSchema, list_view=True)
def user_list(reqargs, limit):
users = db["users"].values()
if request.method == "POST":
User.insert(db=db, **reqargs)
return list(users)[:limit]
# Return validation errors as JSON
@app.errorhandler(422)
@app.errorhandler(400)
def handle_validation_error(err):
exc = getattr(err, "exc", None)
if exc:
headers = err.data["headers"]
messages = exc.messages
else:
headers = None
messages = ["Invalid request."]
if headers:
return {"errors": messages}, err.code, headers
else:
return {"errors": messages}, err.code
if __name__ == "__main__":
    # Seed one well-known user so the example endpoints have data to serve.
    User.insert(
        db=db, id=42, username="fred", first_name="Freddie", last_name="Mercury"
    )
    app.run(port=5001, debug=True)
| sloria/webargs | examples/schema_example.py | Python | mit | 3,870 | [
"Brian"
] | 5e2aa74127648fa7a7991e532fe04a68336141d035c4620a5b20805d73a5491b |
"""
DIRAC.WorkloadManagementSystem package
"""
__RCSID__ = "$Id$"
| fstagni/DIRAC | WorkloadManagementSystem/__init__.py | Python | gpl-3.0 | 70 | [
"DIRAC"
] | 5856cc31fbd43fc0421cd9c8492d48803f562116c0dd730666e257ea44a9a7ef |
from __future__ import print_function, division
from os.path import join
import tempfile
import shutil
from io import BytesIO
try:
from subprocess import STDOUT, CalledProcessError
from sympy.core.compatibility import check_output
except ImportError:
pass
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import find_executable
from .latex import latex
from sympy.utilities.decorator import doctest_depends_on
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',),
disable_viewers=('evince', 'gimp', 'superior-dvi-viewer'))
def preview(expr, output='png', viewer=None, euler=True, packages=(),
filename=None, outputbuffer=None, preamble=None, dvioptions=None,
outputTexFile=None, **latex_settings):
r"""
View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.
If the expr argument is an expression, it will be exported to LaTeX and
then compiled using the available TeX distribution. The first argument,
'expr', may also be a LaTeX string. The function will then run the
appropriate viewer for the given output format or use the user defined
one. By default png output is generated.
By default pretty Euler fonts are used for typesetting (they were used to
typeset the well known "Concrete Mathematics" book). For that to work, you
need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
texlive-fonts-extra package). If you prefer default AMS fonts or your
system lacks 'eulervm' LaTeX package then unset the 'euler' keyword
argument.
To use viewer auto-detection, lets say for 'png' output, issue
>>> from sympy import symbols, preview, Symbol
>>> x, y = symbols("x,y")
>>> preview(x + y, output='png')
This will choose 'pyglet' by default. To select a different one, do
>>> preview(x + y, output='png', viewer='gimp')
The 'png' format is considered special. For all other formats the rules
are slightly different. As an example we will take 'dvi' output format. If
you would run
>>> preview(x + y, output='dvi')
then 'view' will look for available 'dvi' viewers on your system
(predefined in the function, so it will try evince, first, then kdvi and
xdvi). If nothing is found you will need to set the viewer explicitly.
>>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
This will skip auto-detection and will run user specified
'superior-dvi-viewer'. If 'view' fails to find it on your system it will
gracefully raise an exception.
You may also enter 'file' for the viewer argument. Doing so will cause
this function to return a file object in read-only mode, if 'filename'
is unset. However, if it was set, then 'preview' writes the genereted
file to this filename instead.
There is also support for writing to a BytesIO like object, which needs
to be passed to the 'outputbuffer' argument.
>>> from io import BytesIO
>>> obj = BytesIO()
>>> preview(x + y, output='png', viewer='BytesIO',
... outputbuffer=obj)
The LaTeX preamble can be customized by setting the 'preamble' keyword
argument. This can be used, e.g., to set a different font size, use a
custom documentclass or import certain set of LaTeX packages.
>>> preamble = "\\documentclass[10pt]{article}\n" \
... "\\usepackage{amsmath,amsfonts}\\begin{document}"
>>> preview(x + y, output='png', preamble=preamble)
If the value of 'output' is different from 'dvi' then command line
options can be set ('dvioptions' argument) for the execution of the
'dvi'+output conversion tool. These options have to be in the form of a
list of strings (see subprocess.Popen).
Additional keyword args will be passed to the latex call, e.g., the
symbol_names flag.
>>> phidd = Symbol('phidd')
>>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})
For post-processing the generated TeX File can be written to a file by
passing the desired filename to the 'outputTexFile' keyword
argument. To write the TeX code to a file named
"sample.tex" and run the default png viewer to display the resulting
bitmap, do
>>> preview(x + y, outputTexFile="sample.tex")
"""
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
# very discussable, but indeed 'gv' looks awful :)
# TODO add candidates for windows to list
candidates = {
"dvi": [ "evince", "okular", "kdvi", "xdvi" ],
"ps": [ "evince", "okular", "gsview", "gv" ],
"pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
path = find_executable(candidate)
if path is not None:
viewer = path
break
else:
raise SystemError(
"No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer == "file":
if filename is None:
SymPyDeprecationWarning(feature="Using viewer=\"file\" without a "
"specified filename", deprecated_since_version="0.7.3",
useinstead="viewer=\"file\" and filename=\"desiredname\"",
issue=7018).warn()
elif viewer == "StringIO":
SymPyDeprecationWarning(feature="The preview() viewer StringIO",
useinstead="BytesIO", deprecated_since_version="0.7.4",
issue=7083).warn()
viewer = "BytesIO"
if outputbuffer is None:
raise ValueError("outputbuffer has to be a BytesIO "
"compatible object if viewer=\"StringIO\"")
elif viewer == "BytesIO":
if outputbuffer is None:
raise ValueError("outputbuffer has to be a BytesIO "
"compatible object if viewer=\"BytesIO\"")
elif viewer not in special and not find_executable(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
if preamble is None:
actual_packages = packages + ("amsmath", "amsfonts")
if euler:
actual_packages += ("euler",)
package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
for p in actual_packages])
preamble = r"""\documentclass[12pt]{article}
\pagestyle{empty}
%s
\begin{document}
""" % (package_includes)
else:
if len(packages) > 0:
raise ValueError("The \"packages\" keyword must not be set if a "
"custom LaTeX preamble was specified")
latex_main = preamble + '\n%s\n\n' + r"\end{document}"
if isinstance(expr, str):
latex_string = expr
else:
latex_string = latex(expr, mode='inline', **latex_settings)
try:
workdir = tempfile.mkdtemp()
with open(join(workdir, 'texput.tex'), 'w') as fh:
fh.write(latex_main % latex_string)
if outputTexFile is not None:
shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)
if not find_executable('latex'):
raise RuntimeError("latex program is not installed")
try:
check_output(['latex', '-halt-on-error', '-interaction=nonstopmode',
'texput.tex'], cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'latex' exited abnormally with the following output:\n%s" %
e.output)
if output != "dvi":
defaultoptions = {
"ps": [],
"pdf": [],
"png": ["-T", "tight", "-z", "9", "--truecolor"],
"svg": ["--no-fonts"],
}
commandend = {
"ps": ["-o", "texput.ps", "texput.dvi"],
"pdf": ["texput.dvi", "texput.pdf"],
"png": ["-o", "texput.png", "texput.dvi"],
"svg": ["-o", "texput.svg", "texput.dvi"],
}
if output == "svg":
cmd = ["dvisvgm"]
else:
cmd = ["dvi" + output]
if not find_executable(cmd[0]):
raise RuntimeError("%s is not installed" % cmd[0])
try:
if dvioptions is not None:
cmd.extend(dvioptions)
else:
cmd.extend(defaultoptions[output])
cmd.extend(commandend[output])
except KeyError:
raise SystemError("Invalid output format: %s" % output)
try:
check_output(cmd, cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s' exited abnormally with the following output:\n%s" %
(' '.join(cmd), e.output))
src = "texput.%s" % (output)
if viewer == "file":
if filename is None:
buffer = BytesIO()
with open(join(workdir, src), 'rb') as fh:
buffer.write(fh.read())
return buffer
else:
shutil.move(join(workdir,src), filename)
elif viewer == "BytesIO":
with open(join(workdir, src), 'rb') as fh:
outputbuffer.write(fh.read())
elif viewer == "pyglet":
try:
from pyglet import window, image, gl
from pyglet.window import key
except ImportError:
raise ImportError("pyglet is required for preview.\n visit http://www.pyglet.org/")
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(join(workdir, src), decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
config = gl.Config(double_buffer=False)
win = window.Window(
width=img.width + 2*offset,
height=img.height + 2*offset,
caption="sympy",
resizable=False,
config=config
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
try:
check_output([viewer, src], cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s %s' exited abnormally with the following output:\n%s" %
(viewer, src, e.output))
finally:
try:
shutil.rmtree(workdir) # delete directory
except OSError as e:
if e.errno != 2: # code 2 - no such file or directory
raise
| pbrady/sympy | sympy/printing/preview.py | Python | bsd-3-clause | 12,098 | [
"VisIt"
] | c6824f137499e3b72ba3f70aacae5bb88378a87e7a88a03ef5c9410c15ad4e80 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.transition_state import NEBAnalysis
import json
from pymatgen.analysis.transition_state import combine_neb_plots
"""
TODO: Modify unittest doc.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__date__ = "2/5/16"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', 'neb_analysis')
class NEBAnalysisTest(PymatgenTest):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def runTest(self):
neb_analysis1 = NEBAnalysis.from_dir(os.path.join
(test_dir, 'neb1', 'neb'))
neb_analysis1_from_dict = NEBAnalysis.from_dict(neb_analysis1.as_dict())
json_data = json.dumps(neb_analysis1.as_dict())
neb_dict = json.loads(json_data)
neb_analysis1_from_json_data = NEBAnalysis.from_dict(neb_dict)
self.assertArrayAlmostEqual(neb_analysis1.energies[0], -255.97992669000001)
self.assertArrayAlmostEqual(neb_analysis1.energies[3], -255.84261996000001)
self.assertArrayAlmostEqual(neb_analysis1.r, neb_analysis1_from_dict.r)
self.assertArrayAlmostEqual(neb_analysis1.energies, neb_analysis1_from_dict.energies)
self.assertArrayAlmostEqual(neb_analysis1.forces, neb_analysis1_from_dict.forces)
self.assertEqual(neb_analysis1.structures, neb_analysis1_from_dict.structures)
self.assertArrayAlmostEqual(neb_analysis1.r, neb_analysis1_from_json_data.r)
self.assertArrayAlmostEqual(neb_analysis1.energies, neb_analysis1_from_json_data.energies)
self.assertArrayAlmostEqual(neb_analysis1.forces, neb_analysis1_from_json_data.forces)
self.assertEqual(neb_analysis1.structures, neb_analysis1_from_json_data.structures)
self.assertArrayAlmostEqual(neb_analysis1.get_extrema()[1][0], (0.50023335723480078, 325.20043063935128))
neb_analysis1.setup_spline(spline_options={'saddle_point': 'zero_slope'})
self.assertArrayAlmostEqual(neb_analysis1.get_extrema()[1][0], (0.50023335723480078, 325.20003984140203))
with open(os.path.join(test_dir, 'neb2', 'neb_analysis2.json'),
'r') as f:
neb_analysis2_dict = json.load(f)
neb_analysis2 = NEBAnalysis.from_dict(neb_analysis2_dict)
self.assertArrayAlmostEqual(neb_analysis2.get_extrema()[1][0], (0.37255257367467326, 562.40825334519991))
neb_analysis2.setup_spline(spline_options={'saddle_point': 'zero_slope'})
self.assertArrayAlmostEqual(neb_analysis2.get_extrema()[1][0], (0.30371133723478794, 528.46229631648691))
def test_combine_neb_plots(self):
neb_dir = os.path.join(test_dir, 'neb1', 'neb')
neb_analysis = NEBAnalysis.from_dir(neb_dir)
combine_neb_plots([neb_analysis, neb_analysis])
if __name__ == '__main__':
unittest.main()
| gVallverdu/pymatgen | pymatgen/analysis/tests/test_transition_state.py | Python | mit | 3,242 | [
"pymatgen"
] | 8c9cf59325a33bd20e2452ba2be50936f9dd46b00b6276ef829b14512921715f |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
__all__ = ['BisectingKMeans', 'BisectingKMeansModel',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel']
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
Note: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.show()
+--------------------+--------------------+
| mean| cov|
+--------------------+--------------------+
|[-0.0550000000000...|0.002025000000000...|
|[0.82499999999999...|0.005625000000000...|
|[-0.87,-0.7200000...|0.001600000000000...|
+--------------------+--------------------+
...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.show()
+--------------------+--------------------+
| mean| cov|
+--------------------+--------------------+
|[-0.0550000000000...|0.002025000000000...|
|[0.82499999999999...|0.005625000000000...|
|[-0.87,-0.7200000...|0.001600000000000...|
+--------------------+--------------------+
...
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
"""
return self._call_java("computeCost", dataset)
@inherit_doc
class KMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None)
Sets params for KMeans.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
If bisecting all divisible clusters on the bottom level would result more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
Sets params for BisectingKMeans.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
@inherit_doc
class LDAModel(JavaModel):
"""
.. note:: Experimental
Latent Dirichlet Allocation (LDA) model.
This abstraction permits for different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
return LocalLDAModel(self._call_java("toLocal"))
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
Note that removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up the
checkpoints when this model and derivative data go out of scope.
:return List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an el
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
# Hyper-parameter descriptors for LDA. Each Param carries the user-facing
# documentation string and a type converter used to validate assigned values.
# All converters are passed via the `typeConverter` keyword for consistency.
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
          typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
                  "Optimizer or inference algorithm used to estimate the LDA model. "
                  "Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
                       "A (positive) learning parameter that downweights early iterations."
                       " Larger values make early iterations count less",
                       typeConverter=TypeConverters.toFloat)
learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
                      "exponential decay rate. This should be between (0.5, 1.0] to "
                      "guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
                        "Fraction of the corpus to be sampled and used in each iteration "
                        "of mini-batch gradient descent, in range (0, 1].",
                        typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
                                 "Indicates whether the docConcentration (Dirichlet parameter "
                                 "for document-topic distribution) will be optimized during "
                                 "training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
                         "Concentration parameter (commonly named \"alpha\") for the "
                         "prior placed on documents' distributions over topics (\"theta\").",
                         typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
                           "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                           "the prior placed on topics' distributions over terms.",
                           typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
                             "Output column with estimates of the topic mixture distribution "
                             "for each document (often called \"theta\" in the literature). "
                             "Returns a vector of zeros for an empty document.",
                             typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
                           "(For EM optimizer) If using checkpointing, this indicates whether"
                           " to keep the last checkpoint. If false, then the checkpoint will be"
                           " deleted. Deleting the checkpoint can cause failures if a data"
                           " partition is lost, so set this bit with care.",
                           typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
             k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
             subsamplingRate=0.05, optimizeDocConcentration=True,
             docConcentration=None, topicConcentration=None,
             topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
    """
    __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
             k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
             subsamplingRate=0.05, optimizeDocConcentration=True,\
             docConcentration=None, topicConcentration=None,\
             topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
    """
    super(LDA, self).__init__()
    # Create the JVM-side estimator this Python wrapper delegates to.
    self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
    # Defaults mirror the Scala implementation; docConcentration and
    # topicConcentration deliberately have no default (auto-set by optimizer).
    self._setDefault(maxIter=20, checkpointInterval=10,
                     k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                     subsamplingRate=0.05, optimizeDocConcentration=True,
                     topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
    # Spark 2.0 @keyword_only pattern: the decorator stashes the caller's
    # kwargs on the function object.
    kwargs = self.__init__._input_kwargs
    self.setParams(**kwargs)
def _create_model(self, java_model):
    # EM training yields a distributed model; online training yields a local one.
    model_cls = DistributedLDAModel if self.getOptimizer() == "em" else LocalLDAModel
    return model_cls(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
              k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
              subsamplingRate=0.05, optimizeDocConcentration=True,
              docConcentration=None, topicConcentration=None,
              topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
    """
    setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
              k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
              subsamplingRate=0.05, optimizeDocConcentration=True,\
              docConcentration=None, topicConcentration=None,\
              topicDistributionCol="topicDistribution", keepLastCheckpoint=True):

    Sets params for LDA.
    """
    # Only the kwargs the caller actually supplied are applied (keyword_only).
    kwargs = self.setParams._input_kwargs
    return self._set(**kwargs)
# --- Per-parameter setters/getters. Each setter stores the value via
# --- Params._set and returns self for chaining; each getter falls back to
# --- the default registered in __init__ when the param is unset.
@since("2.0.0")
def setK(self, value):
    """
    Sets the value of :py:attr:`k`.

    >>> algo = LDA().setK(10)
    >>> algo.getK()
    10
    """
    return self._set(k=value)
@since("2.0.0")
def getK(self):
    """
    Gets the value of :py:attr:`k` or its default value.
    """
    return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
    """
    Sets the value of :py:attr:`optimizer`.
    Currently only support 'em' and 'online'.

    >>> algo = LDA().setOptimizer("em")
    >>> algo.getOptimizer()
    'em'
    """
    return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
    """
    Gets the value of :py:attr:`optimizer` or its default value.
    """
    return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
    """
    Sets the value of :py:attr:`learningOffset`.

    >>> algo = LDA().setLearningOffset(100)
    >>> algo.getLearningOffset()
    100.0
    """
    return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
    """
    Gets the value of :py:attr:`learningOffset` or its default value.
    """
    return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
    """
    Sets the value of :py:attr:`learningDecay`.

    >>> algo = LDA().setLearningDecay(0.1)
    >>> algo.getLearningDecay()
    0.1...
    """
    return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
    """
    Gets the value of :py:attr:`learningDecay` or its default value.
    """
    return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
    """
    Sets the value of :py:attr:`subsamplingRate`.

    >>> algo = LDA().setSubsamplingRate(0.1)
    >>> algo.getSubsamplingRate()
    0.1...
    """
    return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
    """
    Gets the value of :py:attr:`subsamplingRate` or its default value.
    """
    return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
    """
    Sets the value of :py:attr:`optimizeDocConcentration`.

    >>> algo = LDA().setOptimizeDocConcentration(True)
    >>> algo.getOptimizeDocConcentration()
    True
    """
    return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
    """
    Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
    """
    return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
    """
    Sets the value of :py:attr:`docConcentration`.

    >>> algo = LDA().setDocConcentration([0.1, 0.2])
    >>> algo.getDocConcentration()
    [0.1..., 0.2...]
    """
    return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
    """
    Gets the value of :py:attr:`docConcentration` or its default value.
    """
    return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
    """
    Sets the value of :py:attr:`topicConcentration`.

    >>> algo = LDA().setTopicConcentration(0.5)
    >>> algo.getTopicConcentration()
    0.5...
    """
    return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
    """
    Gets the value of :py:attr:`topicConcentration` or its default value.
    """
    return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
    """
    Sets the value of :py:attr:`topicDistributionCol`.

    >>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
    >>> algo.getTopicDistributionCol()
    'topicDistributionCol'
    """
    return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
    """
    Gets the value of :py:attr:`topicDistributionCol` or its default value.
    """
    return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
    """
    Sets the value of :py:attr:`keepLastCheckpoint`.

    >>> algo = LDA().setKeepLastCheckpoint(False)
    >>> algo.getKeepLastCheckpoint()
    False
    """
    return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
    """
    Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
    """
    return self.getOrDefault(self.keepLastCheckpoint)
if __name__ == "__main__":
    # Doctest driver: runs the examples embedded in the docstrings of this
    # module against a throwaway local SparkSession and temp directory.
    import doctest
    import pyspark.ml.clustering
    from pyspark.sql import SparkSession
    globs = pyspark.ml.clustering.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.clustering tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    # temp_path is referenced by the save/load doctests above.
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        # NOTE(review): spark.stop() is skipped if testmod raises; presumably
        # acceptable for a test driver, but confirm before relying on it.
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        exit(-1)
| DataReplyUK/datareplyuk | GenesAssociation/spark-2.0.0-bin-hadoop2.7/python/pyspark/ml/clustering.py | Python | apache-2.0 | 37,032 | [
"Gaussian"
] | 05eceb69700e47667e1b618615440996c18a1eeb7b1618235614f78340655126 |
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A synchronization map, or sync map,
is a map from text fragments to time intervals.
This package contains the following classes:
* :class:`~aeneas.syncmap.SyncMap`, represents a sync map as a tree of sync map fragments;
* :class:`~aeneas.syncmap.format.SyncMapFormat`, an enumeration of the supported output formats;
* :class:`~aeneas.syncmap.fragment.SyncMapFragment`, connects a text fragment with a ``begin`` and ``end`` time values;
* :class:`~aeneas.syncmap.fragmentlist.SyncMapFragmentList`, a list of sync map fragments with order constraints;
* :class:`~aeneas.syncmap.headtailformat.SyncMapHeadTailFormat`, an enumeration of the supported formats for the sync map head/tail;
* :class:`~aeneas.syncmap.missingparametererror.SyncMapMissingParameterError`, an error raised when reading sync maps from file.
"""
from __future__ import absolute_import
from __future__ import print_function
from copy import deepcopy
from functools import partial
from itertools import chain
import io
import json
import os
from aeneas.logger import Loggable
from aeneas.syncmap.format import SyncMapFormat
from aeneas.syncmap.fragment import SyncMapFragment
from aeneas.syncmap.fragmentlist import SyncMapFragmentList
from aeneas.syncmap.headtailformat import SyncMapHeadTailFormat
from aeneas.syncmap.missingparametererror import SyncMapMissingParameterError
from aeneas.textfile import TextFragment
from aeneas.tree import Tree
import aeneas.globalconstants as gc
import aeneas.globalfunctions as gf
class SyncMap(Loggable):
"""
A synchronization map, that is, a tree of
:class:`~aeneas.syncmap.fragment.SyncMapFragment`
objects.
:param tree: the tree of fragments; if ``None``, an empty one will be created
:type tree: :class:`~aeneas.tree.Tree`
"""
# Pairs of (placeholder, replacement) applied verbatim to the finetuneas
# HTML template when generating the manual-tuning page.
FINETUNEAS_REPLACEMENTS = [
    ["<!-- AENEAS_REPLACE_COMMENT_BEGIN -->", "<!-- AENEAS_REPLACE_COMMENT_BEGIN"],
    ["<!-- AENEAS_REPLACE_COMMENT_END -->", "AENEAS_REPLACE_COMMENT_END -->"],
    ["<!-- AENEAS_REPLACE_UNCOMMENT_BEGIN", "<!-- AENEAS_REPLACE_UNCOMMENT_BEGIN -->"],
    ["AENEAS_REPLACE_UNCOMMENT_END -->", "<!-- AENEAS_REPLACE_UNCOMMENT_END -->"],
    ["// AENEAS_REPLACE_SHOW_ID", "showID = true;"],
    ["// AENEAS_REPLACE_ALIGN_TEXT", "alignText = \"left\""],
    ["// AENEAS_REPLACE_CONTINUOUS_PLAY", "continuousPlay = true;"],
    ["// AENEAS_REPLACE_TIME_FORMAT", "timeFormatHHMMSSmmm = true;"],
]
# Single-value placeholders substituted with computed strings at output time.
FINETUNEAS_REPLACE_AUDIOFILEPATH = "// AENEAS_REPLACE_AUDIOFILEPATH"
FINETUNEAS_REPLACE_FRAGMENTS = "// AENEAS_REPLACE_FRAGMENTS"
FINETUNEAS_REPLACE_OUTPUT_FORMAT = "// AENEAS_REPLACE_OUTPUT_FORMAT"
FINETUNEAS_REPLACE_SMIL_AUDIOREF = "// AENEAS_REPLACE_SMIL_AUDIOREF"
FINETUNEAS_REPLACE_SMIL_PAGEREF = "// AENEAS_REPLACE_SMIL_PAGEREF"
# Output formats the finetuneas page can export.
FINETUNEAS_ALLOWED_FORMATS = [
    "csv",
    "json",
    "smil",
    "srt",
    "ssv",
    "ttml",
    "tsv",
    "txt",
    "vtt",
    "xml"
]
# Path of the HTML template, relative to this module's location.
FINETUNEAS_PATH = "../res/finetuneas.html"
# Tag used by the Loggable machinery to label log entries.
TAG = u"SyncMap"
def __init__(self, tree=None, rconf=None, logger=None):
    # Validate up front: only a Tree instance (or None) is acceptable.
    if tree is not None and not isinstance(tree, Tree):
        raise TypeError(u"tree is not an instance of Tree")
    super(SyncMap, self).__init__(rconf=rconf, logger=logger)
    # Fall back to an empty tree when the caller supplied none.
    self.fragments_tree = tree if tree is not None else Tree()
def __len__(self):
    # Number of non-empty direct children of the root node.
    return len(self.fragments)
def __unicode__(self):
    # One fragment per line, in tree order.
    return u"\n".join([f.__unicode__() for f in self.fragments])
def __str__(self):
    return gf.safe_str(self.__unicode__())
@property
def fragments_tree(self):
    """
    Return the current tree of fragments.

    :rtype: :class:`~aeneas.tree.Tree`
    """
    return self.__fragments_tree
@fragments_tree.setter
def fragments_tree(self, fragments_tree):
    # No validation here: __init__ checks types before assigning.
    self.__fragments_tree = fragments_tree
@property
def is_single_level(self):
    """
    Return ``True`` if the sync map
    has only one level, that is,
    if it is a list of fragments
    rather than a hierarchical tree.

    :rtype: bool
    """
    # Height 2 == root plus one layer of leaves.
    return self.fragments_tree.height <= 2
@property
def fragments(self):
    """
    The current list of sync map fragments
    which are (the values of) the children of the root node
    of the sync map tree.

    :rtype: list of :class:`~aeneas.syncmap.fragment.SyncMapFragment`
    """
    return self.fragments_tree.vchildren_not_empty
def leaves(self, fragment_type=None):
    """
    The current list of sync map fragments
    which are (the values of) the leaves
    of the sync map tree, optionally filtered by fragment type.

    :rtype: list of :class:`~aeneas.syncmap.fragment.SyncMapFragment`
    .. versionadded:: 1.7.0
    """
    selected = self.fragments_tree.vleaves_not_empty
    if fragment_type is not None:
        selected = [leaf for leaf in selected if leaf.fragment_type == fragment_type]
    return selected
@property
def has_adjacent_leaves_only(self):
    """
    Return ``True`` if the sync map fragments
    which are the leaves of the sync map tree
    are all adjacent.

    :rtype: bool
    .. versionadded:: 1.7.0
    """
    # Pair each leaf interval with its successor; vacuously True for 0/1 leaves.
    intervals = [leaf.interval for leaf in self.leaves()]
    return all(
        prev.is_adjacent_before(cur)
        for prev, cur in zip(intervals, intervals[1:])
    )
@property
def has_zero_length_leaves(self):
    """
    Return ``True`` if there is at least one sync map fragment
    which has zero length
    among the leaves of the sync map tree.

    :rtype: bool
    .. versionadded:: 1.7.0
    """
    return any(leaf.has_zero_length for leaf in self.leaves())
@property
def leaves_are_consistent(self):
    """
    Return ``True`` if the sync map fragments
    which are the leaves of the sync map tree
    (except for HEAD and TAIL leaves)
    are all consistent, that is,
    their intervals do not overlap in forbidden ways.

    :rtype: bool
    .. versionadded:: 1.7.0
    """
    self.log(u"Checking if leaves are consistent")
    leaves = self.leaves()
    if len(leaves) < 1:
        self.log(u"Empty leaves => return True")
        return True
    # Bounding interval for the fragment list we are about to build.
    min_time = min([l.interval.begin for l in leaves])
    self.log([u" Min time: %.3f", min_time])
    max_time = max([l.interval.end for l in leaves])
    self.log([u" Max time: %.3f", max_time])
    self.log(u" Creating SyncMapFragmentList...")
    smf = SyncMapFragmentList(
        begin=min_time,
        end=max_time,
        rconf=self.rconf,
        logger=self.logger
    )
    self.log(u" Creating SyncMapFragmentList... done")
    self.log(u" Sorting SyncMapFragmentList...")
    result = True
    # Consistency is delegated to SyncMapFragmentList.sort(), which raises
    # ValueError on forbidden overlaps; HEAD/TAIL fragments are exempt.
    not_head_tail = [l for l in leaves if not l.is_head_or_tail]
    for l in not_head_tail:
        smf.add(l, sort=False)
    try:
        smf.sort()
        self.log(u" Sorting completed => return True")
    except ValueError:
        self.log(u" Exception while sorting => return False")
        result = False
    self.log(u" Sorting SyncMapFragmentList... done")
    return result
@property
def json_string(self):
    """
    Return a JSON representation of the sync map.

    :rtype: string
    .. versionadded:: 1.3.1
    """
    def visit_children(node):
        """ Recursively visit the fragments_tree """
        output_fragments = []
        for child in node.children_not_empty:
            fragment = child.value
            text = fragment.text_fragment
            # Serialize this fragment, then recurse into its children so the
            # JSON mirrors the tree hierarchy.
            output_fragments.append({
                "id": text.identifier,
                "language": text.language,
                "lines": text.lines,
                "begin": gf.time_to_ssmmm(fragment.begin),
                "end": gf.time_to_ssmmm(fragment.end),
                "children": visit_children(child)
            })
        return output_fragments
    output_fragments = visit_children(self.fragments_tree)
    # sort_keys gives deterministic output, useful for tests and diffs.
    return gf.safe_unicode(
        json.dumps({"fragments": output_fragments}, indent=1, sort_keys=True)
    )
def add_fragment(self, fragment, as_last=True):
    """
    Add the given sync map fragment,
    as the first or last child of the root node
    of the sync map tree.

    :param fragment: the sync map fragment to be added
    :type fragment: :class:`~aeneas.syncmap.fragment.SyncMapFragment`
    :param bool as_last: if ``True``, append fragment; otherwise prepend it
    :raises: TypeError: if ``fragment`` is ``None`` or
        it is not an instance of :class:`~aeneas.syncmap.fragment.SyncMapFragment`
    """
    if not isinstance(fragment, SyncMapFragment):
        # log_exc with raises=True raises the TypeError after logging.
        self.log_exc(u"fragment is not an instance of SyncMapFragment", None, True, TypeError)
    self.fragments_tree.add_child(Tree(value=fragment), as_last=as_last)
def clear(self):
    """
    Clear the sync map, removing all the current fragments.
    """
    self.log(u"Clearing sync map")
    self.fragments_tree = Tree()
def clone(self):
    """
    Return a deep copy of this sync map.

    .. versionadded:: 1.7.0
    :rtype: :class:`~aeneas.syncmap.SyncMap`
    """
    return deepcopy(self)
def output_html_for_tuning(
    self,
    audio_file_path,
    output_file_path,
    parameters=None
):
    """
    Output an HTML file for fine tuning the sync map manually.

    :param string audio_file_path: the path to the associated audio file
    :param string output_file_path: the path to the output file to write
    :param dict parameters: additional parameters
    :raises: OSError: if ``output_file_path`` cannot be written
    .. versionadded:: 1.3.1
    """
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Cannot output HTML file '%s'. Wrong permissions?" % (output_file_path), None, True, OSError)
    if parameters is None:
        parameters = {}
    audio_file_path_absolute = gf.fix_slash(os.path.abspath(audio_file_path))
    # Template ships with the package, next to this module.
    template_path_absolute = gf.absolute_path(self.FINETUNEAS_PATH, __file__)
    with io.open(template_path_absolute, "r", encoding="utf-8") as file_obj:
        template = file_obj.read()
    # Fixed substitutions first, then the computed ones below.
    for repl in self.FINETUNEAS_REPLACEMENTS:
        template = template.replace(repl[0], repl[1])
    template = template.replace(
        self.FINETUNEAS_REPLACE_AUDIOFILEPATH,
        u"audioFilePath = \"file://%s\";" % audio_file_path_absolute
    )
    template = template.replace(
        self.FINETUNEAS_REPLACE_FRAGMENTS,
        u"fragments = (%s).fragments;" % self.json_string
    )
    if gc.PPN_TASK_OS_FILE_FORMAT in parameters:
        output_format = parameters[gc.PPN_TASK_OS_FILE_FORMAT]
        if output_format in self.FINETUNEAS_ALLOWED_FORMATS:
            template = template.replace(
                self.FINETUNEAS_REPLACE_OUTPUT_FORMAT,
                u"outputFormat = \"%s\";" % output_format
            )
            # SMIL output additionally needs audio/page refs in the page.
            if output_format == "smil":
                for key, placeholder, replacement in [
                    (
                        gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF,
                        self.FINETUNEAS_REPLACE_SMIL_AUDIOREF,
                        "audioref = \"%s\";"
                    ),
                    (
                        gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF,
                        self.FINETUNEAS_REPLACE_SMIL_PAGEREF,
                        "pageref = \"%s\";"
                    ),
                ]:
                    if key in parameters:
                        template = template.replace(
                            placeholder,
                            replacement % parameters[key]
                        )
    with io.open(output_file_path, "w", encoding="utf-8") as file_obj:
        file_obj.write(template)
def read(self, sync_map_format, input_file_path, parameters=None):
    """
    Read sync map fragments from the given file in the specified format,
    and add them the current (this) sync map.

    Return ``True`` if the call succeeded,
    ``False`` if an error occurred.

    :param sync_map_format: the format of the sync map
    :type sync_map_format: :class:`~aeneas.syncmap.SyncMapFormat`
    :param string input_file_path: the path to the input file to read
    :param dict parameters: additional parameters (e.g., for ``SMIL`` input)
    :raises: ValueError: if ``sync_map_format`` is ``None`` or it is not an allowed value
    :raises: OSError: if ``input_file_path`` does not exist
    """
    if sync_map_format is None:
        self.log_exc(u"Sync map format is None", None, True, ValueError)
    if sync_map_format not in SyncMapFormat.CODE_TO_CLASS:
        self.log_exc(u"Sync map format '%s' is not allowed" % (sync_map_format), None, True, ValueError)
    if not gf.file_can_be_read(input_file_path):
        self.log_exc(u"Cannot read sync map file '%s'. Wrong permissions?" % (input_file_path), None, True, OSError)
    self.log([u"Input format: '%s'", sync_map_format])
    self.log([u"Input path: '%s'", input_file_path])
    self.log([u"Input parameters: '%s'", parameters])
    # Instantiate the format-specific reader class registered for this code.
    reader = (SyncMapFormat.CODE_TO_CLASS[sync_map_format])(
        variant=sync_map_format,
        parameters=parameters,
        rconf=self.rconf,
        logger=self.logger
    )
    # open file for reading
    self.log(u"Reading input file...")
    with io.open(input_file_path, "r", encoding="utf-8") as input_file:
        input_text = input_file.read()
    # The reader mutates this SyncMap in place.
    reader.parse(input_text=input_text, syncmap=self)
    self.log(u"Reading input file... done")
    # overwrite language if requested
    language = gf.safe_get(parameters, gc.PPN_SYNCMAP_LANGUAGE, None)
    if language is not None:
        self.log([u"Overwriting language to '%s'", language])
        for fragment in self.fragments:
            fragment.text_fragment.language = language
def write(self, sync_map_format, output_file_path, parameters=None):
    """
    Write the current sync map to file in the requested format.

    Return ``True`` if the call succeeded,
    ``False`` if an error occurred.

    :param sync_map_format: the format of the sync map
    :type sync_map_format: :class:`~aeneas.syncmap.SyncMapFormat`
    :param string output_file_path: the path to the output file to write
    :param dict parameters: additional parameters (e.g., for ``SMIL`` output)
    :raises: ValueError: if ``sync_map_format`` is ``None`` or it is not an allowed value
    :raises: TypeError: if a required parameter is missing
    :raises: OSError: if ``output_file_path`` cannot be written
    """
    def select_levels(syncmap, levels):
        """
        Select the given levels of the fragments tree,
        modifying the given syncmap (always pass a copy of it!).
        """
        self.log([u"Levels: '%s'", levels])
        if levels is None:
            return
        try:
            levels = [int(l) for l in levels if int(l) > 0]
            syncmap.fragments_tree.keep_levels(levels)
            self.log([u"Selected levels: %s", levels])
        except ValueError:
            self.log_warn(u"Cannot convert levels to list of int, returning unchanged")
    def set_head_tail_format(syncmap, head_tail_format=None):
        """
        Set the appropriate head/tail nodes of the fragments tree,
        modifying the given syncmap (always pass a copy of it!).
        """
        self.log([u"Head/tail format: '%s'", str(head_tail_format)])
        tree = syncmap.fragments_tree
        head = tree.get_child(0)
        first = tree.get_child(1)
        last = tree.get_child(-2)
        tail = tree.get_child(-1)
        # mark HEAD as REGULAR if needed
        if head_tail_format == SyncMapHeadTailFormat.ADD:
            head.value.fragment_type = SyncMapFragment.REGULAR
            self.log(u"Marked HEAD as REGULAR")
        # stretch first and last fragment timings if needed
        if head_tail_format == SyncMapHeadTailFormat.STRETCH:
            self.log([u"Stretched first.begin: %.3f => %.3f (head)", first.value.begin, head.value.begin])
            self.log([u"Stretched last.end: %.3f => %.3f (tail)", last.value.end, tail.value.end])
            first.value.begin = head.value.begin
            last.value.end = tail.value.end
        # mark TAIL as REGULAR if needed
        if head_tail_format == SyncMapHeadTailFormat.ADD:
            tail.value.fragment_type = SyncMapFragment.REGULAR
            self.log(u"Marked TAIL as REGULAR")
        # remove all fragments that are not REGULAR
        for node in list(tree.dfs):
            if (node.value is not None) and (node.value.fragment_type != SyncMapFragment.REGULAR):
                node.remove()
    if sync_map_format is None:
        self.log_exc(u"Sync map format is None", None, True, ValueError)
    if sync_map_format not in SyncMapFormat.CODE_TO_CLASS:
        self.log_exc(u"Sync map format '%s' is not allowed" % (sync_map_format), None, True, ValueError)
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Cannot write sync map file '%s'. Wrong permissions?" % (output_file_path), None, True, OSError)
    self.log([u"Output format: '%s'", sync_map_format])
    self.log([u"Output path: '%s'", output_file_path])
    self.log([u"Output parameters: '%s'", parameters])
    # select levels and head/tail format
    # Work on a clone so the options below do not mutate this sync map.
    pruned_syncmap = self.clone()
    # KeyError: key absent from parameters; TypeError: parameters is None.
    # (Previously a bare except, which also hid genuine bugs.)
    try:
        select_levels(pruned_syncmap, parameters[gc.PPN_TASK_OS_FILE_LEVELS])
    except (KeyError, TypeError):
        self.log_warn([u"No %s parameter specified", gc.PPN_TASK_OS_FILE_LEVELS])
    try:
        set_head_tail_format(pruned_syncmap, parameters[gc.PPN_TASK_OS_FILE_HEAD_TAIL_FORMAT])
    except (KeyError, TypeError):
        self.log_warn([u"No %s parameter specified", gc.PPN_TASK_OS_FILE_HEAD_TAIL_FORMAT])
    # create writer
    # the constructor will check for required parameters, if any
    # if some are missing, it will raise a SyncMapMissingParameterError
    writer = (SyncMapFormat.CODE_TO_CLASS[sync_map_format])(
        variant=sync_map_format,
        parameters=parameters,
        rconf=self.rconf,
        logger=self.logger
    )
    # create dir hierarchy, if needed
    gf.ensure_parent_directory(output_file_path)
    # open file for writing
    self.log(u"Writing output file...")
    with io.open(output_file_path, "w", encoding="utf-8") as output_file:
        output_file.write(writer.format(syncmap=pruned_syncmap))
    self.log(u"Writing output file... done")
| danielbair/aeneas | aeneas/syncmap/__init__.py | Python | agpl-3.0 | 20,754 | [
"VisIt"
] | 6ef9c4607b15beee0cd6d6e76ae85677df5f58ead8e726b9e13d4a967065be75 |
"""Axis binary sensor platform tests."""
from homeassistant.components.axis.const import DOMAIN as AXIS_DOMAIN
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorDeviceClass,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from .test_device import NAME, setup_axis_integration
async def test_platform_manually_configured(hass):
    """Test that nothing happens when platform is manually configured."""
    setup_result = await async_setup_component(
        hass,
        BINARY_SENSOR_DOMAIN,
        {BINARY_SENSOR_DOMAIN: {"platform": AXIS_DOMAIN}},
    )
    # Setup succeeds, but no Axis data is created from manual configuration.
    assert setup_result is True
    assert AXIS_DOMAIN not in hass.data
async def test_no_binary_sensors(hass):
    """Test that no sensors in Axis results in no sensor entities."""
    await setup_axis_integration(hass)
    # No events were fed in, so no binary_sensor entities should exist.
    assert not hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)
async def test_binary_sensors(hass, mock_rtsp_event):
    """Test that sensors are loaded properly."""
    await setup_axis_integration(hass)
    # PIR sensor event, reported inactive ("0").
    mock_rtsp_event(
        topic="tns1:Device/tnsaxis:Sensor/PIR",
        data_type="state",
        data_value="0",
        source_name="sensor",
        source_idx="0",
    )
    # VMD4 motion profile event, reported active ("1").
    mock_rtsp_event(
        topic="tnsaxis:CameraApplicationPlatform/VMD/Camera1Profile1",
        data_type="active",
        data_value="1",
    )
    # Unsupported event
    mock_rtsp_event(
        topic="tns1:PTZController/tnsaxis:PTZPresets/Channel_1",
        data_type="on_preset",
        data_value="1",
        source_name="PresetToken",
        source_idx="0",
    )
    await hass.async_block_till_done()
    # Only the two supported events become entities; PTZ preset is ignored.
    assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 2
    pir = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_pir_0")
    assert pir.state == STATE_OFF
    assert pir.name == f"{NAME} PIR 0"
    assert pir.attributes["device_class"] == BinarySensorDeviceClass.MOTION
    vmd4 = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_vmd4_profile_1")
    assert vmd4.state == STATE_ON
    assert vmd4.name == f"{NAME} VMD4 Profile 1"
    assert vmd4.attributes["device_class"] == BinarySensorDeviceClass.MOTION
| home-assistant/home-assistant | tests/components/axis/test_binary_sensor.py | Python | apache-2.0 | 2,246 | [
"VMD"
] | 3fe8122cb1c0a93a3705183539573520d653db2d037105c610b5ecd623378b74 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# OpenModes - An eigenmode solver for open electromagnetic resonantors
# Copyright (C) 2013 David Powell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
from __future__ import print_function
import os.path as osp
import numpy as np
from numpy.testing import assert_allclose
import matplotlib.pyplot as plt
import scipy.linalg as la
import openmodes
import openmodes.basis
from openmodes.sources import PlaneWaveSource
from openmodes.constants import c
from openmodes.integration import triangle_centres
from helpers import (read_1d_complex, write_1d_complex,
read_2d_real, write_2d_real)
tests_location = osp.split(__file__)[0]
mesh_dir = osp.join(tests_location, 'input', 'test_horseshoe')
reference_dir = osp.join(tests_location, 'reference', 'test_horseshoe')
def assert_allclose_sign(a, b, rtol):
    """Assert that two arrays agree to relative tolerance `rtol`, where
    the whole of `b` (but not individual elements) may be negated
    relative to `a`.  Useful for eigenvectors, which are only defined
    up to an overall sign."""
    tolerance = rtol * abs(a)
    same_sign = np.all(np.abs(a - b) < tolerance)
    opposite_sign = np.all(np.abs(a + b) < tolerance)
    assert same_sign or opposite_sign
def test_horseshoe_modes(plot=False, skip_asserts=False,
                         write_reference=False):
    "Modes of horseshoe"
    # Simulate the horseshoe with loop-star basis functions.
    sim = openmodes.Simulation(name='horseshoe_modes',
                               basis_class=openmodes.basis.LoopStarBasis)
    mesh = sim.load_mesh(osp.join(mesh_dir, 'horseshoe_rect.msh'))
    sim.place_part(mesh)

    # Estimate the first three poles near 10 GHz, then refine them.
    s_start = 2j*np.pi*10e9
    estimates = sim.estimate_poles(s_start, modes=3, cauchy_integral=False)
    refined = sim.refine_poles(estimates)
    mode_s = refined.s
    mode_j = refined.vr
    print("Singularities found at", mode_s)

    ref_names = ('eigenvector_0.txt', 'eigenvector_1.txt',
                 'eigenvector_2.txt')

    if write_reference:
        # Regenerate the stored reference eigenvectors.
        for mode, name in enumerate(ref_names):
            write_1d_complex(osp.join(reference_dir, name),
                             mode_j["J", :, 'modes', mode])

    j_refs = [read_1d_complex(osp.join(reference_dir, name))
              for name in ref_names]

    if not skip_asserts:
        # Pole locations must match the stored values.
        assert_allclose(mode_s[0], [-2.585729e+09 + 3.156438e+10j,
                                    -1.887518e+10 + 4.500579e+10j,
                                    -1.991163e+10 + 6.846221e+10j],
                        rtol=1e-3)
        # Eigencurrents match the references, up to an overall sign.
        for mode, j_ref in enumerate(j_refs):
            assert_allclose_sign(mode_j["J", :, 'modes', mode], j_ref,
                                 rtol=1e-2)

    if plot:
        for mode in range(3):
            sim.plot_3d(solution=mode_j["J", :, 'modes', mode],
                        output_format='mayavi', compress_scalars=3)
def test_surface_normals(plot=False, skip_asserts=False,
                         write_reference=False):
    "Test the surface normals of a horseshoe mesh"
    simulation = openmodes.Simulation()
    mesh = simulation.load_mesh(osp.join(mesh_dir, 'horseshoe_rect.msh'))
    part = simulation.place_part(mesh)
    basis = simulation.basis_container[part]

    # Integration points at triangle centres, flattened to an (N, 3) array.
    points, rho = basis.integration_points(mesh.nodes, triangle_centres)
    normals = mesh.surface_normals
    points = points.reshape((-1, 3))

    if write_reference:
        write_2d_real(osp.join(reference_dir, 'surface_r.txt'), points)
        write_2d_real(osp.join(reference_dir, 'surface_normals.txt'), normals)

    points_ref = read_2d_real(osp.join(reference_dir, 'surface_r.txt'))
    normals_ref = read_2d_real(osp.join(reference_dir, 'surface_normals.txt'))

    if not skip_asserts:
        assert_allclose(points, points_ref)
        assert_allclose(normals, normals_ref)

    if plot:
        # Show a cone for each triangle, pointing along its normal.
        from mayavi import mlab
        mlab.figure()
        mlab.quiver3d(points[:, 0], points[:, 1], points[:, 2],
                      normals[:, 0], normals[:, 1], normals[:, 2],
                      mode='cone')
        mlab.view(distance='auto')
        mlab.show()
def test_extinction(plot_extinction=False, skip_asserts=False,
                    write_reference=False):
    "Test extinction of a horseshoe"
    simulation = openmodes.Simulation(name='horseshoe_extinction',
                                      basis_class=openmodes.basis.LoopStarBasis)
    mesh = simulation.load_mesh(osp.join(mesh_dir, 'horseshoe_rect.msh'))
    simulation.place_part(mesh)

    freqs = np.linspace(1e8, 20e9, 101)
    extinction = np.empty(len(freqs), np.complex128)

    # x-polarised plane wave incident along +z.
    polarisation = np.array([1, 0, 0], dtype=np.complex128)
    direction = np.array([0, 0, 1], dtype=np.complex128)
    plane_wave = PlaneWaveSource(polarisation, direction)

    # Extinction cross-section from the solved surface currents.
    for freq_count, s in simulation.iter_freqs(freqs):
        Z = simulation.impedance(s)
        V = simulation.source_vector(plane_wave, s)
        extinction[freq_count] = np.vdot(V, Z.solve(V))

    if write_reference:
        # generate the reference extinction solution
        write_1d_complex(osp.join(reference_dir, 'extinction.txt'), extinction)

    extinction_ref = read_1d_complex(osp.join(reference_dir, 'extinction.txt'))

    if not skip_asserts:
        assert_allclose(extinction, extinction_ref, rtol=1e-3)

    if plot_extinction:
        # to plot the generated and reference solutions
        plt.figure()
        plt.plot(freqs*1e-9, extinction.real)
        plt.plot(freqs*1e-9, extinction_ref.real, '--')
        plt.plot(freqs*1e-9, extinction.imag)
        plt.plot(freqs*1e-9, extinction_ref.imag, '--')
        plt.xlabel('f (GHz)')
        plt.show()
def horseshoe_extinction_modes():
    """Interactive comparison of the directly-computed extinction with the
    reconstructions from singularity-expansion (SEM) and eigencurrent
    (EEM) models.  Produces plots; not run as part of the test suite."""
    sim = openmodes.Simulation(name='horseshoe_extinction_modes',
                               basis_class=openmodes.basis.LoopStarBasis)
    shoe = sim.load_mesh(osp.join('input', 'test_horseshoe',
                                  'horseshoe_rect.msh'))
    sim.place_part(shoe)
    # Find the first few singularities near 10 GHz and build scalar models.
    s_start = 2j*np.pi*10e9
    num_modes = 5
    mode_s, mode_j = sim.part_singularities(s_start, num_modes)
    models = sim.construct_models(mode_s, mode_j)[0]
    num_freqs = 101
    freqs = np.linspace(1e8, 20e9, num_freqs)
    extinction = np.empty(num_freqs, np.complex128)
    extinction_sem = np.empty((num_freqs, num_modes), np.complex128)
    extinction_eem = np.empty((num_freqs, num_modes), np.complex128)
    # x-polarised plane wave travelling in +z.
    e_inc = np.array([1, 0, 0], dtype=np.complex128)
    k_hat = np.array([0, 0, 1], dtype=np.complex128)
    z_sem = np.empty((num_freqs, num_modes), np.complex128)
    z_eem = np.empty((num_freqs, num_modes), np.complex128)
    z_eem_direct = np.empty((num_freqs, num_modes), np.complex128)
    for freq_count, s in sim.iter_freqs(freqs):
        Z = sim.impedance(s)[0][0]
        V = sim.source_plane_wave(e_inc, s/c*k_hat)[0]
        # Full solution.
        extinction[freq_count] = np.vdot(V, la.solve(Z[:], V))
        # Per-mode SEM impedance and extinction contribution.
        z_sem[freq_count] = [model.scalar_impedance(s) for model in models]
        extinction_sem[freq_count] = [np.vdot(V, model.solve(s, V)) for model in models]
        # EEM impedances, with and without the Gram matrix.
        z_eem_direct[freq_count], _ = Z.eigenmodes(num_modes, use_gram=False)
        z_eem[freq_count], j_eem = Z.eigenmodes(start_j = mode_j[0], use_gram=True)
        extinction_eem[freq_count] = [np.vdot(V, j_eem[:, mode])*np.dot(V, j_eem[:, mode])/z_eem[freq_count, mode] for mode in range(num_modes)]
    # Extinction: full vs summed SEM ('--') and EEM ('-.') reconstructions.
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    plt.plot(freqs*1e-9, extinction.real)
    plt.plot(freqs*1e-9, np.sum(extinction_sem.real, axis=1), '--')
    plt.plot(freqs*1e-9, np.sum(extinction_eem.real, axis=1), '-.')
    plt.xlabel('f (GHz)')
    plt.subplot(122)
    plt.plot(freqs*1e-9, extinction.imag)
    plt.plot(freqs*1e-9, np.sum(extinction_sem.imag, axis=1), '--')
    plt.plot(freqs*1e-9, np.sum(extinction_eem.imag, axis=1), '-.')
    plt.suptitle("Extinction")
    plt.show()
    # EEM impedance computed without the Gram matrix.
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    plt.plot(freqs*1e-9, z_eem_direct.real)
    #plt.ylim(0, 80)
    plt.xlabel('f (GHz)')
    plt.subplot(122)
    plt.plot(freqs*1e-9, z_eem_direct.imag)
    plt.plot([freqs[0]*1e-9, freqs[-1]*1e-9], [0, 0], 'k')
    plt.suptitle("EEM impedance without Gram matrix")
    plt.show()
    # Modal impedances: EEM (solid) against SEM ('--').
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    plt.plot(freqs*1e-9, z_eem.real)
    plt.plot(freqs*1e-9, z_sem.real, '--')
    #plt.ylim(0, 80)
    plt.xlabel('f (GHz)')
    plt.subplot(122)
    plt.plot(freqs*1e-9, z_eem.imag)
    plt.plot(freqs*1e-9, z_sem.imag, '--')
    plt.ylim(-100, 100)
    # plt.semilogy(freqs*1e-9, abs(z_eem.imag))
    # plt.semilogy(freqs*1e-9, abs(z_sem.imag), '--')
    plt.suptitle("SEM and EEM impedance")
    plt.show()
    # Modal admittances: EEM (solid) against SEM ('--').
    y_sem = 1/z_sem
    y_eem = 1/z_eem
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    plt.plot(freqs*1e-9, y_eem.real)
    plt.plot(freqs*1e-9, y_sem.real, '--')
    plt.xlabel('f (GHz)')
    plt.subplot(122)
    plt.plot(freqs*1e-9, y_eem.imag)
    plt.plot(freqs*1e-9, y_sem.imag, '--')
    plt.xlabel('f (GHz)')
    plt.suptitle("SEM and EEM admittance")
    plt.show()
if __name__ == "__main__":
    # Run interactively: show plots and skip the regression asserts.
    #test_extinction_modes()
    test_horseshoe_modes(plot=True, skip_asserts=True)
    test_extinction(plot_extinction=True, skip_asserts=True)
    test_surface_normals(plot=True, skip_asserts=True)
| DavidPowell/OpenModes | test/test_horseshoe.py | Python | gpl-3.0 | 10,192 | [
"Mayavi"
] | cf03be417721084cdb9f8cd55c96afbfd38c7ccf2bd64ad397f811a68967d6b6 |
#!/usr/bin/python
'''
Script to facilitate updating the sector map when at degree 33
'''
# Copyright 2008, 2015 Squiffle
# TODO: Would be nice to somehow check that the enemy list is up-to-date
# TODO: Command-line options to change filenames
# TODO: Command-line option to set jellyfish_sectors_for_empaths
from __future__ import absolute_import
from __future__ import print_function
import ssw_sector_map2 as ssw_sector_map
import ssw_map_utils
import sys

version = 0.1

# Defaults
map_filename = "ssw_sector_map.htm"
enemy_sectors_filename = "ssw_enemy_sectors.txt"
jellyfish_sectors_for_empaths = 2

# Sectors known to belong to the enemy.  Initialised here so the script
# doesn't crash with a NameError if the file exists but contains no
# "sector(s) to probe" line (the original code only assigned this inside
# the parse loop or the IOError handler).
enemy_sectors = []

# If enemy sectors file exists, read it
try:
    # 'with' ensures the file is closed even if readlines() fails.
    with open(enemy_sectors_filename) as enemy_file:
        lines = enemy_file.readlines()
except IOError:
    print()
    print("**** Cannot open file %s - assuming no known enemy sectors" % enemy_sectors_filename)
else:
    # Parse out the list of known enemy sectors
    for line in lines:
        if line.find("sector(s) to probe") > -1:
            start = line.find('[')
            enemy_sectors = [int(x) for x in line[start+1:-2].split(',')]

# Parse the map, and check that it's valid and current
page = open(map_filename)
p = ssw_sector_map.SectorMapParser(page)

map_valid, reason = p.valid()
if not map_valid:
    print("Sector map file %s is invalid - %s" % (map_filename, reason))
    # The original omitted 'import sys', so this line raised a NameError
    # instead of exiting cleanly.
    sys.exit(2)

if not ssw_map_utils.is_todays(p):
    print()
    print("**** Map is more than 24 hours old")

# Get the list of unexplored sectors in the current map
unexplored_sectors = ssw_map_utils.all_unknown_sectors(p)

# Calculate which of those aren't known enemy sectors
probe = set(unexplored_sectors) - set(enemy_sectors)

# Which unknown sectors are known to jellyfish ?
unknown_sectors_with_jellyfish = ssw_map_utils.unknown_sectors_with_jellyfish(p)

# Tell user which sectors to probe and whether to talk to the empaths
# Not worth doing the jellyfish thing for one sector
if len(unknown_sectors_with_jellyfish) >= jellyfish_sectors_for_empaths:
    print("Visit the empaths to explore these sectors: %s" % str(sorted(list(unknown_sectors_with_jellyfish))))
    probe = probe - unknown_sectors_with_jellyfish

if len(probe) > 0:
    print("Launch %d probes to explore these sectors: %s" % (len(probe), str(sorted(list(probe)))))
    print("Don't forget to run 'ssw_trade_routes.py -mtws > %s' after saving the updated map!" % enemy_sectors_filename)
else:
    print("No warp probes needed.")
| UEWBot/ssw-scripts | ssw_probes.py | Python | gpl-3.0 | 2,468 | [
"VisIt"
] | f7ddbcde18051dd8530b252f52ac78b9d889b441f849a574e381cf23ec786ac4 |
# Copyright 2004-2010 PyTom <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains displayables that move, zoom, rotate, or otherwise
# transform displayables. (As well as displayables that support them.)
import math
import renpy
from renpy.display.render import render, IDENTITY, Matrix2D
from renpy.display.layout import Container
# Convert a position from cartesian to polar coordinates.
def cartesian_to_polar(x, y, xaround, yaround):
    """Return (angle, radius) of the point (x, y) relative to the centre
    (xaround, yaround).  The angle is in degrees in [0, 360), measured
    clockwise from straight up (the negative-y direction)."""

    offset_x = x - xaround
    offset_y = y - yaround

    radius = math.hypot(offset_x, offset_y)

    angle = math.atan2(offset_x, -offset_y) / math.pi * 180
    if angle < 0:
        angle += 360

    return angle, radius
def polar_to_cartesian(angle, radius, xaround, yaround):
    """Return the (x, y) point at `angle` degrees (clockwise from
    straight up) and distance `radius` from the centre (xaround,
    yaround).  Each result coordinate is coerced to the type of the
    corresponding centre coordinate."""

    theta = angle * math.pi / 180

    x = type(xaround)(xaround + radius * math.sin(theta))
    y = type(yaround)(yaround - radius * math.cos(theta))

    return x, y
class TransformState(renpy.object.Object):
    """Record of the transform properties (position, anchor, rotation,
    zoom, alpha, cropping, size) that a Transform applies to its child.
    The get_/set_ method names are part of the public surface (they back
    the property objects below), so they must not be renamed."""

    def __init__(self): # W0231
        self.alpha = 1
        self.rotate = None
        self.zoom = 1
        self.xzoom = 1
        self.yzoom = 1
        self.xpos = 0
        self.ypos = 0
        self.xanchor = 0
        self.yanchor = 0
        # Centre of rotation for polar positioning.
        self.xaround = 0.0
        self.yaround = 0.0
        self.xanchoraround = 0.0
        self.yanchoraround = 0.0
        self.subpixel = False
        # Cropping rectangle, or the two corners it is derived from.
        self.crop = None
        self.corner1 = None
        self.corner2 = None
        self.size = None
        self.delay = 0

    def take_state(self, ts):
        # Copy every field from the other state wholesale.
        self.__dict__.update(ts.__dict__)

    # Returns a dict, with p -> (old, new) where p is a property that
    # has changed between this object and the new object.
    def diff(self, ts):
        rv = { }
        for k, old in self.__dict__.iteritems():
            new = ts.__dict__[k]
            if old != new:
                rv[k] = (old, new)
        return rv

    # These update various properties.

    def get_xalign(self):
        return self.xpos

    def set_xalign(self, v):
        # Align sets position and anchor to the same fraction.
        self.xpos = v
        self.xanchor = v

    xalign = property(get_xalign, set_xalign)

    def get_yalign(self):
        return self.ypos

    def set_yalign(self, v):
        self.ypos = v
        self.yanchor = v

    yalign = property(get_yalign, set_yalign)

    def get_around(self):
        return (self.xaround, self.yaround)

    def set_around(self, value):
        # Plain 'around' rotates the position only, not the anchor.
        self.xaround, self.yaround = value
        self.xanchoraround, self.yanchoraround = None, None

    def set_alignaround(self, value):
        # 'alignaround' rotates the anchor along with the position.
        self.xaround, self.yaround = value
        self.xanchoraround, self.yanchoraround = value

    around = property(get_around, set_around)
    alignaround = property(get_around, set_alignaround)

    def get_angle(self):
        angle, radius = cartesian_to_polar(self.xpos, self.ypos, self.xaround, self.yaround)
        return angle

    def get_radius(self):
        angle, radius = cartesian_to_polar(self.xpos, self.ypos, self.xaround, self.yaround)
        return radius

    def set_angle(self, value):
        # Keep the current radius, replace the angle.
        angle, radius = cartesian_to_polar(self.xpos, self.ypos, self.xaround, self.yaround)
        angle = value
        self.xpos, self.ypos = polar_to_cartesian(angle, radius, self.xaround, self.yaround)

        if self.xanchoraround:
            # NOTE(review): the anchor is recomputed around (xaround,
            # yaround) rather than (xanchoraround, yanchoraround) —
            # confirm this is intended.
            self.xanchor, self.yanchor = polar_to_cartesian(angle, radius, self.xaround, self.yaround)

    def set_radius(self, value):
        # Keep the current angle, replace the radius.
        angle, radius = cartesian_to_polar(self.xpos, self.ypos, self.xaround, self.yaround)
        radius = value
        self.xpos, self.ypos = polar_to_cartesian(angle, radius, self.xaround, self.yaround)

        if self.xanchoraround:
            self.xanchor, self.yanchor = polar_to_cartesian(angle, radius, self.xaround, self.yaround)

    angle = property(get_angle, set_angle)
    radius = property(get_radius, set_radius)

    def get_pos(self):
        return self.xpos, self.ypos

    def set_pos(self, value):
        self.xpos, self.ypos = value

    pos = property(get_pos, set_pos)

    def get_anchor(self):
        return self.xanchor, self.yanchor

    def set_anchor(self, value):
        self.xanchor, self.yanchor = value

    anchor = property(get_anchor, set_anchor)

    def get_align(self):
        return self.xpos, self.ypos

    def set_align(self, value):
        self.xanchor, self.yanchor = value
        self.xpos, self.ypos = value

    align = property(get_align, set_align)
class Proxy(object):
    """Descriptor that forwards attribute access on a Transform through
    to the identically-named field on its TransformState (held in the
    instance's `state` attribute)."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, objtype):
        return getattr(obj.state, self.name)

    def __set__(self, obj, value):
        return setattr(obj.state, self.name, value)
class Transform(Container):
    """Container that applies the transformations in a TransformState
    (position, rotation, zoom, alpha, cropping, resizing) to its child
    whenever it is rendered.  An optional `function` callback may update
    the state on each render, driving animation; it returns the delay
    until the next redraw, or None."""

    __version__ = 3

    transform_event_responder = True

    # Proxying things over to our state.
    alpha = Proxy("alpha")
    rotate = Proxy("rotate")
    zoom = Proxy("zoom")
    xzoom = Proxy("xzoom")
    yzoom = Proxy("yzoom")

    xpos = Proxy("xpos")
    ypos = Proxy("ypos")
    xanchor = Proxy("xanchor")
    yanchor = Proxy("yanchor")

    xalign = Proxy("xalign")
    yalign = Proxy("yalign")

    around = Proxy("around")
    alignaround = Proxy("alignaround")
    angle = Proxy("angle")
    radius = Proxy("radius")

    xaround = Proxy("xaround")
    yaround = Proxy("yaround")
    xanchoraround = Proxy("xanchoraround")
    yanchoraround = Proxy("yanchoraround")

    pos = Proxy("pos")
    anchor = Proxy("anchor")
    align = Proxy("align")

    crop = Proxy("crop")
    corner1 = Proxy("corner1")
    corner2 = Proxy("corner2")
    size = Proxy("size")

    delay = Proxy("delay")

    def after_upgrade(self, version):
        # Migrate instances pickled under older versions of this class.
        if version < 1:
            self.active = False
            self.state = TransformState()

            self.state.xpos = self.xpos or 0
            self.state.ypos = self.ypos or 0
            self.state.xanchor = self.xanchor or 0
            self.state.yanchor = self.yanchor or 0
            self.state.alpha = self.alpha
            self.state.rotate = self.rotate
            self.state.zoom = self.zoom
            self.state.xzoom = self.xzoom
            self.state.yzoom = self.yzoom

            self.hide_request = False
            self.hide_response = True

        if version < 2:
            self.st = 0
            self.at = 0

        if version < 3:
            self.st_offset = 0
            self.at_offset = 0
            self.child_st_base = 0

        if version < 4:
            self.style_arg = 'transform'

    # Compatibility with old versions of the class.
    active = False

    def __init__(self, child=None, function=None, style='transform', **kwargs):
        # Keyword arguments are transform properties, applied to the
        # state below and re-applied by take_state().

        self.kwargs = kwargs
        self.style_arg = style

        super(Transform, self).__init__(style=style)

        self.function = function

        if child is not None:
            self.add(child)

        self.state = TransformState()

        # Apply the keyword arguments.
        for k, v in kwargs.iteritems():
            setattr(self.state, k, v)

        # This is the matrix transforming our coordinates into child coordinates.
        self.forward = None

        # Have we called the function at least once?
        self.active = False

        # Have we been requested to hide?
        self.hide_request = False

        # True if it's okay for us to hide.
        self.hide_response = True

        # Shown/animation times as of the last render, plus the offsets
        # used to preserve the illusion of linear time across restarts.
        self.st = 0
        self.at = 0
        self.st_offset = 0
        self.at_offset = 0

        self.child_st_base = 0

    def take_state(self, t):
        """
        Takes the transformation state from object t into this object.
        """

        self.state.take_state(t.state)

        # Apply the keyword arguments.
        for k, v in self.kwargs.iteritems():
            setattr(self.state, k, v)

    def take_execution_state(self, t):
        """
        Takes the execution state from object t into this object. This is
        overridden by renpy.atl.TransformBase.
        """

        return

    def hide(self, st, at):
        # Clone ourselves (unless we are already hiding), then give the
        # function a chance to run the hide animation.  Returning the
        # clone keeps us on-screen until hide_response becomes True.
        if not self.hide_request:
            d = self()
            d.kwargs = { }
            d.take_state(self)
            d.take_execution_state(self)
        else:
            d = self

        d.st_offset = self.st_offset
        d.at_offset = self.at_offset
        d.hide_request = True
        d.hide_response = True

        if d.function is not None:
            d.function(d, st, at)

        if not d.hide_response:
            renpy.display.render.redraw(d, 0)
            return d

    def set_child(self, child):
        self.child = child
        # Child time restarts when the child is replaced.
        self.child_st_base = self.st

    def render(self, width, height, st, at):

        # Should we perform clipping?
        clipping = False

        # Preserve the illusion of linear time.
        if st == 0:
            self.st_offset = self.st
        if at == 0:
            self.at_offset = self.at

        self.st = st = st + self.st_offset
        self.at = at = at + self.at_offset

        # If we have to, call the function that updates this transform.
        if self.function is not None:
            fr = self.function(self, st, at)

            if fr is not None:
                renpy.display.render.redraw(self, fr)

        self.active = True

        if self.state.size:
            width, height = self.state.size

        if self.child is None:
            raise Exception("Transform does not have a child.")

        cr = render(self.child, width, height, st - self.child_st_base, at)
        width, height = cr.get_size()

        forward = IDENTITY
        reverse = IDENTITY
        xo = yo = 0

        # Cropping.
        crop = self.state.crop
        if crop is None and self.state.corner1 and self.state.corner2:
            # Derive the crop rectangle from the two corners, in either order.
            x1, y1 = self.state.corner1
            x2, y2 = self.state.corner2

            minx = min(x1, x2)
            maxx = max(x1, x2)
            miny = min(y1, y2)
            maxy = max(y1, y2)

            crop = (minx, miny, maxx - minx, maxy - miny)

        if crop:
            negative_xo, negative_yo, width, height = crop
            xo = -negative_xo
            yo = -negative_yo
            clipping = True

            if self.state.rotate:
                # Pre-render the cropped child, so rotation below acts on
                # the already-clipped image.
                clipcr = renpy.display.render.Render(width, height)
                clipcr.subpixel_blit(cr, (xo, yo))
                clipcr.clipping = clipping
                xo = yo = 0
                cr = clipcr
                clipping = False

        # Size.
        if self.state.size and self.state.size != (width, height):
            nw, nh = self.state.size
            xzoom = 1.0 * nw / width
            yzoom = 1.0 * nh / height

            forward = forward * Matrix2D(1.0 / xzoom, 0, 0, 1.0 / yzoom)
            reverse = Matrix2D(xzoom, 0, 0, yzoom) * reverse

            xo = xo * xzoom
            yo = yo * yzoom

            width, height = self.state.size

        # Rotation.
        if self.state.rotate is not None:
            # The bounding square is the diagonal of the child, so the
            # rotated image always fits.
            cw = width
            ch = height

            width = height = math.hypot(cw, ch)

            angle = -self.state.rotate * math.pi / 180

            xdx = math.cos(angle)
            xdy = -math.sin(angle)
            ydx = -xdy
            ydy = xdx

            forward = forward * Matrix2D(xdx, xdy, ydx, ydy)

            xdx = math.cos(-angle)
            xdy = -math.sin(-angle)
            ydx = -xdy
            ydy = xdx

            reverse = Matrix2D(xdx, xdy, ydx, ydy) * reverse

            # Re-centre the rotated child in the enlarged bounding box.
            xo, yo = reverse.transform(-cw / 2.0, -ch / 2.0)
            xo += width / 2.0
            yo += height / 2.0

        # Zooming.
        xzoom = self.state.zoom * self.state.xzoom
        yzoom = self.state.zoom * self.state.yzoom

        if xzoom != 1 or yzoom != 1:
            forward = forward * Matrix2D(1.0 / xzoom, 0, 0, 1.0 / yzoom)
            reverse = Matrix2D(xzoom, 0, 0, yzoom) * reverse

            width *= xzoom
            height *= yzoom
            xo *= xzoom
            yo *= yzoom

        rv = renpy.display.render.Render(width, height)

        if forward is not IDENTITY:
            rv.forward = forward
            rv.reverse = reverse

        # Remembered so event() can map screen to child coordinates.
        self.forward = forward

        rv.alpha = self.state.alpha
        rv.clipping = clipping

        if self.state.subpixel:
            rv.subpixel_blit(cr, (xo, yo), main=True)
        else:
            rv.blit(cr, (xo, yo), main=True)

        self.offsets = [ (xo, yo) ]

        return rv

    def event(self, ev, x, y, st):

        if self.hide_request:
            return None

        children = self.children
        offsets = self.offsets

        for i in xrange(len(self.children)-1, -1, -1):

            d = children[i]
            xo, yo = offsets[i]

            cx = x - xo
            cy = y - yo

            # Transform screen coordinates to child coordinates.
            cx, cy = self.forward.transform(cx, cy)

            rv = d.event(ev, cx, cy, st)
            if rv is not None:
                return rv

        return None

    def __call__(self, child=None, take_state=True):
        # Clone this transform, optionally with a new child, copying
        # the transform state across.

        if child is None:
            child = self.child

        rv = Transform(
            child=child,
            function=self.function,
            style=self.style_arg,
            **self.kwargs)

        rv.take_state(self)

        return rv

    def get_placement(self):

        # Run the function once so the state is initialised before the
        # first placement query.
        if not self.active:

            if self.function is not None:
                fr = self.function(self, 0, 0)

                if fr is not None:
                    renpy.display.render.redraw(self, fr)

            self.active = True

        # State values override the style; None falls back to the style.
        xpos = self.state.xpos
        if xpos is None:
            xpos = self.style.xpos

        ypos = self.state.ypos
        if ypos is None:
            ypos = self.style.ypos

        xanchor = self.state.xanchor
        if xanchor is None:
            xanchor = self.style.xanchor

        yanchor = self.state.yanchor
        if yanchor is None:
            yanchor = self.style.yanchor

        return xpos, ypos, xanchor, yanchor, self.style.xoffset, self.style.yoffset, self.state.subpixel

    def update(self):
        # Force a re-render on the next frame.
        renpy.display.render.invalidate(self)

    def parameterize(self, name, parameters):
        # Transforms are not parameterized images.
        if parameters:
            raise Exception("Image '%s' can't take parameters '%s'. (Perhaps you got the name wrong?)" %
                            (' '.join(name), ' '.join(parameters)))

        # Note the call here.
        return self()
class ATLTransform(renpy.atl.ATLTransformBase, Transform):
    """Transform whose update function executes a compiled ATL block."""

    # NOTE(review): `context={}` is a mutable default argument; it appears
    # to be passed straight through to ATLTransformBase without being
    # mutated here — confirm before changing.
    def __init__(self, atl, child=None, context={}, parameters=None, style='transform'):
        renpy.atl.ATLTransformBase.__init__(self, atl, context, parameters)
        Transform.__init__(self, child=child, function=self.execute, style=style)

        self.raw_child = self.child

    def show(self):
        # Run the ATL code once at time zero, so properties are set
        # before the first render.
        self.execute(self, 0, 0)
class Motion(Container):
    """
    This is used to move a child displayable around the screen. It
    works by supplying a time value to a user-supplied function,
    which is in turn expected to return a pair giving the x and y
    location of the upper-left-hand corner of the child, or a
    4-tuple giving that and the xanchor and yanchor of the child.

    The time value is a floating point number that ranges from 0 to
    1. If repeat is True, then the motion repeats every period
    sections. (Otherwise, it stops.) If bounce is true, the
    time value varies from 0 to 1 to 0 again.

    The function supplied needs to be pickleable, which means it needs
    to be defined as a name in an init block. It cannot be a lambda or
    anonymous inner function. If you can get away with using Pan or
    Move, use them instead.

    Please note that floats and ints are interpreted as for xpos and
    ypos, with floats being considered fractions of the screen.
    """

    def __init__(self, function, period, child=None, new_widget=None, old_widget=None, repeat=False, bounce=False, delay=None, anim_timebase=False, tag_start=None, time_warp=None, add_sizes=False, style='motion', **properties):
        """
        @param child: The child displayable.

        @param new_widget: If child is None, it is set to new_widget,
        so that we can speak the transition protocol.

        @param old_widget: Ignored, for compatibility with the transition protocol.

        @param function: A function that takes a floating point value and returns
        an xpos, ypos tuple.

        @param period: The amount of time it takes to go through one cycle, in seconds.

        @param repeat: Should we repeat after a period is up?

        @param bounce: Should we bounce?

        @param delay: How long this motion should take. If repeat is None, defaults to period.

        @param anim_timebase: If True, use the animation timebase rather than the shown timebase.

        @param time_warp: If not None, this is a function that takes a
        fraction of the period (between 0.0 and 1.0), and returns a
        new fraction of the period. Use this to warp time, applying
        acceleration and deceleration to motions.

        This can also be used as a transition. When used as a
        transition, the motion is applied to the new_widget for delay
        seconds.
        """

        if child is None:
            child = new_widget

        if delay is None and not repeat:
            delay = period

        super(Motion, self).__init__(style=style, **properties)

        if child is not None:
            self.add(child)

        self.function = function
        self.period = period
        self.repeat = repeat
        self.bounce = bounce
        self.delay = delay
        self.anim_timebase = anim_timebase
        self.time_warp = time_warp
        # If True, the function also receives (width, height, child
        # width, child height) as a second argument.
        self.add_sizes = add_sizes

        # Placement computed by the last render(); None until then.
        self.position = None

    def get_placement(self):
        if self.position is None:
            return super(Motion, self).get_placement()
        else:
            return self.position + (self.style.xoffset, self.style.yoffset, self.style.subpixel)

    def render(self, width, height, st, at):

        # Pick the timebase the motion runs on.
        if self.anim_timebase:
            t = at
        else:
            t = st

        # Clamp/wrap t, and request redraws while the motion is live.
        if renpy.game.less_updates:
            # Skip the animation: jump straight to the final state.
            if self.delay:
                t = self.delay
                if self.repeat:
                    t = t % self.period
            else:
                t = self.period
        elif self.delay and t >= self.delay:
            t = self.delay
            if self.repeat:
                t = t % self.period
        elif self.repeat:
            t = t % self.period
            renpy.display.render.redraw(self, 0)
        else:
            if t > self.period:
                t = self.period
            else:
                renpy.display.render.redraw(self, 0)

        # Normalise to a fraction of the period.
        if self.period > 0:
            t /= self.period
        else:
            t = 1

        if self.time_warp:
            t = self.time_warp(t)

        if self.bounce:
            # 0 -> 1 -> 0 over one period.
            t = t * 2
            if t > 1.0:
                t = 2.0 - t

        child = render(self.child, width, height, st, at)
        cw, ch = child.get_size()

        if self.add_sizes:
            res = self.function(t, (width, height, cw, ch))
        else:
            res = self.function(t)

        res = tuple(res)

        # A 2-tuple is (xpos, ypos); a 4-tuple adds (xanchor, yanchor).
        if len(res) == 2:
            self.position = res + (self.style.xanchor, self.style.yanchor)
        else:
            self.position = res

        rv = renpy.display.render.Render(cw, ch)
        rv.blit(child, (0, 0))

        self.offsets = [ (0, 0) ]

        return rv
class Interpolate(object):
    """Motion function which linearly interpolates between a start and
    an end position list.  Symbolic anchor names ('top', 'center',
    'bottom', 'left', 'right') are translated to fractions, and float
    entries are scaled by the corresponding entry of the sizes tuple."""

    anchors = {
        'top' : 0.0,
        'center' : 0.5,
        'bottom' : 1.0,
        'left' : 0.0,
        'right' : 1.0,
        }

    def __init__(self, start, end):

        if len(start) != len(end):
            raise Exception("The start and end must have the same number of arguments.")

        self.start = [ self.anchors.get(v, v) for v in start ]
        self.end = [ self.anchors.get(v, v) for v in end ]

    def __call__(self, t, sizes=(None, None, None, None)):

        rv = [ ]

        for begin, finish, size in zip(self.start, self.end, sizes):

            # Floats are fractions of the corresponding size.
            if size is not None:
                if type(begin) is float:
                    begin = begin * size
                if type(finish) is float:
                    finish = finish * size

            value = begin + t * (finish - begin)
            rv.append(renpy.display.core.absolute(value))

        return rv
def Pan(startpos, endpos, time, child=None, repeat=False, bounce=False,
        anim_timebase=False, style='motion', time_warp=None, **properties):
    """
    Pans the screen across a child displayable (almost always an image
    larger than the screen; no cropping is performed), by interpolating
    the placement of the screen's upper-left corner from `startpos` to
    `endpos` over `time` seconds.  Both positions are given relative to
    the image.

    If `repeat` is true the pan loops forever, and if `bounce` is true
    it runs from start to end and back again.  `anim_timebase` selects
    the animation timebase instead of the shown timebase.  `time_warp`,
    if not None, maps the elapsed fraction of the period (0.0 to 1.0)
    onto a new fraction, allowing acceleration and deceleration.

    This can be used as a transition. See Motion for details.
    """

    start_x, start_y = startpos
    end_x, end_y = endpos

    # Panning the viewport right/down moves the child left/up, hence
    # the negated coordinates.
    interpolator = Interpolate((-start_x, -start_y), (-end_x, -end_y))

    return Motion(interpolator,
                  time,
                  child,
                  repeat=repeat,
                  bounce=bounce,
                  style=style,
                  anim_timebase=anim_timebase,
                  time_warp=time_warp,
                  add_sizes=True,
                  **properties)
def Move(startpos, endpos, time, child=None, repeat=False, bounce=False,
         anim_timebase=False, style='motion', time_warp=None, **properties):
    """
    Moves a child displayable within its containing area, by
    interpolating its placement from `startpos` to `endpos` (both
    relative to the containing area) over `time` seconds.

    If `repeat` is true the move loops forever, and if `bounce` is true
    it runs from start to end and back again.  `anim_timebase` selects
    the animation timebase instead of the shown timebase.  `time_warp`,
    if not None, maps the elapsed fraction of the period (0.0 to 1.0)
    onto a new fraction, allowing acceleration and deceleration.

    This can be used as a transition. See Motion for details.
    """

    interpolator = Interpolate(startpos, endpos)

    return Motion(interpolator,
                  time,
                  child,
                  repeat=repeat,
                  bounce=bounce,
                  anim_timebase=anim_timebase,
                  style=style,
                  time_warp=time_warp,
                  add_sizes=True,
                  **properties)
class Revolver(object):
    """Motion function which revolves a child around a point, sweeping
    from `start` to `end` degrees over the motion's period.  `around`
    is the centre of revolution (relative to the containing area), and
    `cor` the centre of rotation within the child; floats in either are
    fractions of the relevant size.

    NOTE: the tuple-unpacking parameter in __call__ is Python 2-only
    syntax, matching the rest of this file."""

    def __init__(self, start, end, child, around=(0.5, 0.5), cor=(0.5, 0.5), pos=None):
        self.start = start
        self.end = end
        self.around = around
        self.cor = cor
        # Explicit placement override; if None, the child's own
        # placement is queried on each call.
        self.pos = pos
        self.child = child

    def __call__(self, t, (w, h, cw, ch)):

        # Converts a float to an integer in the given range, passes
        # integers through unchanged.
        def fti(x, r):
            if x is None:
                x = 0

            if isinstance(x, float):
                return int(x * r)
            else:
                return x

        if self.pos is None:
            pos = self.child.get_placement()
        else:
            pos = self.pos

        xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel = pos

        # Resolve fractional placements against the area and child sizes.
        xpos = fti(xpos, w)
        ypos = fti(ypos, h)
        xanchor = fti(xanchor, cw)
        yanchor = fti(yanchor, ch)

        xaround, yaround = self.around
        xaround = fti(xaround, w)
        yaround = fti(yaround, h)

        xcor, ycor = self.cor
        xcor = fti(xcor, cw)
        ycor = fti(ycor, ch)

        # Interpolate the sweep angle and convert to radians.
        angle = self.start + (self.end - self.start) * t
        angle *= math.pi / 180

        # The center of rotation, relative to the xaround.
        x = xpos - xanchor + xcor - xaround
        y = ypos - yanchor + ycor - yaround

        # Rotate it.
        nx = x * math.cos(angle) - y * math.sin(angle)
        ny = x * math.sin(angle) + y * math.cos(angle)

        # Project it back.
        nx = nx - xcor + xaround
        ny = ny - ycor + yaround

        return (renpy.display.core.absolute(nx), renpy.display.core.absolute(ny), 0, 0)
def Revolve(start, end, time, child, around=(0.5, 0.5), cor=(0.5, 0.5), pos=None, **properties):
    """Revolves `child` from `start` to `end` degrees over `time`
    seconds, by wrapping a Revolver motion function in a Motion."""

    revolver = Revolver(start, end, child, around=around, cor=cor, pos=pos)

    return Motion(revolver, time, child, add_sizes=True, **properties)
class Zoom(renpy.display.core.Displayable):
    """
    This displayable causes a zoom to take place, using image
    scaling. The render of this displayable is always of the supplied
    size. The child displayable is rendered, and a rectangle is
    cropped out of it. This rectangle is interpolated between the
    start and end rectangles. The rectangle is then scaled to the
    supplied size. The zoom will take time seconds, after which it
    will show the end rectangle, unless an after_child is
    given.

    The algorithm used for scaling does not perform any
    interpolation or other smoothing.
    """

    def __init__(self, size, start, end, time, child,
                 after_child=None, time_warp=None,
                 bilinear=True, opaque=True,
                 anim_timebase=False,
                 repeat=False,
                 style='motion',
                 **properties):
        """
        @param size: The size that the rectangle is scaled to, a
        (width, height) tuple.

        @param start: The start rectangle, an (xoffset, yoffset,
        width, height) tuple.

        @param end: The end rectangle, an (xoffset, yoffset,
        width, height) tuple.

        @param time: The amount of time it will take to
        interpolate from the start to the end rectange.

        @param child: The child displayable.

        @param after_child: If present, a second child
        widget. This displayable will be rendered after the zoom
        completes. Use this to snap to a sharp displayable after
        the zoom is done.

        @param time_warp: If not None, this is a function that takes a
        fraction of the period (between 0.0 and 1.0), and returns a
        new fraction of the period. Use this to warp time, applying
        acceleration and deceleration to motions.
        """
        super(Zoom, self).__init__(style=style, **properties)
        child = renpy.easy.displayable(child)
        self.size = size
        self.start = start
        self.end = end
        self.time = time
        # Fraction of the zoom completed; updated on every render().
        self.done = 0.0
        self.child = child
        self.repeat = repeat
        if after_child:
            self.after_child = renpy.easy.displayable(after_child)
        else:
            # NOTE(review): self.end is documented as a rectangle tuple,
            # so this comparison against the float 1.0 never holds and
            # after_child is always None here -- confirm intent.
            if self.end == 1.0:
                self.after_child = child
            else:
                self.after_child = None
        self.time_warp = time_warp
        self.bilinear = bilinear
        self.opaque = opaque
        self.anim_timebase = anim_timebase

    def visit(self):
        # Both children participate in prediction/visiting.
        return [ self.child, self.after_child ]

    def render(self, width, height, st, at):
        # Pick the timebase: animation time or shown time.
        if self.anim_timebase:
            t = at
        else:
            t = st
        # Completion fraction; a zero/None time means "finished at once".
        if self.time:
            done = min(t / self.time, 1.0)
        else:
            done = 1.0
        if self.repeat:
            done = done % 1.0
        # Reduced-updates mode: jump straight to the end state.
        if renpy.game.less_updates:
            done = 1.0
        self.done = done
        if self.after_child and done == 1.0:
            return renpy.display.render.render(self.after_child, width, height, st, at)
        if self.time_warp:
            done = self.time_warp(done)
        rend = renpy.display.render.render(self.child, width, height, st, at)
        surf = rend.pygame_surface()
        # Linearly interpolate each component of the crop rectangle.
        rect = tuple([ (1.0 - done) * a + done * b for a, b in zip(self.start, self.end) ])
        # Check for inclusion, report an error otherwise.
        rx, ry, rw, rh = rect
        if rx < 0 or ry < 0 or rx + rw > rend.width or ry + rh > rend.height:
            raise Exception("Zoom rectangle %r falls outside of %dx%d parent surface." % (rect, rend.width, rend.height))
        rv = zoom_core(rend, surf, rect, self.size[0], self.size[1], self.bilinear, self.opaque)
        # Still animating: request an immediate redraw.
        if self.done < 1.0:
            renpy.display.render.redraw(self, 0)
        return rv

    def event(self, ev, x, y, st):
        # Only pass events through once the zoom has finished.
        if self.done == 1.0:
            return self.child.event(ev, x, y, st)
        else:
            return None
def zoom_core(rend, surf, rect, neww, newh, bilinear, opaque):
    # Scale `rect` (a sub-rectangle of `surf`, the pygame surface of the
    # render `rend`) to neww x newh and wrap the result in a Render.
    if bilinear and opaque:
        # Fast path: bilinear-scale directly into the destination at draw
        # time, clipped to the visible region. Keyword defaults bind the
        # current values so the closure is self-contained.
        def draw(dest, x, y, surf=surf, rect=rect, neww=neww, newh=newh):
            # Find the part of dest we must draw to. Realize x and y
            # are negative or 0.
            sx, sy, sw, sh = rect
            dw, dh = dest.get_size()
            subw = min(neww + x, dw)
            subh = min(newh + y, dh)
            if subw <= 0 or subh <= 0:
                return
            dest = dest.subsurface((0, 0, subw, subh))
            renpy.display.module.bilinear_scale(surf, dest,
                                                sx, sy, sw, sh,
                                                -x, -y, neww, newh,
                                                precise=1)
        rv = renpy.display.render.Render(neww, newh, draw_func=draw, opaque=True)
    else:
        if bilinear:
            # Bilinear scale into a fresh surface (with alpha).
            sx, sy, sw, sh = rect
            scalesurf = renpy.display.pgrender.surface((neww, newh), True)
            renpy.display.module.bilinear_scale(surf, scalesurf,
                                                sx, sy, sw, sh,
                                                0, 0, neww, newh,
                                                precise=1)
        else:
            # Nearest-neighbor scale; the blit lock serializes pygame
            # surface operations.
            renpy.display.render.blit_lock.acquire()
            scalesurf = renpy.display.pgrender.transform_scale(surf.subsurface(rect), (neww, newh))
            renpy.display.render.blit_lock.release()
        renpy.display.render.mutated_surface(scalesurf)
        rv = renpy.display.render.Render(neww, newh)
        rv.blit(scalesurf, (0, 0))
    rv.depends_on(rend)
    return rv
class FactorZoom(renpy.display.core.Displayable):
    """
    Zooms its child by a scale factor that is linearly interpolated from
    `start` to `end` over `time` seconds, using image scaling.
    """

    def __init__(self, start, end, time, child,
                 after_child=None, time_warp=None,
                 bilinear=True, opaque=True,
                 anim_timebase=False,
                 repeat=False,
                 style='motion',
                 **properties):
        super(FactorZoom, self).__init__(style=style, **properties)
        child = renpy.easy.displayable(child)
        self.start = start
        self.end = end
        self.time = time
        self.child = child
        self.repeat = repeat
        if after_child:
            self.after_child = renpy.easy.displayable(after_child)
        else:
            # With no explicit after_child, snap back to the unscaled
            # child when the final factor is exactly 1.0.
            if self.end == 1.0:
                self.after_child = child
            else:
                self.after_child = None
        self.time_warp = time_warp
        self.bilinear = bilinear
        self.opaque = opaque
        # Fraction of the zoom completed; updated on every render().
        self.done = 0.0
        self.anim_timebase = anim_timebase

    def visit(self):
        return [ self.child, self.after_child ]

    def render(self, width, height, st, at):
        # Pick the timebase: animation time or shown time.
        if self.anim_timebase:
            t = at
        else:
            t = st
        if self.time:
            done = min(t / self.time, 1.0)
        else:
            done = 1.0
        if self.repeat:
            done = done % 1.0
        # Reduced-updates mode: jump straight to the end state.
        if renpy.game.less_updates:
            done = 1.0
        self.done = done
        if self.after_child and done == 1.0:
            return renpy.display.render.render(self.after_child, width, height, st, at)
        if self.time_warp:
            done = self.time_warp(done)
        rend = renpy.display.render.render(self.child, width, height, st, at)
        surf = rend.pygame_surface()
        # Interpolated scale factor, applied to the child's natural size.
        factor = self.start * (1.0 - done) + self.end * done
        oldw, oldh = surf.get_size()
        neww = int(oldw * factor)
        newh = int(oldh * factor)
        rv = zoom_core(rend, surf, (0, 0, oldw, oldh), neww, newh, self.bilinear, self.opaque)
        # Still animating: request an immediate redraw.
        if self.done < 1.0:
            renpy.display.render.redraw(self, 0)
        return rv

    def event(self, ev, x, y, st):
        # Forward events to after_child once the zoom has finished.
        if self.done == 1.0 and self.after_child:
            return self.after_child.event(ev, x, y, st)
        else:
            return None
class SizeZoom(renpy.display.core.Displayable):
    """
    Zooms its child by interpolating the rendered size between the
    `start` and `end` (width, height) pairs over `time` seconds.
    """

    def __init__(self, start, end, time, child,
                 after_child=None, time_warp=None,
                 bilinear=True, opaque=True,
                 anim_timebase=False,
                 repeat=False,
                 style='motion',
                 **properties):
        super(SizeZoom, self).__init__(style=style, **properties)
        child = renpy.easy.displayable(child)
        self.start = start
        self.end = end
        self.time = time
        self.child = child
        self.repeat = repeat
        if after_child:
            self.after_child = renpy.easy.displayable(after_child)
        else:
            # NOTE(review): `end` is a (width, height) pair, so comparing
            # against (1.0, 1.0) only snaps to the child for a 1x1-pixel
            # target -- confirm intent.
            if self.end == (1.0, 1.0):
                self.after_child = child
            else:
                self.after_child = None
        self.time_warp = time_warp
        self.bilinear = bilinear
        self.opaque = opaque
        # Fraction of the zoom completed; updated on every render().
        self.done = 0.0
        self.anim_timebase = anim_timebase

    def visit(self):
        return [ self.child, self.after_child ]

    def render(self, width, height, st, at):
        # Pick the timebase: animation time or shown time.
        if self.anim_timebase:
            t = at
        else:
            t = st
        if self.time:
            done = min(t / self.time, 1.0)
        else:
            done = 1.0
        if self.repeat:
            done = done % 1.0
        # Reduced-updates mode: jump straight to the end state.
        if renpy.game.less_updates:
            done = 1.0
        self.done = done
        if self.after_child and done == 1.0:
            return renpy.display.render.render(self.after_child, width, height, st, at)
        if self.time_warp:
            done = self.time_warp(done)
        rend = renpy.display.render.render(self.child, width, height, st, at)
        surf = rend.pygame_surface()
        # Interpolate the destination size between start and end.
        sx, sy = self.start
        ex, ey = self.end
        neww = int(sx + (ex - sx) * done)
        newh = int(sy + (ey - sy) * done)
        oldw, oldh = surf.get_size()
        rv = zoom_core(rend, surf, (0, 0, oldw, oldh), neww, newh, self.bilinear, self.opaque)
        # Still animating: request an immediate redraw.
        if self.done < 1.0:
            renpy.display.render.redraw(self, 0)
        return rv

    def event(self, ev, x, y, st):
        # Forward events to after_child once the zoom has finished.
        if self.done == 1.0 and self.after_child:
            return self.after_child.event(ev, x, y, st)
        else:
            return None
class RotoZoom(renpy.display.core.Displayable):
    """
    Simultaneously rotates and zooms its child, with independent delay,
    repeat, bounce, timebase, and time-warp controls for each effect.
    """

    def __init__(self,
                 rot_start, rot_end, rot_delay,
                 zoom_start, zoom_end, zoom_delay,
                 child,
                 rot_repeat=False, zoom_repeat=False,
                 rot_bounce=False, zoom_bounce=False,
                 rot_anim_timebase=False, zoom_anim_timebase=False,
                 rot_time_warp=None, zoom_time_warp=None,
                 opaque=False,
                 style='motion',
                 **properties):
        super(RotoZoom, self).__init__(style=style, **properties)
        # Rotation parameters (angles in degrees, delay in seconds).
        self.rot_start = rot_start
        self.rot_end = rot_end
        self.rot_delay = rot_delay
        # Zoom parameters (scale factors, delay in seconds).
        self.zoom_start = zoom_start
        self.zoom_end = zoom_end
        self.zoom_delay = zoom_delay
        self.child = renpy.easy.displayable(child)
        self.rot_repeat = rot_repeat
        self.zoom_repeat = zoom_repeat
        self.rot_bounce = rot_bounce
        self.zoom_bounce = zoom_bounce
        self.rot_anim_timebase = rot_anim_timebase
        self.zoom_anim_timebase = zoom_anim_timebase
        self.rot_time_warp = rot_time_warp
        self.zoom_time_warp = zoom_time_warp
        self.opaque = opaque

    def visit(self):
        return [ self.child ]

    def render(self, w, h, st, at):
        # Each effect may use either the animation or shown timebase.
        if self.rot_anim_timebase:
            rot_time = at
        else:
            rot_time = st
        if self.zoom_anim_timebase:
            zoom_time = at
        else:
            zoom_time = st
        # Normalize each time to a 0..1 fraction of its delay; a zero
        # delay means the effect is complete immediately.
        if self.rot_delay == 0:
            rot_time = 1.0
        else:
            rot_time /= self.rot_delay
        if self.zoom_delay == 0:
            zoom_time = 1.0
        else:
            zoom_time /= self.zoom_delay
        if self.rot_repeat:
            rot_time %= 1.0
        if self.zoom_repeat:
            zoom_time %= 1.0
        # Bounce maps the 0..1 fraction onto 0..1..0.
        if self.rot_bounce:
            rot_time *= 2
            rot_time = min(rot_time, 2.0 - rot_time)
        if self.zoom_bounce:
            zoom_time *= 2
            zoom_time = min(zoom_time, 2.0 - zoom_time)
        # Reduced-updates mode: jump straight to the end state.
        if renpy.game.less_updates:
            rot_time = 1.0
            zoom_time = 1.0
        # Keep redrawing while either effect is still in progress.
        if rot_time <= 1.0 or zoom_time <= 1.0:
            renpy.display.render.redraw(self, 0)
        rot_time = min(rot_time, 1.0)
        zoom_time = min(zoom_time, 1.0)
        if self.rot_time_warp:
            rot_time = self.rot_time_warp(rot_time)
        if self.zoom_time_warp:
            zoom_time = self.zoom_time_warp(zoom_time)
        # Interpolate the angle (degrees) and zoom factor; 1.0 * forces
        # float arithmetic even for integer endpoints.
        angle = self.rot_start + (1.0 * self.rot_end - self.rot_start) * rot_time
        zoom = self.zoom_start + (1.0 * self.zoom_end - self.zoom_start) * zoom_time
        angle = -angle * math.pi / 180
        # Clamp the zoom away from zero to avoid dividing by zero below.
        zoom = max(zoom, 0.001)
        child_rend = renpy.display.render.render(self.child, w, h, st, at)
        surf = child_rend.pygame_surface(True)
        cw, ch = child_rend.get_size()
        # Figure out the size of the target: the child's diagonal, scaled.
        dw = math.hypot(cw, ch) * zoom
        dh = dw
        # We shrink the size by one, since we can't access these pixels.
        # cw -= 1
        # ch -= 1
        # Figure out the various components of the rotation.
        xdx = math.cos(angle) / zoom
        xdy = -math.sin(angle) / zoom
        ydx = -xdy # math.sin(angle) / zoom
        ydy = xdx # math.cos(angle) / zoom
        def draw(dest, xo, yo):
            target = dest
            # Map the destination's upper-left corner back into child
            # coordinates via the inverse rotation/zoom matrix.
            dulcx = -dw / 2.0 - xo
            dulcy = -dh / 2.0 - yo
            culcx = cw / 2.0 + xdx * dulcx + xdy * dulcy
            culcy = ch / 2.0 + ydx * dulcx + ydy * dulcy
            renpy.display.module.transform(surf, target,
                                           culcx, culcy,
                                           xdx, ydx, xdy, ydy,
                                           1.0, True)
        rv = renpy.display.render.Render(dw, dh, draw_func=draw, opaque=self.opaque)
        rv.depends_on(child_rend)
        return rv
# For compatibility with old games.
# These names used to live in renpy.display.layout; keep aliases there so
# existing scripts (and pickled save data) that reference the old module
# path continue to resolve.
renpy.display.layout.Transform = Transform
renpy.display.layout.RotoZoom = RotoZoom
renpy.display.layout.SizeZoom = SizeZoom
renpy.display.layout.FactorZoom = FactorZoom
renpy.display.layout.Zoom = Zoom
renpy.display.layout.Revolver = Revolver
renpy.display.layout.Motion = Motion
renpy.display.layout.Interpolate = Interpolate

# Leave these functions around - they might have been pickled somewhere.
renpy.display.layout.Revolve = Revolve # function
renpy.display.layout.Move = Move # function
renpy.display.layout.Pan = Pan # function
| MSEMJEJME/ReAlistair | renpy/display/motion.py | Python | gpl-2.0 | 44,134 | [
"VisIt"
] | bb6190a36897748fce41028617e6972233b4add2f8806595ea847f5b6942e302 |
"""Core API shared by cladecompare and cladeweb for running the algorithms."""
from __future__ import division
import contextlib
import logging
import os
import subprocess
import tempfile
# from cStringIO import StringIO
from copy import deepcopy
from math import fsum, log10
from os.path import basename
from Bio import AlignIO, SeqIO
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
from Bio.File import as_handle
from Bio._py3k import StringIO
from biofrills import alnutils, consensus
from . import pairlogo, pmlscript, urn, gtest, jsd, phospho, hypg
from .shared import combined_frequencies
# Gap threshold passed to consensus.consensus() below: columns above this
# gap fraction are treated as gap columns.
GAP_THRESH = 0.8
# Pseudocount size passed to the strategy modules' compare_* functions;
# presumably used for frequency smoothing -- see the strategy modules.
PSEUDO_SIZE = 0.5

# ---- FLOW --------------------------------------------------------------
def process_args(args):
    """Main driver: run the comparison described by the parsed arguments.

    Reads (and optionally profile-aligns) the foreground and background
    alignments, optionally aligns PDB structures to the same profile,
    selects the statistical strategy module, and writes the outputs via
    process_output().
    """
    if args.mapgaps or args.hmm:
        # run_gaps requires FASTA input
        assert args.format == 'fasta', \
            "Input sequence format must be FASTA."
    # Read all alignments: foreground first, then each background.
    all_alns = []
    for alnfname in [args.foreground] + args.background:
        if args.hmm:
            logging.info("Aligning %s with HMM profile %s",
                         alnfname, args.hmm)
            aln = hmm_align_and_read(args.hmm, alnfname)
        elif args.mapgaps:
            logging.info("Aligning %s with MAPGAPS profile %s",
                         alnfname, args.mapgaps)
            aln = mapgaps_align_and_read(args.mapgaps, alnfname)
        else:
            aln = read_aln(alnfname, args.format)
        all_alns.append(aln)
    # Align any PDB structures to the same profile so residue numbers can
    # be mapped to alignment columns.
    pdb_data = []
    if args.pdb:
        if args.hmm:
            for pdbfname in args.pdb:
                logging.info("Aligning %s with HMM profile %s",
                             pdbfname, args.hmm)
                pdb_rec, pdb_resnums, pdb_inserts = pdb_hmm(args.hmm, pdbfname)
                pdb_data.append((pdbfname, pdb_rec, pdb_resnums, pdb_inserts))
        elif args.mapgaps:
            for pdbfname in args.pdb:
                logging.info("Aligning %s with MAPGAPS profile %s",
                             pdbfname, args.mapgaps)
                pdb_rec, pdb_resnums, pdb_inserts = pdb_mapgaps(args.mapgaps,
                                                                pdbfname)
                pdb_data.append((pdbfname, pdb_rec, pdb_resnums, pdb_inserts))
        else:
            logging.error("PDB alignment requires a MAPGAPS or HMM profile.")
            # ENH - realign to fg, bg
            # aln = read_aln(args.pdb, 'pdb-atom')
    # ENH - put strategies in a dict, look up here
    if args.strategy == 'gtest':
        logging.info("Using G-test of amino acid frequencies")
        module = gtest
    elif args.strategy == 'urn':
        logging.info("Using ball-in-urn statistical model")
        module = urn
    elif args.strategy == 'jsd':
        logging.info("Using Jensen-Shannon divergence")
        module = jsd
    elif args.strategy == 'phospho':
        logging.info("Using urn model for phosphorylatable residues")
        module = phospho
    elif args.strategy == 'hypg':
        logging.info("Using hypergeometric model")
        module = hypg
    else:
        raise ValueError("Unknown strategy: %s" % args.strategy)
    if len(all_alns) == 1:
        # Single-alignment mode: score conservation within one alignment.
        aln, hits = process_one(all_alns[0], module, args.weight)
        process_output(aln, None, hits, args.alpha, args.output, args.pattern,
                       pdb_data, args.pmlout)
    elif len(all_alns) == 2:
        # Foreground-vs-background comparison.
        fg_clean, bg_clean, hits = process_pair(all_alns[0], all_alns[1],
                                                module, args.weight)
        process_output(fg_clean, bg_clean, hits, args.alpha,
                       args.output, args.pattern,
                       pdb_data, args.pmlout)
        # args.pdb, pdb_rec, pdb_resnums, pdb_inserts)
    else:
        # All-vs-rest mode: each alignment in turn is the foreground and
        # the union of the others is the background.
        # Output fnames are based on fg filenames; ignore what's given
        outfnames_ptnfnames = [(basename(alnfname) + '.out',
                                basename(alnfname) + '.pttrn')
                               for alnfname in ([args.foreground] +
                                                args.background)]
        for idx, fg_aln in enumerate(all_alns):
            # Combine other alns into bg
            _other_alns = all_alns[:idx] + all_alns[idx+1:]
            bg_aln = deepcopy(_other_alns[0])
            for otra in _other_alns[1:]:
                bg_aln.extend(deepcopy(otra))
            fg_clean, bg_clean, hits = process_pair(deepcopy(fg_aln), bg_aln,
                                                    module, args.weight)
            outfname, ptnfname = outfnames_ptnfnames[idx]
            process_output(fg_clean, bg_clean, hits, args.alpha,
                           outfname, ptnfname, pdb_data, args.pmlout)
            # args.pdb,
            # pdb_rec, pdb_resnums, pdb_inserts)
            logging.info("Wrote %s and %s", outfname, ptnfname)
def process_pair(fg_aln, bg_aln, module, do_weight):
    """Calculate a mapping of alignment column positions to "contrast".

    Return a list of tuples:
        (foreground consensus aa, background consensus aa, p-value)
    for each column position.

    Also returns the cleaned foreground and background alignments:
    (fg_aln, bg_aln, hits).
    """
    fg_aln, bg_aln = clean_alignments(fg_aln, bg_aln)
    # Optional sequence weighting to down-weight redundant sequences.
    if do_weight:
        fg_weights = alnutils.sequence_weights(fg_aln, 'none')
        bg_weights = alnutils.sequence_weights(bg_aln, 'none')
    else:
        fg_weights = [1 for i in range(len(fg_aln))]
        bg_weights = [1 for i in range(len(bg_aln))]
    # The urn model uses the raw foreground count, not the weighted sum.
    fg_size = fsum(fg_weights) if module != urn else len(fg_aln)
    bg_size = fsum(bg_weights)
    # Overall aa freqs for pseudocounts
    aa_freqs = combined_frequencies(fg_aln, fg_weights, bg_aln, bg_weights)
    fg_cons = consensus.consensus(fg_aln, weights=fg_weights, trim_ends=False,
                                  gap_threshold=GAP_THRESH)
    bg_cons = consensus.consensus(bg_aln, weights=bg_weights, trim_ends=False,
                                  gap_threshold=GAP_THRESH)
    hits = []
    # Walk the columns of both alignments in lockstep with the consensus.
    for faa, baa, fg_col, bg_col in zip(fg_cons, bg_cons,
                                        zip(*fg_aln), zip(*bg_aln)):
        if faa == '-' or baa == '-':
            # Ignore indel columns -- there are better ways to look at these
            pvalue = 1.
        else:
            pvalue = module.compare_cols(
                fg_col, faa, fg_size, fg_weights,
                bg_col, baa, bg_size, bg_weights,
                aa_freqs, PSEUDO_SIZE)
        hits.append((faa, baa, pvalue))
    return fg_aln, bg_aln, hits
def process_one(aln, module, do_weight):
    """Calculate a mapping of alignment column positions to "contrast".

    Single-alignment variant of process_pair(); the background consensus
    slot of each hit is filled with '_'.  Returns (aln, hits).
    """
    # Optional sequence weighting to down-weight redundant sequences.
    if do_weight:
        weights = alnutils.sequence_weights(aln, 'none')
        # if module != jsd else 'sum1')
    else:
        weights = [1 for i in range(len(aln))]
    # The urn model uses the raw sequence count, not the weighted sum.
    aln_size = fsum(weights) if module != urn else len(aln)
    aa_freqs = alnutils.aa_frequencies(aln, weights, gap_chars='-.X')
    cons = consensus.consensus(aln, weights=weights, trim_ends=False,
                               gap_threshold=GAP_THRESH)
    hits = []
    for cons_aa, col in zip(cons, zip(*aln)):
        if cons_aa == '-':
            # Ignore indel columns -- there are better ways to look at these
            pvalue = 1.
        else:
            pvalue = module.compare_one(col, cons_aa, aln_size, weights,
                                        aa_freqs, PSEUDO_SIZE)
        hits.append((cons_aa, '_', pvalue))
    return aln, hits
def process_output(fg_aln, bg_aln, hits, alpha, output, pattern, pdb_data,
                   pml_output=None):
    """Generate the output files from the processed data.

    Writes per-site p-values to `output`, and optionally an mcBPPS-style
    pattern file, pair logos, and a PyMOL script for the aligned PDBs.
    """
    with as_handle(output, 'w+') as outfile:
        write_pvalues(hits, outfile, alpha)
    # Significant sites after multiple-testing correction.
    tophits = top_hits(hits, alpha)
    if pattern:
        with open(pattern, 'w+') as ptnfile:
            write_mcbpps(tophits, ptnfile)
        # XXX hack: don't make pairlogo in single mode
        if bg_aln:
            pairlogo.make_pairlogos(fg_aln, bg_aln, tophits,
                                    pattern.rsplit('.', 1)[0],
                                    10)
    if pdb_data:
        # Foreground consensus residues of the significant sites.
        patterns = [t[0] for t in tophits]
        if len(pdb_data) == 1:
            # Single-PBD mode
            pdb_fname, pdb_rec, pdb_resnums, pdb_inserts = pdb_data[0]
            script = pmlscript.build_single(pdb_resnums, pdb_inserts,
                                            patterns, pdb_fname,
                                            pdb_rec.annotations['chain'])
            if not pml_output:
                pml_output = pdb_fname + ".pml"
        else:
            # Multi-PBD mode
            pdb_fnames, pdb_recs, pdb_resnumses, pdb_insertses = zip(*pdb_data)
            script = pmlscript.build_multi(pdb_resnumses, pdb_insertses,
                                           patterns, pdb_fnames,
                                           [r.annotations['chain']
                                            for r in pdb_recs])
            if not pml_output:
                pml_output = pdb_fnames[0] + "-etc.pml"
        with open(pml_output, 'w+') as pmlfile:
            pmlfile.write(script)
        logging.info("Wrote %s", pml_output)
# --- Output ---
def write_pvalues(hits, outfile, alpha):
    """Write p-values & "contrast" stars for each site. (It's noisy.)

    `hits` is a list of (fg_consensus_aa, bg_consensus_aa, pvalue) triples,
    one per alignment column; one line is written per site.  Sites with
    p-value below `alpha` get one star per order of magnitude below 1.
    """
    for idx, data in enumerate(hits):
        fg_char, bg_char, pvalue = data
        if not (0.0 <= pvalue <= 1.0):
            # logging.warn is a deprecated alias for logging.warning
            logging.warning("Out-of-domain p-value at site %s: %s",
                            idx, pvalue)
        stars = ('*'*int(-log10(pvalue)) if 0 < pvalue < alpha else '')
        outfile.write("%s (%s) %d : prob=%g\t%s\n"
                      % (fg_char, bg_char, idx + 1, pvalue, stars))
def write_mcbpps(tophits, ptnfile):
    """Write a .pttrn file in the style of mcBPPS."""
    # Each term is the foreground consensus residue followed by its
    # 1-based column position, e.g. "A5".
    terms = ["%s%d" % (fg_aa, position)
             for position, fg_aa, bg_aa in tophits]
    ptnfile.write("1:" + ','.join(terms))
# --- Input magic ---
def call_quiet(*args):
    """Safely run a command and get stdout; print stderr if there's an error.

    Like subprocess.check_output, but silent in the normal case where the
    command logs unimportant stuff to stderr. If there is an error, then the
    full error message(s) is shown in the exception message.
    """
    process = subprocess.Popen(args, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout_data, stderr_data = process.communicate()
    if process.returncode:
        raise RuntimeError("Subprocess command failed:\n$ %s\n\n%s"
                           % (' '.join(args), stdout_data + stderr_data))
    return stdout_data
def hmm_align_and_read(hmm_profile, fasta_fname):
    """Align a FASTA file with HMMer 3 and read the alignment.

    Returns a MultipleSeqAlignment of the profile's match columns only.
    """
    out = call_quiet('hmmalign', '--allcol', '--trim', '--amino',
                     '--outformat', 'a2m', hmm_profile, fasta_fname)
    # ENH: write to file, then parse incrementally
    records = list(SeqIO.parse(StringIO(out), 'fasta'))
    # Remove inserts, i.e. lowercase characters
    # NOTE(review): writes the private Seq._data attribute directly; this
    # may break with newer Biopython releases -- confirm.
    for rec in records:
        rec.seq._data = ''.join([c for c in str(rec.seq) if not c.islower()])
    return MultipleSeqAlignment(records, generic_protein)
def mapgaps_align_and_read(mapgaps_profile, fasta_fname):
    """Align a FASTA file with MAPGAPS and read the CMA alignment."""
    # run_gaps writes its result to <fasta_fname>_aln.cma alongside the input.
    call_quiet('run_gaps', mapgaps_profile, fasta_fname)
    aln = cma_blocks(fasta_fname + '_aln.cma')
    return aln
def cma_blocks(cma_fname):
    """Select the conserved/consensus columns in the alignment.

    This removes inserts relative to the consensus sequence.
    Return a Biopython MultipleSeqAlignment.
    """
    records = []
    with open(cma_fname) as infile:
        lines = iter(infile)
        for line in lines:
            if line.startswith('>'):
                # Accession is the first word of the header, minus '>'.
                acc = line.split(None, 1)[0][1:]
                # The sequence is on the line following the header.
                seq = next(lines).strip()
                # Trim the CMA bracket characters at each end (presumably
                # the "{()...()}"-style delimiters -- confirm against the
                # CMA format) and drop lowercase insert columns.
                seq = ''.join((c for c in seq[3:-4] if not c.islower()))
                records.append(
                    SeqRecord(Seq(seq, generic_protein),
                              id=acc, description=''))
    return MultipleSeqAlignment(records, generic_protein)
def read_aln(fname, format):
    """Read a sequence alignment.

    Dispatches CMA files to cma_blocks(); everything else goes through
    Biopython's AlignIO with the given (lowercase) format name.
    """
    if format:
        assert format.islower()
    if fname.endswith('.cma') or format == 'cma':
        return cma_blocks(fname)
    else:
        aln = AlignIO.read(fname, format)
        # Avoids trouble w/ cogent later on
        for rec in aln:
            rec.description = ''
        return aln
def combine_alignments(fg_aln, bg_aln):
    """Align FG and BG to each other so column numbers match.

    Uses MUSCLE for profile-profile alignment.  Returns the combined
    MultipleSeqAlignment (FG records and BG records in one alignment),
    with all-empty columns removed; a copy is saved to _cc_combined.seq.
    """
    # This would be simpler with NamedTemporaryFile, but Windows doesn't allow
    # multiple open file handles on the same file, so here we are.
    afd, aseqfname = tempfile.mkstemp(text=True)
    os.close(afd)
    bfd, bseqfname = tempfile.mkstemp(text=True)
    os.close(bfd)
    try:
        AlignIO.write(fg_aln, aseqfname, 'fasta')
        AlignIO.write(bg_aln, bseqfname, 'fasta')
        output = call_quiet('muscle', '-profile',
                            '-in1', aseqfname, '-in2', bseqfname)
    finally:
        # Clean up the temp files whether or not MUSCLE succeeded.
        if os.path.exists(aseqfname):
            os.remove(aseqfname)
        if os.path.exists(bseqfname):
            os.remove(bseqfname)
    full_aln = AlignIO.read(StringIO(output), 'fasta')
    full_aln = MultipleSeqAlignment(alnutils.remove_empty_cols(full_aln),
                                    generic_protein)
    # Save a copy
    # ENH: choose a reasonable name
    AlignIO.write(full_aln, '_cc_combined.seq', 'fasta')
    logging.info("Wrote _cc_combined.seq")
    return full_aln
def clean_alignments(fg_aln, bg_aln):
    """Fix simple issues in the alignments:

    - Remove duplicated sequences and IDs from the background
    - Ensure alignments are the same width (if not, align to each other)
    - Remove all-gap columns from the full alignment

    Both alignments are modified in place and returned as (fg_aln, bg_aln).

    Raises ValueError if either set is empty, or if the background is a
    subset of the foreground.
    """
    if not len(fg_aln):
        raise ValueError("Foreground set is empty")
    if not len(bg_aln):
        raise ValueError("Background set is empty")
    # Remove FG seqs from BG -- by equal sequence IDs, here
    killme = []
    for fg_seq in fg_aln:
        for idx, bg_seq in enumerate(bg_aln):
            if fg_seq.id == bg_seq.id != 'consensus':
                if str(fg_seq.seq) != str(bg_seq.seq):
                    logging.warning("Different sequences for %s in fg, bg",
                                    fg_seq.id)
                killme.append(idx)
    if killme:
        logging.info("Removing %d duplicated sequence IDs from the background",
                     len(killme))
        # Delete from the end so earlier indexes stay valid.
        for idx in sorted(killme, reverse=True):
            del bg_aln._records[idx]
    if not len(bg_aln):
        raise ValueError("Background set is a subset of the foreground. "
                         "To fix this, use a background containing sequences "
                         "not in the foreground.")
    # Remove identical sequences from the FG and BG.
    # The 'seen' set is deliberately shared between the FG and BG passes,
    # so a BG sequence identical to any FG sequence is dropped too.
    # (Previously this was a mutable default argument on the inner
    # function; the shared state is now explicit.)
    seen_seqs = set()

    def purge_duplicates(aln):
        # Keep only the first occurrence of each distinct sequence string.
        out_recs = []
        for rec in aln:
            if str(rec.seq) not in seen_seqs:
                out_recs.append(rec)
                seen_seqs.add(str(rec.seq))
        diff_fg = len(aln) - len(out_recs)
        if diff_fg:
            logging.warning("Purging %d identical sequences from the alignment",
                            diff_fg)
        return out_recs

    fg_aln._records = purge_duplicates(fg_aln)
    bg_aln._records = purge_duplicates(bg_aln)
    # Ensure alignments are the same width
    if len(fg_aln[0]) != len(bg_aln[0]):
        logging.warning("Alignments are not of equal width; fixing with MUSCLE.")
        full_aln = combine_alignments(fg_aln, bg_aln)
    else:
        full_aln = deepcopy(fg_aln)
        full_aln.extend(bg_aln)
    # Remove columns that are all gaps in both fg and bg (full_aln)
    seqstrs = [str(rec.seq) for rec in full_aln]
    clean_cols = [col for col in zip(*seqstrs)
                  if not all(c == '-' for c in col)]
    clean_seqs = [''.join(row) for row in zip(*clean_cols)]
    for rec, clean_seq in zip(full_aln, clean_seqs):
        rec.seq = Seq(clean_seq, rec.seq.alphabet)
    # Split the full alignment back into FG and BG sets
    fg_labels = set([seq.id for seq in fg_aln])
    fg_recs = []
    bg_recs = []
    for rec in full_aln:
        if rec.id in fg_labels:
            fg_recs.append(rec)
        else:
            bg_recs.append(rec)
    fg_aln._records = fg_recs
    bg_aln._records = bg_recs
    return fg_aln, bg_aln
def top_hits(hits, alpha, N=50):
    """Take the top (up to N) hits with corrected p-value <= alpha.

    Applies the Benjamini-Hochberg step-up FDR correction to the per-site
    p-values in `hits` (a list of (fg_aa, bg_aa, pvalue) triples).

    Return a list of triplets, sorted by significance:
        (position, fg_aa, bg_aa)
    """
    hit_quads = [(i+1, faa_baa_pval[0], faa_baa_pval[1], faa_baa_pval[2])
                 for i, faa_baa_pval in enumerate(hits)]
    get_pval = lambda ifbp: ifbp[3]
    # Benjamini-Hochberg multiple-testing FDR correction (BH step-up)
    hit_quads.sort(key=get_pval)
    m = len(hit_quads)  # Num. hypotheses tested
    if m < N:
        N = m
    tophits = [(posn, faa, baa) for posn, faa, baa, pval in hit_quads]
    # Walk from the largest p-value down, keeping everything up to the
    # largest k where p_(k) <= alpha*k/m.
    # NB: zip() returns an iterator on Python 3, so materialize it before
    # slicing (previously `zip(...)[-N:]` raised TypeError on Python 3).
    for k, ifbp in list(zip(range(m, 0, -1), reversed(hit_quads)))[-N:]:
        if get_pval(ifbp) <= alpha * k / m:
            return tophits[:k]
    return []
# --- PDB alignment magic ---
# ENH: handle multiple PDBs
@contextlib.contextmanager
def read_pdb_seq(pdb_fname):
    """Return the name of a temporary file containing the PDB atom sequence(s),
    a list of the sequences themselves (as SeqRecords), and a same-length list
    of tuples of (chain ID, chain start resnum, chain end resnum).

    Context manager, so temporary file is automatically removed.
    """
    pdbseqs = list(SeqIO.parse(pdb_fname, 'pdb-atom'))
    try:
        _fd, pdbseqfname = tempfile.mkstemp()
        SeqIO.write(pdbseqs, pdbseqfname, 'fasta')
        yield pdbseqfname, pdbseqs
    finally:
        # NOTE(review): if mkstemp() itself raises, _fd and pdbseqfname
        # are unbound here and this cleanup raises NameError -- confirm.
        os.close(_fd)
        if os.path.exists(pdbseqfname):
            os.remove(pdbseqfname)
def choose_best_aligned(aligned):
    """Choose the longest profile match as the "reference" chain.

    `aligned` maps sequence IDs to aligned sequence strings.

    Returns a tuple: (sequence ID, sequence string)

    Raises RuntimeError if `aligned` is empty.
    """
    def aligned_len(seq):
        # Count match (uppercase) columns, ignoring 'X' placeholders.
        return sum(c.isupper() for c in seq.replace('X', ''))

    if not aligned:
        raise RuntimeError("No PDB sequences were aligned by the profile!")
    elif len(aligned) == 1:
        # Single candidate -- take it. (dict.items() is a non-subscriptable
        # view on Python 3, hence next(iter(...)).)
        ref_id, ref_aln = next(iter(aligned.items()))
    else:
        # Pick the (id, seq) pair with the most aligned columns.
        # (Previously max() over iteritems() assigned the whole pair to
        # ref_id and then indexed the dict with it -- a KeyError.)
        ref_id, ref_aln = max(aligned.items(),
                              key=lambda kv: aligned_len(kv[1]))
    return ref_id, ref_aln
def get_aln_offset(full, aln):
    """Return the index in `full` where the trimmed alignment begins.

    Gap characters ('-', '.') are dropped, case is folded to upper to
    match `full`, and anything from the first 'X' placeholder onward is
    ignored. Raises ValueError if the trimmed string is not found.
    """
    core = aln.replace('-', '').replace('.', '').upper().partition('X')[0]
    return full.index(core)
def aln_resnums_inserts(record, aln, offset):
    """Return two lists: residue numbers for model columns; inserts.

    Walks the aligned string `aln`: uppercase = match column, '-' =
    deletion, lowercase = insert. Residue numbers are 1-based and start
    from `offset`; deletions contribute ('-', None) entries and do not
    advance the residue numbering. Insert regions are returned as
    (start_resnum, end_resnum) pairs. (`record` is accepted for interface
    compatibility but not used.)

    NB: an insert region that runs to the very end of `aln` is dropped,
    matching the original behavior.
    """
    resnums = []
    inserts = []
    insert_start = None   # resnum where the currently-open insert began
    deletions = 0         # count of '-' columns seen so far
    for idx, char in enumerate(aln):
        if char.islower():
            # Insert column: open a region if one isn't open already.
            if insert_start is None:
                insert_start = offset + idx + 1 - deletions
            continue
        if insert_start is not None:
            # A non-insert column closes the open insert region.
            inserts.append((insert_start, offset + idx - deletions))
            insert_start = None
        if char == '-':
            # Deletion: no residue at this model column.
            resnums.append(('-', None))
            deletions += 1
        elif char.isupper():
            # Match column: record the residue and its number.
            resnums.append((char, offset + idx - deletions + 1))
        else:
            raise ValueError("Unexpected character '%s'" % char)
    return resnums, inserts
def pdb_hmm(hmm_profile, pdb_fname):
    """Align a PDB structure to an HMM profile.

    Returns a tuple: (SeqRecord,
                      //chain ID,
                      list of aligned residue numbers,
                      list of insert ranges as tuple pairs)
    """
    with read_pdb_seq(pdb_fname) as (seqfname, seqs):
        out = call_quiet('hmmalign', '--allcol', '--trim', '--amino',
                         '--outformat', 'a2m', hmm_profile, seqfname)
        # Use the chain with the longest profile match as the reference.
        ref_id, ref_aln = choose_best_aligned(
            dict((rec.id, str(rec.seq))
                 for rec in SeqIO.parse(StringIO(out), 'fasta')))
        ref_record = SeqIO.to_dict(seqs)[ref_id]
        # Calculate aligned residue numbers & insert ranges
        offset = (ref_record.annotations['start']
                  + get_aln_offset(str(ref_record.seq), ref_aln)
                  - 1)
        resnums, inserts = aln_resnums_inserts(ref_record, ref_aln, offset)
        return ref_record, resnums, inserts
def pdb_mapgaps(mapgaps_profile, pdb_fname):
    """Align a PDB structure to a MAPGAPS profile.

    Returns a tuple: (SeqRecord, list of aligned residue numbers)
    """
    from biocma import cma
    with read_pdb_seq(pdb_fname) as (seqfname, seqs):
        # run_gaps writes its result to <seqfname>_aln.cma.
        call_quiet('run_gaps', mapgaps_profile, seqfname)
        pdb_cma = cma.read(seqfname + '_aln.cma')
        # Collect aligned sequences and their CMA header lengths by ID.
        hits = {}
        head_lengths = {}
        for seq in pdb_cma['sequences']:
            hits[seq['id']] = seq['seq']
            head_lengths[seq['id']] = seq['head_len']
        # Use the chain with the longest profile match as the reference.
        ref_id, ref_aln = choose_best_aligned(hits)
        ref_record = SeqIO.to_dict(seqs)[ref_id]
        offset = (ref_record.annotations['start']
                  + head_lengths[ref_id]
                  - 1)
        resnums, inserts = aln_resnums_inserts(ref_record, ref_aln, offset)
        return ref_record, resnums, inserts
| etal/cladecompare | cladecomparelib/core.py | Python | bsd-2-clause | 21,918 | [
"Biopython"
] | 9e4344642251aca0e52c249ff7778029e15bfcbed99e34a742ff2816f6f9116b |
from trbm_base import TRBMBase
from copy import deepcopy
import numpy as np
class TRBM(TRBMBase):
def __init__(self, number_visible_units=1, number_hidden_units=1, order=1, network=None):
    """Build a temporal RBM, either fresh or as a copy of `network`.

    If `network` is None, a new model of the given dimensions and
    temporal order is created with small random weights; otherwise all
    dimensions and parameters are copied from `network` and the other
    arguments are ignored.
    """
    self.visible_values = list()
    self.hidden_values = list()
    self.visible_to_visible_bias = list()
    self.visible_to_hidden_bias = list()
    self.hidden_to_hidden_bias = list()
    if network is None:
        self.number_visible_units = number_visible_units
        self.number_hidden_units = number_hidden_units
        self.order = order
        # One value slot per time step in the history window (plus the
        # current step).
        for _ in xrange(order + 1):
            self.visible_values.append(np.zeros((number_visible_units, 1)))
            self.hidden_values.append(np.zeros((number_hidden_units, 1)))
        # visible to hidden connections at time t
        self.connection_weights = np.random.rand(number_visible_units, number_hidden_units) * 0.05
        # bias at time t
        self.visible_bias = np.random.rand(number_visible_units, 1)
        self.hidden_bias = np.random.rand(number_hidden_units, 1)
        # bias propagated from previous time steps
        for _ in xrange(order):
            visible_to_visible = np.random.rand(number_visible_units, number_visible_units) * 0.05
            visible_to_hidden = np.random.rand(number_visible_units, number_hidden_units) * 0.05
            hidden_to_hidden = np.random.rand(number_hidden_units, number_hidden_units) * 0.05
            self.visible_to_visible_bias.append(visible_to_visible)
            self.visible_to_hidden_bias.append(visible_to_hidden)
            self.hidden_to_hidden_bias.append(hidden_to_hidden)
    else:
        # Copy constructor: all dimensions come from `network`.
        # BUG FIX: the loops below previously iterated over the `order`
        # parameter (default 1) instead of network.order, so copies of
        # higher-order networks were built with a truncated history and
        # missing bias matrices.
        self.number_visible_units = network.number_visible_units
        self.number_hidden_units = network.number_hidden_units
        self.order = network.order
        for _ in xrange(network.order + 1):
            self.visible_values.append(np.zeros((network.number_visible_units, 1)))
            self.hidden_values.append(np.zeros((network.number_hidden_units, 1)))
        # visible to hidden connections at time t
        self.connection_weights = np.array(network.connection_weights)
        # bias at time t
        self.visible_bias = np.array(network.visible_bias)
        self.hidden_bias = np.array(network.hidden_bias)
        # bias propagated from previous time steps
        for i in xrange(network.order):
            self.visible_to_visible_bias.append(np.array(network.visible_to_visible_bias[i]))
            self.visible_to_hidden_bias.append(np.array(network.visible_to_hidden_bias[i]))
            self.hidden_to_hidden_bias.append(np.array(network.hidden_to_hidden_bias[i]))
def __deepcopy__(self, memo):
    # Deep copy by re-running the copy constructor; `memo` is ignored
    # because TRBM(network=...) already duplicates every parameter array.
    return TRBM(network=self)
def train(self, data, epochs=100, learning_rate=0.1):
    """Trains the Boltzmann machine with a given set of training vectors.

    For every epoch, the first `order` rows of `data` seed the machine's
    history; each subsequent row then drives contrastive-divergence-style
    updates of the current-time RBM weights and of the temporal
    (visible-to-hidden, visible-to-visible, hidden-to-hidden) connection
    weights for each of the `order` previous time steps.

    Keyword arguments:
    data -- A 'np.array' containing data for training the RBM. Each row of the array should be a training vector of dimension 'number_visible_units'.
    epochs -- The number of iterations of the learning algorithm (default 100).
    learning_rate -- The algorithm's learning rate (default 0.1).
    """
    for _ in xrange(epochs):
        # seed the first `order` time steps directly from the training data
        for t in xrange(self.order):
            self.visible_values[t] = self._copy_array(data[t], self.visible_values[t].shape)
            self.hidden_values[t] = self._sample_initial(t,1)
        number_training_vectors = data.shape[0]
        for t in xrange(self.order,number_training_vectors):
            self.visible_values[self.order] = self._copy_array(data[t], self.visible_values[self.order].shape)
            #we sample the current hidden layer given the visible layer up to time t and the hidden layers up to time t-1
            current_time_hidden_bias_values = self.connection_weights.T.dot(self.visible_values[self.order])
            hidden_bias = self._bias_function_hidden()
            for neuron in xrange(self.number_hidden_units):
                prob = self._sigmoid(current_time_hidden_bias_values[neuron] + hidden_bias[neuron])
                self.hidden_values[self.order][neuron] = prob
            #we sample from the network
            sample,_ = self._sample(1)
            #we update the connection weights between the visible and hidden units of the current time step
            for i in xrange(self.number_visible_units):
                #we update the bias values of the visible units
                visible_bias_delta = learning_rate * (self.visible_values[self.order][i] - sample[i])
                self.visible_bias[i] = self.visible_bias[i] + visible_bias_delta
                for j in xrange(self.number_hidden_units):
                    data_expectation = self._sigmoid(np.sum(self.connection_weights[:,j] * self.visible_values[self.order]) + self.hidden_bias[j])
                    sample_expectation = self._sigmoid(np.sum(self.connection_weights[:,j] * sample) + self.hidden_bias[j])
                    #we update the connection weight between the i-th visible unit and the j-th hidden unit
                    weight_change_delta = learning_rate * (data_expectation * self.visible_values[self.order][i] - sample_expectation * sample[i])
                    self.connection_weights[i,j] = self.connection_weights[i,j] + weight_change_delta
                    #we update the bias values of the hidden units
                    hidden_bias_delta = learning_rate * (data_expectation - sample_expectation)
                    self.hidden_bias[j] = self.hidden_bias[j] + hidden_bias_delta
            #we update the visible to hidden connection weights between the current time step and the previous time steps
            # value_index walks the stored history from most recent to oldest
            for n in xrange(self.order):
                value_index = self.order - n - 1
                sample,_ = self._sample(1,self.visible_values[value_index])
                for i in xrange(self.number_visible_units):
                    for j in xrange(self.number_hidden_units):
                        data_expectation = self._sigmoid(np.sum(self.visible_to_hidden_bias[n][:,j] * self.visible_values[value_index]) + self.hidden_bias[j])
                        sample_expectation = self._sigmoid(np.sum(self.visible_to_hidden_bias[n][:,j] * sample) + self.hidden_bias[j])
                        #we update the connection weight between the i-th visible unit and the j-th hidden unit
                        weight_change_delta = learning_rate * (data_expectation * self.visible_values[value_index][i] - sample_expectation * sample[i])
                        self.visible_to_hidden_bias[n][i,j] = self.visible_to_hidden_bias[n][i,j] + weight_change_delta
            #we update the visible to visible connection weights between the current time step and the previous time steps
            for n in xrange(self.order):
                value_index = self.order - n - 1
                sample,_ = self._sample(1,self.visible_values[value_index])
                for i in xrange(self.number_visible_units):
                    for j in xrange(self.number_visible_units):
                        data_expectation = self._sigmoid(np.sum(self.visible_to_visible_bias[n][:,j] * self.visible_values[value_index]) + self.visible_bias[j])
                        sample_expectation = self._sigmoid(np.sum(self.visible_to_visible_bias[n][:,j] * sample) + self.visible_bias[j])
                        #we update the connection weight between the i-th and the j-th visible unit
                        weight_change_delta = learning_rate * (data_expectation * self.visible_values[value_index][i] - sample_expectation * sample[i])
                        self.visible_to_visible_bias[n][i,j] = self.visible_to_visible_bias[n][i,j] + weight_change_delta
            #we update the hidden to hidden connection weights between the current time step and the previous time steps
            for n in xrange(self.order):
                value_index = self.order - n - 1
                # note: here the *hidden* sample is used, unlike the two loops above
                _,sample = self._sample(1,self.visible_values[value_index])
                for i in xrange(self.number_hidden_units):
                    for j in xrange(self.number_hidden_units):
                        data_expectation = self._sigmoid(np.sum(self.hidden_to_hidden_bias[n][:,j] * self.hidden_values[value_index]) + self.hidden_bias[j])
                        sample_expectation = self._sigmoid(np.sum(self.hidden_to_hidden_bias[n][:,j] * sample) + self.hidden_bias[j])
                        #we update the connection weight between the i-th and the j-th hidden unit
                        weight_change_delta = learning_rate * (data_expectation * self.hidden_values[value_index][i] - sample_expectation * sample[i])
                        self.hidden_to_hidden_bias[n][i,j] = self.hidden_to_hidden_bias[n][i,j] + weight_change_delta
            #we move the visible vectors one time step back
            self._shift_visible_vectors_back()
def initialise(self, initial_data):
    """Prime the machine's history with the first `order` rows of data.

    Keyword arguments:
    initial_data -- A 'np.array' whose first `order` rows seed the visible
    layers; a hidden layer is sampled for each seeded time step.
    """
    for step in xrange(self.order):
        seed_row = initial_data[step,:]
        self.visible_values[step] = self._copy_array(seed_row, self.visible_values[step].shape)
        self.hidden_values[step] = self._sample_initial(step,1)
def sample_network(self, current_vector=None):
    """Samples a visible vector from the network.

    Keyword arguments:
    current_vector -- Data vector at time t given as a 'np.array' of dimension
    (number_visible_units) (default None, meaning the current visible layer is
    filled with uniform random values before sampling).

    Returns:
    visible_units -- A 'np.array' containing the sampled values.
    """
    # BUG FIX: use 'is None' rather than '== None'. Comparing a numpy array
    # with '== None' performs an elementwise comparison, and using that
    # result in a boolean context raises a ValueError.
    if current_vector is None:
        for i in xrange(len(self.visible_values[self.order])):
            self.visible_values[self.order][i] = np.random.rand()
    else:
        self.visible_values[self.order] = self._copy_array(current_vector, self.visible_values[self.order].shape)
    visible_units = np.array(self.visible_values[self.order])
    hidden_units = np.array(self.hidden_values[self.order])
    # bias contributions from the current time step plus the temporal terms
    current_time_visible_bias_values = self.connection_weights.dot(self.hidden_values[self.order])
    visible_bias = self._bias_function_visible()
    current_time_hidden_bias_values = self.connection_weights.T.dot(self.visible_values[self.order])
    hidden_bias = self._bias_function_hidden()
    for neuron in xrange(self.number_hidden_units):
        prob = self._sigmoid(current_time_hidden_bias_values[neuron] + hidden_bias[neuron])
        hidden_units[neuron] = prob
    for neuron in xrange(self.number_visible_units):
        prob = self._sigmoid(current_time_visible_bias_values[neuron] + visible_bias[neuron])
        visible_units[neuron] = prob
    self._shift_visible_vectors_back()
    return visible_units
def _sample(self, k, training_vector=None):
    """Samples a visible vector and a hidden vector given a training vector.

    Uses Contrastive Divergence for sampling the values.

    Keyword arguments:
    k -- The number of samples created by Contrastive Divergence before a sample is accepted.
    training_vector -- A vector that should be used at the t-th time step. (default None, resulting in a vector already stored in self.visible_values[self.order]).

    Returns:
    visible_units -- A 'np.array' containing the sampled visible values.
    hidden_units -- A 'np.array' containing the sampled hidden values.
    """
    # BUG FIX: 'training_vector == None' triggers numpy's elementwise
    # comparison and raises in a boolean context; identity check is correct.
    if training_vector is None:
        visible_units = np.array(self.visible_values[self.order])
    else:
        visible_units = np.array(training_vector)
    hidden_units = np.array(self.hidden_values[self.order])
    current_time_visible_bias_values = self.connection_weights.dot(self.hidden_values[self.order])
    visible_bias = self._bias_function_visible()
    current_time_hidden_bias_values = self.connection_weights.T.dot(self.visible_values[self.order])
    hidden_bias = self._bias_function_hidden()
    for sample in xrange(k):
        for neuron in xrange(self.number_hidden_units):
            prob = self._sigmoid(current_time_hidden_bias_values[neuron] + hidden_bias[neuron])
            hidden_units[neuron] = prob
        for neuron in xrange(self.number_visible_units):
            prob = self._sigmoid(current_time_visible_bias_values[neuron] + visible_bias[neuron])
            visible_units[neuron] = prob
    return visible_units, hidden_units
def _sample_initial(self, t, k):
    """Samples a hidden layer conditioned only on the visible vector at time step t.

    Uses Contrastive Divergence for sampling the values.

    Keyword arguments:
    t -- Current time step.
    k -- The number of samples created by Contrastive Divergence before a sample is accepted.

    Returns:
    hidden_units -- A 'np.array' containing the sampled hidden values.
    """
    visible_layer = np.array(self.visible_values[t])
    hidden_layer = np.array(self.hidden_values[t])
    for _ in xrange(k):
        for h in xrange(self.number_hidden_units):
            activation = np.sum(self.connection_weights[:,h] * visible_layer) + self.hidden_bias[h]
            hidden_layer[h] = self._sigmoid(activation)
        for v in xrange(self.number_visible_units):
            activation = np.sum(self.connection_weights[v,:] * hidden_layer) + self.visible_bias[v]
            visible_layer[v] = self._sigmoid(activation)
    return hidden_layer
"NEURON"
] | 5f5d0898837bc85d7fabfcf645a6fdae54cbf1f7d6700eb15520c150f86e17b8 |
from galaxy.util import rst_to_html
def lint_help(tool_xml, lint_ctx):
    """Lint the ``<help>`` section of a tool XML document.

    Reports an error for multiple help sections, a warning for a missing or
    empty section, and otherwise validates that the help text renders as
    reStructuredText.
    """
    root = tool_xml.getroot()
    helps = root.findall("help")
    if len(helps) > 1:
        lint_ctx.error("More than one help section found, behavior undefined.")
        return
    if len(helps) == 0:
        lint_ctx.warn("No help section found, consider adding a help section to your tool.")
        return
    # Renamed from ``help`` to avoid shadowing the builtin of the same name.
    help_text = helps[0].text or ''
    if not help_text.strip():
        lint_ctx.warn("Help section appears to be empty.")
        return
    lint_ctx.valid("Tool contains help section.")
    if "TODO" in help_text:
        lint_ctx.warn("Help contains TODO text.")
    # Any exception from the RST renderer marks the help as invalid;
    # the exception text is surfaced in the lint message.
    invalid_rst = False
    try:
        rst_to_html(help_text)
    except Exception as e:
        invalid_rst = str(e)
    if invalid_rst:
        lint_ctx.warn("Invalid reStructuredText found in help - [%s]." % invalid_rst)
    else:
        lint_ctx.valid("Help contains valid reStructuredText.")
| ssorgatem/pulsar | galaxy/tools/linters/help.py | Python | apache-2.0 | 939 | [
"Galaxy"
] | e4d93616068d5040eb78b10a70ce53d3b54035ade51565404f5f8d1520821dfa |
"""Provide the Reddit class."""
import asyncio
import configparser
import os
import re
import time
from itertools import islice
from logging import getLogger
from typing import (
IO,
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
Optional,
Type,
Union,
)
from warnings import warn
from prawcore import (
Authorizer,
DeviceIDAuthorizer,
ReadOnlyAuthorizer,
Redirect,
Requestor,
ScriptAuthorizer,
TrustedAuthenticator,
UntrustedAuthenticator,
session,
)
from prawcore.exceptions import BadRequest
from . import models
from .config import Config
from .const import API_PATH, USER_AGENT_FORMAT, __version__
from .exceptions import (
ClientException,
MissingRequiredAttributeException,
RedditAPIException,
)
from .objector import Objector
from .util.token_manager import BaseTokenManager
try:
from update_checker import update_check
UPDATE_CHECKER_MISSING = False
except ImportError: # pragma: no cover
UPDATE_CHECKER_MISSING = True
if TYPE_CHECKING: # pragma: no cover
import praw
Comment = models.Comment
Redditor = models.Redditor
Submission = models.Submission
Subreddit = models.Subreddit
logger = getLogger("praw")
class Reddit:
"""The Reddit class provides convenient access to Reddit's API.
Instances of this class are the gateway to interacting with Reddit's API through
PRAW. The canonical way to obtain an instance of this class is via:
.. code-block:: python
import praw
reddit = praw.Reddit(
client_id="CLIENT_ID",
client_secret="CLIENT_SECRET",
password="PASSWORD",
user_agent="USERAGENT",
username="USERNAME",
)
"""
update_checked = False
_ratelimit_regex = re.compile(r"([0-9]{1,3}) (milliseconds?|seconds?|minutes?)")
@property
def _next_unique(self) -> int:
    """Return the next value of the internal counter (post-incremented)."""
    current = self._unique_counter
    self._unique_counter = current + 1
    return current
@property
def read_only(self) -> bool:
    """Return ``True`` when using the ``ReadOnlyAuthorizer``."""
    return self._core == self._read_only_core

@read_only.setter
def read_only(self, value: bool) -> None:
    """Set or unset the use of the ReadOnlyAuthorizer.

    :raises: :class:`.ClientException` when attempting to unset ``read_only``
        and only the ``ReadOnlyAuthorizer`` is available.
    """
    if not value:
        if self._authorized_core is None:
            raise ClientException(
                "read_only cannot be unset as only the ReadOnlyAuthorizer is available."
            )
        self._core = self._authorized_core
    else:
        self._core = self._read_only_core
@property
def validate_on_submit(self) -> bool:
    """Get validate_on_submit.

    .. deprecated:: 7.0

        If property :attr:`.validate_on_submit` is set to ``False``, the
        behavior is deprecated by Reddit. This attribute will be removed
        around May-June 2020.
    """
    # Warn only when the flag is exactly False (its unset default).
    if self._validate_on_submit is False:
        warn(
            "Reddit will check for validation on all posts around May-June 2020. It"
            " is recommended to check for validation by setting"
            " reddit.validate_on_submit to True.",
            category=DeprecationWarning,
            stacklevel=3,
        )
    return self._validate_on_submit

@validate_on_submit.setter
def validate_on_submit(self, val: bool):
    self._validate_on_submit = val
def __enter__(self):
    """Enter the runtime context and return this :class:`.Reddit` instance."""
    return self

def __exit__(self, *_args):
    """Exit the runtime context; no cleanup is required."""
def __init__(
    self,
    site_name: str = None,
    config_interpolation: Optional[str] = None,
    requestor_class: Optional[Type[Requestor]] = None,
    requestor_kwargs: Dict[str, Any] = None,
    *,
    token_manager: Optional[BaseTokenManager] = None,
    **config_settings: Union[str, bool],
):  # noqa: D207, D301
    """Initialize a :class:`.Reddit` instance.

    :param site_name: The name of a section in your ``praw.ini`` file from which to
        load settings from. This parameter, in tandem with an appropriately
        configured ``praw.ini``, file is useful if you wish to easily save
        credentials for different applications, or communicate with other servers
        running Reddit. If ``site_name`` is ``None``, then the site name will be
        looked for in the environment variable ``praw_site``. If it is not found
        there, the ``DEFAULT`` site will be used.
    :param requestor_class: A class that will be used to create a requestor. If not
        set, use ``prawcore.Requestor`` (default: ``None``).
    :param requestor_kwargs: Dictionary with additional keyword arguments used to
        initialize the requestor (default: ``None``).
    :param token_manager: When provided, the passed instance, a subclass of
        :class:`.BaseTokenManager`, will manage tokens via two callback functions.
        This parameter must be provided in order to work with refresh tokens
        (default: ``None``).

    Additional keyword arguments will be used to initialize the :class:`.Config`
    object. This can be used to specify configuration settings during instantiation
    of the :class:`.Reddit` instance. For more details, please see
    :ref:`configuration`.

    Required settings are:

    - client_id
    - client_secret (for installed applications set this value to ``None``)
    - user_agent

    The ``requestor_class`` and ``requestor_kwargs`` allow for customization of the
    requestor :class:`.Reddit` will use. This allows, e.g., easily adding behavior
    to the requestor or wrapping its |Session|_ in a caching layer. Example usage:

    .. |Session| replace:: ``Session``
    .. _session: https://2.python-requests.org/en/master/api/#requests.Session

    .. code-block:: python

        import json

        import betamax
        import requests
        from prawcore import Requestor

        from praw import Reddit


        class JSONDebugRequestor(Requestor):
            def request(self, *args, **kwargs):
                response = super().request(*args, **kwargs)
                print(json.dumps(response.json(), indent=4))
                return response


        my_session = betamax.Betamax(requests.Session())
        reddit = Reddit(
            ..., requestor_class=JSONDebugRequestor, requestor_kwargs={"session": my_session}
        )

    """
    # Session cores are populated by _prepare_prawcore() below.
    self._core = self._authorized_core = self._read_only_core = None
    self._objector = None
    self._token_manager = token_manager
    self._unique_counter = 0
    self._validate_on_submit = False
    try:
        # Resolution order for the config section: explicit argument,
        # then the ``praw_site`` environment variable, then ``DEFAULT``.
        config_section = site_name or os.getenv("praw_site") or "DEFAULT"
        self.config = Config(
            config_section, config_interpolation, **config_settings
        )
    except configparser.NoSectionError as exc:
        help_message = (
            "You provided the name of a praw.ini configuration which does not"
            " exist.\n\nFor help with creating a Reddit instance,"
            " visit\nhttps://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html\n\nFor"
            " help on configuring PRAW,"
            " visit\nhttps://praw.readthedocs.io/en/latest/getting_started/configuration.html"
        )
        # Only append the hint when the user explicitly named a section.
        if site_name is not None:
            exc.message += f"\n{help_message}"
        raise
    required_message = (
        "Required configuration setting {!r} missing. \nThis setting can be"
        " provided in a praw.ini file, as a keyword argument to the `Reddit` class"
        " constructor, or as an environment variable."
    )
    # Validate the presence of the mandatory settings before any network setup.
    for attribute in ("client_id", "user_agent"):
        if getattr(self.config, attribute) in (self.config.CONFIG_NOT_SET, None):
            raise MissingRequiredAttributeException(
                required_message.format(attribute)
            )
    if self.config.client_secret is self.config.CONFIG_NOT_SET:
        raise MissingRequiredAttributeException(
            f"{required_message.format('client_secret')}\nFor installed"
            " applications this value must be set to None via a keyword argument"
            " to the `Reddit` class constructor."
        )
    self._check_for_update()
    self._prepare_objector()
    self._prepare_prawcore(requestor_class, requestor_kwargs)
    self.auth = models.Auth(self, None)
    """An instance of :class:`.Auth`.

    Provides the interface for interacting with installed and web applications.

    .. seealso::

        :ref:`auth_url`

    """
    self.drafts = models.DraftHelper(self, None)
    """An instance of :class:`.DraftHelper`.

    Provides the interface for working with :class:`.Draft` instances.

    For example, to list the currently authenticated user's drafts:

    .. code-block:: python

        drafts = reddit.drafts()

    To create a draft on r/test run:

    .. code-block:: python

        reddit.drafts.create(title="title", selftext="selftext", subreddit="test")

    """
    self.front = models.Front(self)
    """An instance of :class:`.Front`.

    Provides the interface for interacting with front page listings. For example:

    .. code-block:: python

        for submission in reddit.front.hot():
            print(submission)

    """
    self.inbox = models.Inbox(self, None)
    """An instance of :class:`.Inbox`.

    Provides the interface to a user's inbox which produces :class:`.Message`,
    :class:`.Comment`, and :class:`.Submission` instances. For example, to iterate
    through comments which mention the authorized user run:

    .. code-block:: python

        for comment in reddit.inbox.mentions():
            print(comment)

    """
    self.live = models.LiveHelper(self, None)
    """An instance of :class:`.LiveHelper`.

    Provides the interface for working with :class:`.LiveThread` instances. At
    present only new LiveThreads can be created.

    .. code-block:: python

        reddit.live.create("title", "description")

    """
    self.multireddit = models.MultiredditHelper(self, None)
    """An instance of :class:`.MultiredditHelper`.

    Provides the interface to working with :class:`.Multireddit` instances. For
    example you can obtain a :class:`.Multireddit` instance via:

    .. code-block:: python

        reddit.multireddit("samuraisam", "programming")

    """
    self.redditors = models.Redditors(self, None)
    """An instance of :class:`.Redditors`.

    Provides the interface for :class:`.Redditor` discovery. For example, to iterate
    over the newest Redditors, run:

    .. code-block:: python

        for redditor in reddit.redditors.new(limit=None):
            print(redditor)

    """
    self.subreddit = models.SubredditHelper(self, None)
    """An instance of :class:`.SubredditHelper`.

    Provides the interface to working with :class:`.Subreddit` instances. For
    example to create a :class:`.Subreddit` run:

    .. code-block:: python

        reddit.subreddit.create("coolnewsubname")

    To obtain a lazy :class:`.Subreddit` instance run:

    .. code-block:: python

        reddit.subreddit("test")

    Multiple subreddits can be combined and filtered views of r/all can also be used
    just like a subreddit:

    .. code-block:: python

        reddit.subreddit("redditdev+learnpython+botwatch")
        reddit.subreddit("all-redditdev-learnpython")

    """
    self.subreddits = models.Subreddits(self, None)
    """An instance of :class:`.Subreddits`.

    Provides the interface for :class:`.Subreddit` discovery. For example, to
    iterate over the set of default subreddits run:

    .. code-block:: python

        for subreddit in reddit.subreddits.default(limit=None):
            print(subreddit)

    """
    self.user = models.User(self)
    """An instance of :class:`.User`.

    Provides the interface to the currently authorized :class:`.Redditor`. For
    example to get the name of the current user run:

    .. code-block:: python

        print(reddit.user.me())

    """
def _check_for_async(self):
    """Warn when PRAW appears to be used inside a running asyncio event loop."""
    if not self.config.check_for_async:  # pragma: no cover
        return
    # Jupyter (ZMQ) kernels always run an event loop; suppress the warning there.
    try:
        if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
            return
    except NameError:
        pass
    try:
        asyncio.get_running_loop()
    except Exception:  # Quietly fail if any exception occurs during the check
        return
    logger.warning(
        "It appears that you are using PRAW in an asynchronous"
        " environment.\nIt is strongly recommended to use Async PRAW:"
        " https://asyncpraw.readthedocs.io.\nSee"
        " https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments"
        " for more info.\n",
    )
def _check_for_update(self):
    """Run the package update check at most once per process, if enabled."""
    if UPDATE_CHECKER_MISSING:
        return
    if Reddit.update_checked or not self.config.check_for_updates:
        return
    update_check(__package__, __version__)
    Reddit.update_checked = True
def _prepare_common_authorizer(self, authenticator):
    # Build the authorized session core. Precedence: an explicit token
    # manager, then a configured refresh token; otherwise fall back to the
    # read-only session and return early.
    if self._token_manager is not None:
        warn(
            "Token managers have been deprecated and will be removed in the near"
            " future. See https://www.reddit.com/r/redditdev/comments/olk5e6/"
            "followup_oauth2_api_changes_regarding_refresh/ for more details.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        # A token manager and a refresh_token setting are mutually exclusive.
        if self.config.refresh_token:
            raise TypeError(
                "``refresh_token`` setting cannot be provided when providing"
                " ``token_manager``"
            )
        self._token_manager.reddit = self
        authorizer = Authorizer(
            authenticator,
            post_refresh_callback=self._token_manager.post_refresh_callback,
            pre_refresh_callback=self._token_manager.pre_refresh_callback,
        )
    elif self.config.refresh_token:
        authorizer = Authorizer(
            authenticator, refresh_token=self.config.refresh_token
        )
    else:
        # No refresh credentials: only read-only access is possible.
        self._core = self._read_only_core
        return
    self._core = self._authorized_core = session(authorizer)
def _prepare_objector(self):
    # Map Reddit API "kind" strings (both config-defined type prefixes and
    # literal kind names) to the PRAW model classes the Objector instantiates.
    mappings = {
        self.config.kinds["comment"]: models.Comment,
        self.config.kinds["message"]: models.Message,
        self.config.kinds["redditor"]: models.Redditor,
        self.config.kinds["submission"]: models.Submission,
        self.config.kinds["subreddit"]: models.Subreddit,
        self.config.kinds["trophy"]: models.Trophy,
        "Button": models.Button,
        "Collection": models.Collection,
        "Draft": models.Draft,
        "DraftList": models.DraftList,
        "Image": models.Image,
        "LabeledMulti": models.Multireddit,
        "Listing": models.Listing,
        "LiveUpdate": models.LiveUpdate,
        "LiveUpdateEvent": models.LiveThread,
        "MenuLink": models.MenuLink,
        "ModeratedList": models.ModeratedList,
        "ModmailAction": models.ModmailAction,
        "ModmailConversation": models.ModmailConversation,
        "ModmailConversations-list": models.ModmailConversationsListing,
        "ModmailMessage": models.ModmailMessage,
        "Submenu": models.Submenu,
        "TrophyList": models.TrophyList,
        "UserList": models.RedditorList,
        "UserSubreddit": models.UserSubreddit,
        "button": models.ButtonWidget,
        "calendar": models.Calendar,
        "community-list": models.CommunityList,
        "custom": models.CustomWidget,
        "id-card": models.IDCard,
        "image": models.ImageWidget,
        "menu": models.Menu,
        "modaction": models.ModAction,
        "moderator-list": models.ModeratorListing,
        "moderators": models.ModeratorsWidget,
        "more": models.MoreComments,
        "post-flair": models.PostFlairWidget,
        "rule": models.Rule,
        "stylesheet": models.Stylesheet,
        "subreddit-rules": models.RulesWidget,
        "textarea": models.TextArea,
        "widget": models.Widget,
    }
    self._objector = Objector(self, mappings)
def _prepare_prawcore(self, requestor_class=None, requestor_kwargs=None):
    """Build the HTTP requestor and set up the prawcore session cores."""
    chosen_class = requestor_class or Requestor
    extra_kwargs = requestor_kwargs or {}
    requestor = chosen_class(
        USER_AGENT_FORMAT.format(self.config.user_agent),
        self.config.oauth_url,
        self.config.reddit_url,
        **extra_kwargs,
    )
    # A client secret implies a trusted (confidential) OAuth2 application.
    if self.config.client_secret:
        self._prepare_trusted_prawcore(requestor)
    else:
        self._prepare_untrusted_prawcore(requestor)
def _prepare_trusted_prawcore(self, requestor):
    """Set up sessions for a trusted (client-secret) application."""
    authenticator = TrustedAuthenticator(
        requestor,
        self.config.client_id,
        self.config.client_secret,
        self.config.redirect_uri,
    )
    self._read_only_core = session(ReadOnlyAuthorizer(authenticator))
    if not (self.config.username and self.config.password):
        self._prepare_common_authorizer(authenticator)
        return
    # Username + password enables the script-application (password) flow.
    script_authorizer = ScriptAuthorizer(
        authenticator, self.config.username, self.config.password
    )
    self._core = self._authorized_core = session(script_authorizer)
def _prepare_untrusted_prawcore(self, requestor):
    """Set up sessions for an untrusted (installed) application."""
    authenticator = UntrustedAuthenticator(
        requestor, self.config.client_id, self.config.redirect_uri
    )
    self._read_only_core = session(DeviceIDAuthorizer(authenticator))
    self._prepare_common_authorizer(authenticator)
def comment(
    self,  # pylint: disable=invalid-name
    id: Optional[str] = None,  # pylint: disable=redefined-builtin
    url: Optional[str] = None,
):
    """Return a lazy instance of :class:`.Comment`.

    :param id: The ID of the comment.
    :param url: A permalink pointing to the comment.

    .. note::

        To obtain the comment's replies, call :meth:`~.Comment.refresh` on the
        returned :class:`.Comment`.

    """
    lazy_comment = models.Comment(self, id=id, url=url)
    return lazy_comment
def domain(self, domain: str):
    """Return an instance of :class:`.DomainListing`.

    :param domain: The domain to obtain submission listings for.

    """
    listing = models.DomainListing(self, domain)
    return listing
def get(
    self,
    path: str,
    params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
):
    """Return parsed objects from a GET request to ``path``.

    :param path: The path to fetch.
    :param params: The query parameters to add to the request (default: ``None``).

    """
    return self._objectify_request(path=path, method="GET", params=params)
def info(
    self,
    fullnames: Optional[Iterable[str]] = None,
    url: Optional[str] = None,
    subreddits: Optional[Iterable[Union["praw.models.Subreddit", str]]] = None,
) -> Generator[
    Union["praw.models.Subreddit", "praw.models.Comment", "praw.models.Submission"],
    None,
    None,
]:
    """Fetch information about each item in ``fullnames``, ``url``, or ``subreddits``.

    :param fullnames: A list of fullnames for comments, submissions, and/or
        subreddits.
    :param url: A url (as a string) to retrieve lists of link submissions from.
    :param subreddits: A list of subreddit names or :class:`.Subreddit` objects to
        retrieve subreddits from.

    :returns: A generator that yields found items in their relative order.

    Items that cannot be matched will not be generated. Requests will be issued in
    batches for each 100 fullnames.

    .. note::

        For comments that are retrieved via this method, if you want to obtain its
        replies, you will need to call :meth:`~.Comment.refresh` on the yielded
        :class:`.Comment`.

    .. note::

        When using the URL option, it is important to be aware that URLs are treated
        literally by Reddit's API. As such, the URLs ``"youtube.com"`` and
        ``"https://www.youtube.com"`` will provide a different set of submissions.

    """
    # Exactly one of the three parameters must be provided.
    none_count = (fullnames, url, subreddits).count(None)
    if none_count != 2:
        raise TypeError(
            "Either `fullnames`, `url`, or `subreddits` must be provided."
        )
    is_using_fullnames = fullnames is not None
    ids_or_names = fullnames if is_using_fullnames else subreddits
    if ids_or_names is not None:
        # A bare string would be iterated character by character; reject it.
        if isinstance(ids_or_names, str):
            raise TypeError(
                "`fullnames` and `subreddits` must be a non-str iterable."
            )
        api_parameter_name = "id" if is_using_fullnames else "sr_name"

        def generator(names):
            # Issue requests in chunks of 100 identifiers per API call.
            if is_using_fullnames:
                iterable = iter(names)
            else:
                iterable = iter([str(item) for item in names])
            while True:
                chunk = list(islice(iterable, 100))
                if not chunk:
                    break
                params = {api_parameter_name: ",".join(chunk)}
                for result in self.get(API_PATH["info"], params=params):
                    yield result

        return generator(ids_or_names)

    # URL variant: a single request keyed on the literal URL string.
    def generator(url):
        params = {"url": url}
        for result in self.get(API_PATH["info"], params=params):
            yield result

    return generator(url)
def _objectify_request(
    self,
    data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
    files: Optional[Dict[str, IO]] = None,
    json=None,
    method: str = "",
    params: Optional[Union[str, Dict[str, str]]] = None,
    path: str = "",
) -> Any:
    """Issue a request and convert the raw response through the ``Objector``.

    :param data: Dictionary, bytes, or file-like object to send in the body of the
        request (default: ``None``).
    :param files: Dictionary, filename to file (like) object mapping (default:
        ``None``).
    :param json: JSON-serializable object to send in the body of the request with a
        Content-Type header of application/json (default: ``None``). If ``json`` is
        provided, ``data`` should not be.
    :param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
    :param params: The query parameters to add to the request (default: ``None``).
    :param path: The path to fetch.

    """
    raw_response = self.request(
        data=data,
        files=files,
        json=json,
        method=method,
        params=params,
        path=path,
    )
    return self._objector.objectify(raw_response)
def _handle_rate_limit(
    self, exception: RedditAPIException
) -> Optional[Union[int, float]]:
    """Return the number of seconds to sleep before retrying, or ``None``.

    A non-``None`` value is returned only when a ``RATELIMIT`` error item is
    found whose parsed wait time is within ``config.ratelimit_seconds``.
    """
    for error_item in exception.items:
        if error_item.error_type != "RATELIMIT":
            continue
        match = self._ratelimit_regex.search(error_item.message)
        if not match:
            break
        wait = int(match.group(1))
        unit = match.group(2)
        if unit.startswith("minute"):
            wait *= 60
        elif unit.startswith("millisecond"):
            wait = 0
        if wait <= int(self.config.ratelimit_seconds):
            # Pad by one second to be safe.
            return wait + 1
    return None
def delete(
    self,
    path: str,
    data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
    json=None,
    params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
    """Return parsed objects from a DELETE request to ``path``.

    :param path: The path to fetch.
    :param data: Dictionary, bytes, or file-like object to send in the body of the
        request (default: ``None``).
    :param json: JSON-serializable object to send in the body of the request with a
        Content-Type header of application/json (default: ``None``). If ``json`` is
        provided, ``data`` should not be.
    :param params: The query parameters to add to the request (default: ``None``).

    """
    return self._objectify_request(
        path=path, method="DELETE", data=data, json=json, params=params
    )
def patch(
    self,
    path: str,
    data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
    json=None,
) -> Any:
    """Return parsed objects from a PATCH request to ``path``.

    :param path: The path to fetch.
    :param data: Dictionary, bytes, or file-like object to send in the body of the
        request (default: ``None``).
    :param json: JSON-serializable object to send in the body of the request with a
        Content-Type header of application/json (default: ``None``). If ``json`` is
        provided, ``data`` should not be.

    """
    return self._objectify_request(path=path, method="PATCH", data=data, json=json)
def post(
    self,
    path: str,
    data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
    files: Optional[Dict[str, IO]] = None,
    params: Optional[Union[str, Dict[str, str]]] = None,
    json=None,
) -> Any:
    """Return parsed objects from a POST request to ``path``.

    Retries up to three times when Reddit responds with a retryable
    ``RATELIMIT`` error, sleeping between attempts.

    :param path: The path to fetch.
    :param data: Dictionary, bytes, or file-like object to send in the body of the
        request (default: ``None``).
    :param files: Dictionary, filename to file (like) object mapping (default:
        ``None``).
    :param params: The query parameters to add to the request (default: ``None``).
    :param json: JSON-serializable object to send in the body of the request with a
        Content-Type header of application/json (default: ``None``). If ``json`` is
        provided, ``data`` should not be.

    """
    if json is None:
        data = data or {}
    last_exception = None
    for _ in range(3):
        try:
            return self._objectify_request(
                data=data,
                files=files,
                json=json,
                method="POST",
                params=params,
                path=path,
            )
        except RedditAPIException as exception:
            last_exception = exception
            seconds = self._handle_rate_limit(exception=exception)
            if seconds is None:
                break
            second_string = "second" if seconds == 1 else "seconds"
            logger.debug(f"Rate limit hit, sleeping for {seconds} {second_string}")
            time.sleep(seconds)
    raise last_exception
def put(
    self,
    path: str,
    data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
    json=None,
):
    """Return parsed objects from a PUT request to ``path``.

    :param path: The path to fetch.
    :param data: Dictionary, bytes, or file-like object to send in the body of the
        request (default: ``None``).
    :param json: JSON-serializable object to send in the body of the request with a
        Content-Type header of application/json (default: ``None``). If ``json`` is
        provided, ``data`` should not be.

    """
    return self._objectify_request(path=path, method="PUT", data=data, json=json)
def random_subreddit(self, nsfw: bool = False) -> "praw.models.Subreddit":
    """Return a random lazy instance of :class:`.Subreddit`.

    :param nsfw: Return a random NSFW (not safe for work) subreddit (default:
        ``False``).

    """
    endpoint = "randnsfw" if nsfw else "random"
    url = API_PATH["subreddit"].format(subreddit=endpoint)
    path = None
    # Reddit answers this endpoint with a redirect to the chosen subreddit;
    # the redirect path carries the subreddit's display name.
    try:
        self.get(url, params={"unique": self._next_unique})
    except Redirect as redirect:
        path = redirect.path
    return models.Subreddit(self, path.split("/")[2])
def redditor(
    self, name: Optional[str] = None, fullname: Optional[str] = None
) -> "praw.models.Redditor":
    """Return a lazy instance of :class:`.Redditor`.

    :param name: The name of the redditor.
    :param fullname: The fullname of the redditor, starting with ``t2_``.

    Either ``name`` or ``fullname`` can be provided, but not both.

    """
    lazy_redditor = models.Redditor(self, name=name, fullname=fullname)
    return lazy_redditor
    def request(
        self,
        method: str,
        path: str,
        params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
        data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
        files: Optional[Dict[str, IO]] = None,
        json=None,
    ) -> Any:
        """Return the parsed JSON data returned from a request to URL.

        :param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
        :param path: The path to fetch.
        :param params: The query parameters to add to the request (default: ``None``).
        :param data: Dictionary, bytes, or file-like object to send in the body of the
            request (default: ``None``).
        :param files: Dictionary, filename to file (like) object mapping (default:
            ``None``).
        :param json: JSON-serializable object to send in the body of the request with a
            Content-Type header of application/json (default: ``None``). If ``json`` is
            provided, ``data`` should not be.
        """
        if self.config.check_for_async:
            # NOTE(review): presumably warns when PRAW is used from async code —
            # confirm against _check_for_async's implementation.
            self._check_for_async()
        if data and json:
            raise ClientException("At most one of `data` or `json` is supported.")
        try:
            return self._core.request(
                method,
                path,
                data=data,
                files=files,
                params=params,
                timeout=self.config.timeout,
                json=json,
            )
        except BadRequest as exception:
            # Translate Reddit's structured 400 payload into a RedditAPIException
            # carrying reason/explanation/field; otherwise re-raise the BadRequest.
            try:
                data = exception.response.json()
            except ValueError:
                # Body is not JSON: wrap raw text as the reason, or give up and
                # re-raise when the body is empty.
                if exception.response.text:
                    data = {"reason": exception.response.text}
                else:
                    raise exception
            if set(data) == {"error", "message"}:
                # Generic error payload without a structured "reason": keep the
                # original BadRequest rather than wrapping it.
                raise
            explanation = data.get("explanation")
            if "fields" in data:
                # NOTE(review): assumes the API reports exactly one offending
                # field — confirm; also note `assert` is stripped under -O.
                assert len(data["fields"]) == 1
                field = data["fields"][0]
            else:
                field = None
            raise RedditAPIException(
                [data["reason"], explanation, field]
            ) from exception
def submission( # pylint: disable=invalid-name,redefined-builtin
self, id: Optional[str] = None, url: Optional[str] = None
) -> "praw.models.Submission":
"""Return a lazy instance of :class:`.Submission`.
:param id: A Reddit base36 submission ID, e.g., ``"2gmzqe"``.
:param url: A URL supported by :meth:`.Submission.id_from_url`.
Either ``id`` or ``url`` can be provided, but not both.
"""
return models.Submission(self, id=id, url=url)
def username_available(self, name: str) -> bool:
"""Check to see if the username is available.
For example, to check if the username ``bboe`` is available, try:
.. code-block:: python
reddit.username_available("bboe")
"""
return self._objectify_request(
path=API_PATH["username_available"], params={"user": name}, method="GET"
)
| praw-dev/praw | praw/reddit.py | Python | bsd-2-clause | 33,704 | [
"VisIt"
] | c49fa6b7969fea06047007a19e64cf4da8bd0e681b7eb88c79b7f3fc2e4298fc |
# Tolerance used for floating-point comparisons of cell data; may be
# overridden from the command line in main().
accuracy = 1e-8


class Cell:
    """A VTK cell reduced to its bounding box and its "q0" scalar value.

    Two cells compare equal when both their q values and all bounding-box
    coordinates agree within the global ``accuracy`` tolerance.
    """

    def __init__(self, vtkCell, bounds, q):
        self.vtkCell = vtkCell  # original VTK cell object (kept for reference)
        self.bounds = bounds    # bounding box coordinates (sequence of floats)
        self.q = q              # value of the "q0" scalar for this cell

    def __eq__(self, other):
        # Bug fix: the original used the undefined names `false`/`true` and
        # the typo `sel.bounds`, which raised NameError at runtime.
        if abs(self.q - other.q) > accuracy:
            return False
        if len(self.bounds) != len(other.bounds):
            return False
        for a, b in zip(self.bounds, other.bounds):
            if abs(a - b) > accuracy:
                return False
        return True

    def __cmp__(self, other):
        # Three-way comparison (used by Python 2 sort): q first, then bounds.
        if self.q - other.q > accuracy:
            return 1
        elif other.q - self.q > accuracy:
            return -1
        if len(self.bounds) != len(other.bounds):
            # Bug fix: original returned the undefined name `false`; order by
            # bounds length so sorting stays total instead of crashing.
            return len(self.bounds) - len(other.bounds)
        for i in range(len(self.bounds)):
            if self.bounds[i] - other.bounds[i] > accuracy:
                return 1
            elif other.bounds[i] - self.bounds[i] > accuracy:
                return -1
        return 0

    # Rich comparisons so sorting and <,> also work under Python 3,
    # delegating to the tolerance-aware three-way comparison above.
    def __lt__(self, other):
        return self.__cmp__(other) < 0

    def __gt__(self, other):
        return self.__cmp__(other) > 0

    def __str__(self):
        return "q: " + str(self.q) + " bounds: " + str(self.bounds)
def parseRange(argument):
    """Parse a rank specification into a range.

    "a:b" yields range(a, b); a single number "n" yields the one-element
    range(n, n + 1).
    """
    if ':' in argument:
        bounds = [int(part) for part in argument.split(':')]
        return range(*bounds)
    value = int(argument)
    return range(value, value + 1)
def readCellsFromFile(cells, path, iteration, rank):
    """Append Cell objects read from one legacy VTK file to ``cells``.

    ``path`` is a filename template in which __ITERATION__ and __RANK__ are
    substituted. Returns the number of cells read, or 0 when the file does
    not exist.
    """
    import vtk
    import os.path
    # Build the concrete filename for this iteration/rank pair.
    filename = path.replace('__ITERATION__', str(iteration)).replace('__RANK__', str(rank))
    if os.path.exists(filename):
        reader = vtk.vtkDataSetReader()
        reader.SetFileName(filename)
        reader.SetReadAllScalars(True)
        reader.Update()
        grid = reader.GetOutput()
        numberOfCells = grid.GetNumberOfCells()
        cellData = grid.GetCellData()
        # "q0" is the scalar field whose values are compared between runs.
        qs = cellData.GetScalars("q0")
        for cellId in xrange(numberOfCells):  # NOTE: xrange => Python 2 only
            vtkCell = grid.GetCell(cellId)
            q = qs.GetTuple(cellId)[0]
            # The [:] slice stores a copy of the bounds sequence.
            cells.append(Cell(vtkCell, vtkCell.GetBounds()[:], q))
        return numberOfCells
    else:
        return 0
def findClosestMatch(cell, cells):
    """Return the index of the cell in ``cells`` closest to ``cell``.

    Distance is the Euclidean norm over the bounds coordinates plus the q
    difference weighted by a factor of 10. Returns -1 for an empty list.
    """
    import math
    bestIndex = -1
    # Bug fix: replace the arbitrary magic sentinel 1000000 with infinity so
    # genuinely huge distances cannot be silently rejected.
    minDistance = float('inf')
    for index, candidate in enumerate(cells):
        distance = 0.0
        for a, b in zip(cell.bounds, candidate.bounds):
            distance += (a - b) ** 2
        # q differences are weighted more heavily than geometric offsets.
        distance = math.sqrt((candidate.q - cell.q) ** 2 * 10 + distance)
        if distance < minDistance:
            minDistance = distance
            bestIndex = index
    return bestIndex
def findCellInList(cell, cells):
    """Binary-search ``cells`` (sorted) for ``cell``; return its index or -1."""
    lower = 0
    upper = len(cells)
    while upper > lower:
        # Bug fix: use floor division so the midpoint stays an integer under
        # Python 3 as well ("/" would produce a float index there).
        middle = (upper + lower) // 2
        middleCell = cells[middle]
        if middleCell < cell:
            lower = middle + 1
        elif middleCell > cell:
            upper = middle
        else:
            return middle
    return -1
def main():
    """Compare the cells of two sets of parallel VTK output files.

    Reads both rank ranges, sorts the resulting cell lists, and raises an
    exception on the first cell without a tolerance-equal counterpart.
    NOTE: this module uses Python 2 syntax (print statements, xrange).
    """
    from argparse import ArgumentParser
    parser = ArgumentParser(description='Tool for comparing vtk output of parallel runs.')
    parser.add_argument('path1', help='The path to the first set of vtk files. Use __ITERATION__ for iteration number and __RANK__ for rank number.')
    parser.add_argument('path2', help='The path to the second set of vtk files. Use __ITERATION__ for iteration number and __RANK__ for rank number.')
    parser.add_argument('iteration1', type=int, help='The iteration number of the first set of vtk files.')
    parser.add_argument('ranks1', help='The range of ranks for the first set of vtk files. Define single number or min:max.')
    parser.add_argument('iteration2', type=int, help='The iteration number of the second set of vtk files.')
    parser.add_argument('ranks2', help='The range of ranks for the second set of vtk files. Define single number or min:max.')
    # NOTE(review): for an optional *positional*, argparse uses `default`
    # (here None), not `const`, when the value is omitted — confirm whether
    # accuracy can end up None/str instead of the intended 1e-5 float.
    parser.add_argument('accuracy', help='The accuracy for numerical equality.', type=float, nargs='?', const='1e-5')
    arguments = parser.parse_args()
    # Override the module-level comparison tolerance used by Cell.
    global accuracy
    accuracy = arguments.accuracy
    # 'SameAsPath1' is a convenience alias to compare a run against itself
    # (e.g. different rank decompositions of the same output).
    if arguments.path2 == 'SameAsPath1':
        path2 = arguments.path1
    else:
        path2 = arguments.path2
    #Loop through ranks1
    cells1 = [] #set()
    ranks1 = parseRange(arguments.ranks1)
    for rank in ranks1:
        print "1: Parsing rank...", rank
        numberOfCells = readCellsFromFile(cells1, arguments.path1, arguments.iteration1, rank)
        print "Read", numberOfCells, "cells."
    print "1: Total number of cells:", len(cells1)
    #Loop through ranks2
    cells2 = [] #set()
    ranks2 = parseRange(arguments.ranks2)
    print ranks2
    for rank in ranks2:
        print "2: Parsing rank", rank
        numberOfCells = readCellsFromFile(cells2, path2, arguments.iteration2, rank)
        print "Read", numberOfCells, "cells."
    print "2: Total number of cells:", len(cells2)
    #Compare lists
    if len(cells1) != len(cells2):
        raise Exception("Number of cells do not match!")
    # Sort both sides (tolerance-aware __cmp__) so each cell can be located
    # by binary search; matched cells are removed to handle duplicates.
    cells1.sort()
    cells2.sort()
    for cell in cells1:
        index = findCellInList(cell, cells2)
        if index == -1:
            # Report the nearest candidate to help diagnose the mismatch.
            bestMatch = findClosestMatch(cell, cells2)
            if bestMatch == -1:
                bestMatchString = ""
            else:
                bestMatchString = "Best match is " + str(cells2[bestMatch])
            raise Exception("No matching cell for " + str(cell) + ". " + bestMatchString)
        else:
            del cells2[index]
    print "All cells match"

if __name__=="__main__":
    main()
| unterweg/peanoclaw | testscenarios/tools/compareResult.py | Python | bsd-3-clause | 5,050 | [
"VTK"
] | f0423a7b15664cbb71f3846a782615dae4319835a99e314577532781ef53c39e |
from django.core.management.base import BaseCommand
from schools.models import School, BoundaryType
from stories.models import Question, Questiongroup, QuestionType, QuestiongroupQuestions, Source
class Command(BaseCommand):
    """Management command populating the V3 GKA IVRS question set.

    Creates (or reuses) questiongroup version 5 for the "ivrs" source and
    links ten questions to it in a fixed sequence. Questions fetched with
    ``get`` must already exist; those fetched with ``get_or_create`` are
    created on demand. NOTE: written for Python 2 (print statement at end).
    """
    args = ""
    help = """Populate DB with V3 GKA IVRS questions
    ./manage.py populatev3gkaivrsquestions"""

    def handle(self, *args, **options):
        # Anchor records: the IVRS source, questiongroup version 5, the
        # Primary School boundary type, and the two question types used below.
        s = Source.objects.get(name="ivrs")
        q = Questiongroup.objects.get_or_create(version=5, source=s)[0]
        b = BoundaryType.objects.get(name='Primary School')
        qtype_checkbox = QuestionType.objects.get(name='checkbox')
        qtype_numeric = QuestionType.objects.get(name='numeric')
        # q1/q2 and most later questions are expected to pre-exist (get);
        # q3, q7 and q8 are created here if missing (get_or_create).
        q1 = Question.objects.get(
            text="Was the school open?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )
        q2 = Question.objects.get(
            text="Class visited",
            data_type=1,
            question_type=qtype_numeric,
            options="{1, 2, 3, 4, 5, 6, 7, 8}",
            school_type=b
        )
        q3 = Question.objects.get_or_create(
            text="Were the class 4 and 5 math teachers trained in GKA methodology in the school you have visited?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )[0]
        q4 = Question.objects.get(
            text="Was Math class happening on the day of your visit?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )
        q5 = Question.objects.get(
            text="Did you see children using the Ganitha Kalika Andolana TLM?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )
        q6 = Question.objects.get(
            text="Which Ganitha Kalika Andolana TLM was being used by teacher?",
            data_type=1,
            question_type=qtype_numeric,
            options="{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}",
            school_type=b
        )
        q7 = Question.objects.get_or_create(
            text="Were multiple TLMs being used?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )[0]
        q8 = Question.objects.get_or_create(
            text="Did you see representational stage being practiced during the class?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )[0]
        q9 = Question.objects.get(
            text="Was group work happening in the class on the day of your visit?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )
        q10 = Question.objects.get(
            text="Does the school have a separate functional toilet for girls?",
            data_type=1,
            question_type=qtype_checkbox,
            options="{'Yes','No'}",
            school_type=b
        )
        # Attach all ten questions to the group in their IVRS call order;
        # get_or_create keeps the command idempotent on re-runs.
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q1, sequence=1)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q2, sequence=2)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q3, sequence=3)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q4, sequence=4)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q5, sequence=5)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q6, sequence=6)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q7, sequence=7)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q8, sequence=8)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q9, sequence=9)
        QuestiongroupQuestions.objects.get_or_create(
            questiongroup=q, question=q10, sequence=10)
        print "V3 GKA questions populated"
| klpdotorg/dubdubdub | apps/ivrs/management/commands/archived_commands/populatev3gkaivrsquestions.py | Python | mit | 4,419 | [
"VisIt"
] | 2e52caecb1e2cc08faf2611f5887b72f338d13c929bafc1a53caf694cbf4ffe7 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 EPFL (Ecole Polytechnique federale de Lausanne)
# Laboratory for Biomolecular Modeling, School of Life Sciences
#
# POW is free software ;
# you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation ;
# either version 2 of the License, or (at your option) any later version.
# POW is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY ;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with POW ;
# if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
# Author : Matteo Degiacomi, matteothomas.degiacomi@epfl.ch
# Web site : http://lbm.epfl.ch
from Default import Parser as R
from Default import Space as S
from Default import Postprocess as PP
import numpy as np
import os, sys
from copy import deepcopy
from scipy.cluster.vq import *
from Protein import Protein
import AssemblyHeteroMultimer as A
import flexibility_new as F
import Multimer as M
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
import CCC_simulMap as CCC
import ClusterAndDraw as CnD
import wx
class Parser(R):
def __init__(self):
self.add('rmsdSelect','rmsd_select','array str',"NA")
self.add('constraint','constraint','str',"NA")
self.add('energy','energy_type','str',"vdw")
self.add('detectClash','detect_clash','str',"on")
self.add('target','target','array float',np.array([]))
self.add('mode','mode','str',"seed")
self.add('mixingWeight','mix_weight','float', 0.2)
# style of the assembly, rigid or flexible
self.add('assembly_style','assembly_style','str',"rigid")
# flexibility flags
self.add('topology', 'topology', 'array str', 'NA')
self.add('trajectory','trajectory', 'array str','NA')
self.add('trajSelection','trajselection','str','NA')
self.add('ratio','ratio','float',0.9)
self.add('align', 'align', 'str', 'no')
self.add('projection','proj_file','str',"NA")
# monomer flags
self.add('monomer','monomer_file_name','array str', "NA")
# post processing flags
self.add('cluster_threshold','cluster_threshold','float',"NA")
self.add('output_folder','output_folder','str',"result")
# density map docking flag
self.add('density_map_docking','map_dock_OnOff', 'str', 'no')
self.add('density_map','density_map', 'str', 'NA')
def check_variables(self):
if self.cluster_threshold<0:
print "ERROR: clustering threshlod should be greater than 0!"
sys.exit(1)
# GIORGIO_CODE check existence of pdb files for the monomers in the folder XXXXXXXXXXXXXXXXXXXXXX
for nickname,pdb_file in self.monomer_file_name:
if pdb_file != "NA" and nickname != "NA": # and self.monomer_style=="rigid"
tmp=os.path.abspath(pdb_file)
if os.path.isfile(pdb_file)!=1 :
print "ERROR: monomer pdb file %s not found"%pdb_file
sys.exit(1)
# CHECKING THE TRAJECTORIES AND THE TOPOLOGY FILE
if self.assembly_style=="flexible":
for nickname,top_file in self.topology:
if top_file != "NA" and nickname != "NA": # and self.monomer_style=="rigid"
tmp=os.path.abspath(pdb_file)
if os.path.isfile(top_file)!=1 :
print "ERROR: monomer pdb file %s not found"%top_file
sys.exit(1)
for nickname,traj_file in self.trajectory:
if traj_file != "NA" and nickname != "NA": # and self.monomer_style=="rigid"
tmp=os.path.abspath(traj_file)
if os.path.isfile(traj_file)!=1 :
print "ERROR: monomer pdb file %s not found"%traj_file
sys.exit(1)
# checking that every trajectory has a topology:
if len(self.trajectory) != len(self.topology):
print "ERROR: unequal number of topologies and trajectories"
#check if number of target measures are provided
if len(self.target) == 0 :
print 'ERROR: target measures not specified!'
sys.exit(1)
#test if constraint file exists, and function constaint_check is available
#try:
exec 'import %s as constraint'%(self.constraint.split('.')[0])
#except:
# print "ERROR: load of user defined constraint function failed!"
# sys.exit(1)
try:
constraint.constraint_check
except AttributeError:
print 'ERROR: constraint_check function not found in file %s'%self.constraint
sys.exit(1)
# check whether density flag is on or off
if self.map_dock_OnOff == "on" :
if self.density_map != "NA":
self.map_dock_OnOff = True
else:
print 'ERROR: if density map flag is on, a density map should be present in the directory'
sys.exit(1)
print ">> Electron Density Map Docking Mode"
elif self.map_dock_OnOff == "off":
self.map_dock_OnOff = False
else:
print 'ERROR: density_map_docking should be either "on" or "off"'
sys.exit(1)
class Structure():
    """Container for one monomeric unit of the assembly.

    Holds the loaded Protein object, its source file name, its initial
    coordinates, and (optionally) a PCA-based flexibility model. The string
    "NA" is used as the not-yet-set sentinel, mirroring the input-file
    convention.
    """

    def __init__(self):
        self.monomer = "NA"            # Protein instance once loaded
        self.pdb_file_name = "NA"      # path of the source pdb file
        self.index_CA_monomer = "NA"   # (filled elsewhere)
        self.flexibility = "NA"        # Flexibility_PCA instance when flexible
        self.init_coords = "NA"        # coordinates as loaded from the pdb

    def read_pdb(self, pdb):
        """Load a pdb file and remember its initial coordinates."""
        self.pdb_file_name = pdb
        protein = Protein()
        protein.import_pdb(pdb)
        self.monomer = protein
        self.init_coords = protein.get_xyz()

    def compute_PCA(self, topology, trajectory, align, ratio, mode, proj_file):
        """Build the PCA flexibility model from a trajectory."""
        flex = F.Flexibility_PCA()
        flex.compute_eigenvectors(topology, trajectory, align, ratio, mode, proj_file)
        self.flexibility = flex

    def setCoords(self):
        """Re-snapshot the monomer's current coordinates as the initial ones."""
        self.init_coords = self.monomer.get_xyz()
class Data:
    """Shared data for the assembly run: all Structure instances plus
    the C-alpha index tables used by the energy term."""
    # class-level defaults shared by all instances (legacy layout kept)
    index_ligand=[]
    index_receptor=[]
    cg_atoms=[]
    def __init__(self,params):
        # maps nickname -> position of its Structure in structure_list
        self.structure_hash = {}
        self.structure_list = []
        volume_structure_hash = {} # this is used to get the biggest structure
        # NOTE(review): keyed by atom count — two structures with the same
        # number of atoms would overwrite each other; confirm inputs differ.
        # GIORGIO_CODE create structure instances of the rigid monomers
        for nickName,pdb_file in params.monomer_file_name:
            # create instance of the structure class
            s = Structure()
            s.read_pdb (pdb_file)
            volume_structure_hash[len(s.monomer.get_xyz())] = [s, nickName]
        # create structure instance for the flexible monomers
        if params.assembly_style=="flexible":
            print ">> flexible docking requested for structures, launching PCA..."
            for nickName, traj_file in params.trajectory:
                try:
                    # get the topology file:
                    for nickName2, top_file in params.topology:
                        if nickName2 == nickName:
                            break
                    # create the structure and compute the PCA
                    s = Structure()
                    s.compute_PCA(top_file, traj_file, params.align, params.ratio, params.mode, params.proj_file)
                    # "protein.pdb" is written by the PCA step (middle structure)
                    s.read_pdb("protein.pdb")
                    volume_structure_hash[len(s.monomer.get_xyz())] = [s, nickName]
                except ImportError, e:
                    sys.exit(1)
        # TODO: work on the deform mode, but ask Matteo before
        if params.mode=="deform":
            self.structure_ligand=Protein()
            self.structure_ligand.import_pdb("protein.pdb")
            self.ligand.import_pdb("CA.pdb")
        # getting the biggest structure and putting at the beginning so that it is fixed
        sorted_volumes = volume_structure_hash.keys()
        sorted_volumes.sort()
        sorted_volumes.reverse()
        for i in sorted_volumes:
            # insert the elements in a list
            self.structure_list.append( volume_structure_hash[i][0] ) # insert the structure
            self.structure_hash[volume_structure_hash[i][1]] = self.structure_list.index(volume_structure_hash[i][0])
        self.structure_list_and_name = [self.structure_list, self.structure_hash]
        print self.structure_list_and_name
        #LIGAND STRUCTURE
        #self.ligand = Protein()
        # if params.assembly_style=="flexible":
        # print ">> flexible docking requested for ligand, launching PCA..."
        # try:
        # self.flex_ligand=F.Flexibility_PCA()
        # self.flex_ligand.compute_eigenvectors(params.ligand_topology,params.ligand_trajectory,params.ligand_align,params.ligand_ratio,params.mode,params.ligand_proj_file)
        # self.ligand.import_pdb("protein.pdb") # importing the middle structure
        # except ImportError, e:
        # sys.exit(1)
        #
        # if params.mode=="deform":
        # self.structure_ligand=Protein()
        # self.structure_ligand.import_pdb("protein.pdb")
        # self.ligand.import_pdb("CA.pdb")
        #else:
        #load monomeric structure (the pdb file)
        #self.ligand.import_pdb(params.ligand_file_name)
        if params.energy_type=="vdw":
            # C-alpha indexes per structure, used by Fitness.interface_vdw
            self.CA_index_of_structures = self.get_index(["CA"])
        # if the density map docking is on load the structure into data:
        if params.map_dock_OnOff:
            self.density_map_fileName = params.density_map
    def get_index(self,atoms=["CA","CB"]):
        """Return, per structure, the atom indexes matching the given atom
        names, obtained from a dummy zero-placement assembly.

        NOTE(review): a new sublist is appended per (atom name, structure)
        pair but indexed by structure number only — correct for a single
        atom name (the only way it is called here); confirm before calling
        with more than one name. Mutable default argument kept as-is.
        """
        #generate a dummy assembly and extract the indexes where atoms of interest are located
        # first create the numpy array containing all null translation and rotation for each of the mobile structures
        null_coordinate_array = np.zeros((len(self.structure_list)-1)*6)
        assembly = A.AssemblyHeteroMultimer(self.structure_list_and_name)
        assembly.place_all_mobile_structures(null_coordinate_array)
        #ligand_index=[]
        #receptor_index=[]
        index_of_all_structures = [] # this is going to be an array of arrays
        for aname in atoms:
            for structure_number in xrange(0,len(self.structure_list),1):
                index_of_all_structures.append([])
                #append indexes of an element in atoms list for all structures
                [m,index]=assembly.atomselect_structure(structure_number , "*","*",aname,True)
                for i in index:
                    index_of_all_structures[structure_number].append(i)
                ##append indexes of an element in atoms list for receptor
                #[m,index]=assembly.atomselect_receptor("*","*",aname,True)
                #for i in index:
                #receptor_index.append(i)
        return index_of_all_structures
class Space(S):
    """Search space definition: per-dimension low/high boundaries, cell
    sizes and boundary types for the PSO run.

    Layout: 6 rigid-body dimensions (3 translations, 3 rotations) for every
    mobile structure (all but the first, which stays fixed), followed by one
    dimension per PCA eigenvector of each flexible structure.
    """
    def __init__(self,params,data):
        # count flexible (eigenvector) dimensions over all structures
        len_flexi=0
        if params.assembly_style=="flexible":
            for structure in data.structure_list:
                if structure.flexibility != "NA":
                    len_flexi += len(structure.flexibility.eigenspace_size)
        len_rec=0
        #if params.receptor_style=="flexible":
        #len_rec=len(data.flex_receptor.eigenspace_size)
        len_rigid_dim = 6*(len(data.structure_list)-1)
        # for hetero-multimer assembly, given that every MOBILE (so exept the first one) protein has 6 degrees of freedom
        self.low=np.zeros(len_rigid_dim +len_flexi)
        self.high=np.zeros(len_rigid_dim +len_flexi)
        self.cell_size=np.zeros(len_rigid_dim +len_flexi)
        self.boundary_type=np.zeros(len_rigid_dim +len_flexi)
        #box size as given by all the structures dimensions
        first_min=np.min(data.structure_list[0].monomer.get_xyz(),axis=0)
        first_max=np.max(data.structure_list[0].monomer.get_xyz(),axis=0)
        # default translation box: fixed structure's bounding box grown by the
        # summed extents of all mobile structures
        distance_array = []
        for x in xrange (1,len(data.structure_list),1):
            distance_array.append(np.max(data.structure_list[x].monomer.get_xyz(),axis=0) - np.min(data.structure_list[x].monomer.get_xyz(),axis=0))
        distance_array = np.array(distance_array)
        summed_distances = np.sum(distance_array, axis = 0)
        box_min=first_min-(summed_distances)
        box_max=first_max+(summed_distances)
        if len(params.high_input)!=len(params.low_input):
            print "ERROR: boundaryMin and boundaryMax should have the same length!"
            sys.exit(1)
        #assign low boundaries
        if params.low_input!="NA" :
            if len(params.low_input)== len_rigid_dim :
                for i in xrange(0,len(params.low_input),1):
                    self.low[i]=params.low_input[i]
            else:
                print "ERROR: boundaryMin should contain 6 values (3 rotations, 3 translations)"
                sys.exit(1)
        else:
            print "WARNING: boundaryMin undefined, using default values"
            # pattern per mobile structure: 3 translation mins, then 3 zero
            # rotation mins; i cycles 0..5 across the rigid dimensions
            i = 0
            for x in xrange(0, len_rigid_dim ,1):
                if i < 3:
                    self.low[x] = box_min[i]
                    i += 1
                elif (i > 2) and (i != 6):
                    self.low[x] = 0.0
                    i+=1
                if i == 6:
                    i = 0
        #assign high boundaries
        if params.high_input!="NA" :
            if len(params.high_input)== len_rigid_dim:
                for i in xrange(0,len(params.high_input),1):
                    self.high[i]=params.high_input[i]
            else:
                print "ERROR: boundaryMax should contain 6 values (3 rotation, 3 translation)"
                sys.exit(1)
        else:
            print "WARNING: boundaryMax undefined, using default values"
            # same cyclic pattern: 3 translation maxes, then 360-degree rotations
            i = 0
            for x in xrange(0, len_rigid_dim ,1):
                if i < 3:
                    self.high[x] = box_max[i]
                    i += 1
                elif (i > 2) and (i != 6):
                    self.high[x] = 360.0
                    i+=1
                if i == 6:
                    i = 0
        # add all the flexible structures eigenvector fluctuations in the search space
        if params.assembly_style=="flexible":
            i = 0
            for structure in data.structure_list:
                if structure.flexibility != "NA":
                    for x in xrange(0, len(structure.flexibility.eigenspace_size),1):
                        self.low[len_rigid_dim+i]=-structure.flexibility.eigenspace_size[x]
                        self.high[len_rigid_dim+i]= structure.flexibility.eigenspace_size[x]
                        i += 1
        #add ligand eigenvector fluctuations in search space
        # for i in xrange(0,len_flexi,1):
        # self.low[len_rigid_dim+i]=-data.flex_ligand.eigenspace_size[i]
        # self.high[len_rigid_dim+i]=data.flex_ligand.eigenspace_size[i]
        #check boundary conditions consistency
        if len(self.low) != len(self.high):
            print 'ERROR: dimensions of min and max boundary conditions are not the same'
            sys.exit(1)
        if (self.low>self.high).any():
            print 'ERROR: a lower boundary condition is greated than a higher one'
            sys.exit(1)
        #define cell size
        self.cell_size=self.high-self.low
        #set boundary type (periodic for angles, repulsive for translation)
        if params.boundary_type=="NA":
            for i in xrange(0,len(self.low),1):
                self.boundary_type[i]=0
        elif params.boundary_type!="NA" and len(params.boundary_type)!=len(self.low):
            print 'ERROR: boundaries type inconsistent with system dimensions'
            print 'ERROR: %s dimensions given, but %s needed!'%(len(params.boundary_type),len(self.low))
            sys.exit(1)
        else:
            for i in xrange(0,len(self.low),1):
                self.boundary_type[i]=params.boundary_type[i]
class Fitness:
    """Fitness function for the PSO: mixes a van der Waals-like interface
    energy, the distance to user-supplied target measures, and (optionally)
    a cross-correlation score against an experimental density map."""
    def __init__(self,data,params):
        self.mode=params.mode
        self.map_docking_flag = params.map_dock_OnOff # do so because you want to pass this var to the function evaluate below
        # loading the reference/experimental density map file if flag is on
        if self.map_docking_flag:
            self.density_map_fileName = params.density_map
        #check if target exists
        try: params.target
        except NameError:
            print 'ERROR: target measures not found'
            sys.exit(1)
        self.target=params.target
        self.constraint=params.constraint.split('.')[0] # Xx constraint [rigidRandom].py
        #test if constraint file exists, and function constaint_check is available
        try:
            exec 'import %s as constraint'%(self.constraint)
        except ImportError, e:
            print "ERROR: load of user defined constraint function failed!"
            sys.exit(1)
        try: constraint.constraint_check
        except NameError:
            print 'ERROR: constraint_check function not found'
        #data to manipulate
        self.data=data
        self.assembly_style=params.assembly_style
        #self.receptor_style=params.receptor_style
        self.len_lig=0
        #if params.ligand_style=="flexible":
        #self.len_lig=len(self.data.flex_ligand.eigenspace_size)
        self.len_rec=0
        #if params.receptor_style=="flexible":
        #self.len_rec=len(self.data.flex_receptor.eigenspace_size)
        # weight of the energy term vs the target-distance term
        self.c1=params.mix_weight
    def evaluate(self,num,pos):
        """Score one particle position ``pos``; lower is better.

        Rebuilds the assembly from the position vector, then combines
        interface energy, constraint distance and (optionally) density map
        cross-correlation.
        """
        exec 'import %s as constraint'%(self.constraint)
        import AssemblyHeteroMultimer as A
        # if ligand is flexible, select the most appropriate frame
        for structure in self.data.structure_list:
            if self.assembly_style=="flexible" and structure.flexibility != "NA":
                len_rigid_dim = 6*(len(self.data.structure_list)-1)
                i = 0
                # NOTE(review): `self.coordinateArray` and loop index `n` are
                # not defined in this method — this branch looks broken
                # (probably meant to slice `pos`); confirm before relying on
                # flexible mode.
                deform_coeffs = self.coordinateArray[n][len_rigid_dim : len_rigid_dim + i + len(structure.flexibility.eigenspace_size) ]
                if self.mode=="seed":
                    # pick the trajectory frame closest to the deformed
                    # projection (vector quantization over PCA space)
                    pos_eig=structure.flexibility.proj[:,structure.flexibility.centroid]+deform_coeffs
                    code,min_dist=vq(structure.flexibility.proj.transpose(),np.array([pos_eig]))
                    target_frame=min_dist.argmin()
                    coords=structure.flexibility.all_coords[:,target_frame]
                    coords_reshaped=coords.reshape(len(coords)/3,3)
                    structure.monomer.set_xyz(coords_reshaped)
                else:
                    # deform mode: displace coordinates along the eigenvectors
                    coords=structure.monomer.get_xyz()
                    coords_reshaped=coords.reshape(len(coords)*3)
                    for n in xrange(0,len(deform_coeffs),1):
                        coords_reshaped+=deform_coeffs[n]*structure.flexibility.eigenvec[:,n]
                    structure.monomer.set_xyz(coords_reshaped.reshape(len(coords_reshaped)/3,3))
                i += len(structure.flexibility.eigenspace_size)
            else:
                # rigid structure: restore its original coordinates
                structure.monomer.set_xyz(structure.init_coords)
        # after getting the positions from PSO, create a new assembly according to those positions
        self.assembly = A.AssemblyHeteroMultimer(self.data.structure_list_and_name)
        self.assembly.place_all_mobile_structures(pos)
        # ------------------------------- DEFAULT FITNESS FUNCTION --------------------------
        if self.map_docking_flag == False:
            #if needed, compute error with respect of target measures
            distance=0
            if len(self.target)!=0:
                measure = constraint.constraint_check(self.data, self.assembly) # returns the distances between some ligand and receptor atoms
                if len(measure) != len(self.target) :
                    print 'ERROR: measure = %s'%measure
                    print 'ERROR: target measure = %s'%self.target
                    print 'ERROR: constraint file produced %s measures, but %s target measures are provided!'%(len(measure),len(self.target))
                    sys.exit(1)
                diff=self.target-np.array(measure)
                distance=np.sqrt(np.dot(diff,diff))
            #c1=0.1
            #compute system energy
            energy=0
            if len(self.data.CA_index_of_structures[0])>0:
                c1=self.c1 # was 0.2
                energy=self.interface_vdw()
                # weighted mix of energy and constraint distance
                return c1*energy+(1-c1)*distance
                #return energy/len(self.data.index_ligand)+distance
            else:
                print "WHAT THE...???"
            #else:
            # c1=0.001
            # energy=self.measure_cg_energy(self.assembly,num)
            # #fitness = coulomb+vdw+distance
            # return c1*(energy[1]+energy[2])+(1-c1)*distance
        # --------------------------------- DENSITY MAP DOCKING ----------------------------
        elif self.map_docking_flag == True:
            # the coefficient for CCC, bigger one will give a heavier weight for density map docking
            c2 = 20 # try make vary from 1 to 100
            #proceed to calculate check for the geometry constraints
            distance=0
            if len(self.target)!=0:
                measure = constraint.constraint_check(self.data, self.assembly) # returns the distances between some ligand and receptor atoms
                if len(measure) != len(self.target) :
                    print 'ERROR: measure = %s'%measure
                    print 'ERROR: target measure = %s'%self.target
                    print 'ERROR: constraint file produced %s measures, but %s target measures are provided!'%(len(measure),len(self.target))
                    sys.exit(1)
                diff=self.target-np.array(measure)
                distance=np.sqrt(np.dot(diff,diff))
            # compute the systems energy
            energy=0
            if len(self.data.CA_index_of_structures[0])>0:
                c1=self.c1
                energy=self.interface_vdw()
            fitness_score = c1*energy+(1-c1)*distance + c2 # the + 1 at the end is used to have a better score when good map dock fitting
            # -------------------- DENSITY MAP DOCKING FITNESS
            # only spend time on map correlation for already-promising scores
            if fitness_score < (c2 + 10):
                resol = 15
                print ">>> Density map refinement rank "+str(rank)
                # create the pbd file to be transformed into the density map
                self.assembly.create_PDB_for_density_map(rank)
                #create the simulated density map
                CCC.make_simulated_map ("simulated_map"+str(rank)+".pdb", rank, 1, resol )
                #compare the two density maps and extract their cross correlation coefficient:
                ccc = CCC.compute_corr(self.density_map_fileName, "simulated_map"+str(rank)+".sit", resol)
                #return the score of final function:
                return c1*energy+(1-c1)*distance+ c2*(1 - ccc)
            else:
                return fitness_score
    def measure_target(self):
        """Return the Euclidean distance between measured and target values."""
        #measure constraints
        measure = constraint.constraint_check(self.assembly)
        if len(measure) != len(self.target) :
            print 'ERROR: measure = %s'%measure
            print 'ERROR: target measure = %s'%self.target
            print 'ERROR: constraint file produced %s measures, but %s target measures are provided!'%(len(measure),len(self.target))
            sys.exit(1)
        #measure distance within target values and obtained values
        diff=self.target-np.array(measure)
        distance=np.sqrt(np.dot(diff,diff))
        return distance
    def interface_vdw(self):
        """Sum a 9-6 Lennard-Jones-style energy over all C-alpha pairs closer
        than the cutoff, for every pair of distinct structures."""
        epsilon=1.0
        sigma=4.7
        cutoff=12.0
        energy=0
        # for Heteromultimer assembly, you need to compute the energy of every structure against each other:
        for structure1_index in xrange (0,len(self.data.structure_list), 1):
            for structure2_index in xrange (structure1_index,len(self.data.structure_list), 1):
                d=[]
                if structure1_index == structure2_index:
                    pass
                else:
                    m1=self.assembly.get_structure_xyz(structure1_index)[self.data.CA_index_of_structures[structure1_index]]
                    m2=self.assembly.get_structure_xyz(structure2_index)[self.data.CA_index_of_structures[structure2_index]]
                    #extract coords of monomers 1 and 2 of multimeric structure according to desired atoms
                    #m1=self.assembly.get_ligand_xyz()[self.data.index_ligand]
                    #m2=self.assembly.get_receptor_xyz()[self.data.index_receptor]
                    #extract distances of every atom from all the others
                    for i in xrange(0,len(m1),1):
                        d.append(np.sqrt(np.sum((m2-m1[i])**2,axis=1)))
                    dist=np.array(d)
                    #detect interfacing atoms (atom couples at less than a certain cutoff distance
                    couples=np.array(np.where(dist<cutoff)) #detect couples of clashing residues
                    for i in xrange(0,len(couples[0]),1):
                        d=dist[couples[0,i],couples[1,i]]
                        energy+=4*epsilon*((sigma/d)**9-(sigma/d)**6)
        return energy
class Postprocess(PP):
def __init__(self,data,params):
self.data=data
self.params=params
self.len_lig=0
#if params.assembly_style=="flexible":
#self.len_lig=len(self.data.flex_ligand.eigenspace_size)
self.len_rec=0
#if params.receptor_style=="flexible":
#self.len_rec=len(self.data.flex_receptor.eigenspace_size)
#load constraint file
self.constraint=params.constraint.split('.')[0]
try:
exec 'import %s as constraint'%(self.constraint)
except ImportError, e:
print "ERROR: load of user defined constraint function failed!"
sys.exit(1)
try:
constraint.constraint_check
except NameError:
print 'ERROR: constraint_check function not found'
#clustering according to rmsd of solutions in search space
def run(self):
if rank == 0:
#create output directory for generated PDB
self.OUTPUT_DIRECTORY=self.params.output_folder
if os.path.isdir(self.OUTPUT_DIRECTORY)!=1:
os.mkdir(self.OUTPUT_DIRECTORY)
#use superclass method to filter acceptable solutions
self.log=self.select_solutions(self.params) # -> the result is in fact the self.filter_log already
print ">> %s solutions filtered"%len(self.log[:,1])
if len(self.log[:,1])==0:
return
self.coordinateArray = deepcopy(self.log) #[:, 0:len(self.log[0,:])].astype(float)
self.dummyMatrix = np.empty(len(self.coordinateArray)**2)
self.dummyMatrix.fill(100)
self.distanceMatrix = self.dummyMatrix.reshape(len(self.coordinateArray),len(self.coordinateArray))
self.dummyMatrix = []
# variables to sliece the matrix into equal portions
total_size = (len(self.coordinateArray)**2)/2
binNo = size
indexes_per_bin = total_size / binNo
soustractor = 1
indexBinHash = {}
accumulator = 0
rankIterator = 0
lowBoundary = 0
# getting the sliced indexes
for i in xrange(0, len(self.distanceMatrix),1):
array_len = len(self.distanceMatrix[i]) - soustractor
accumulator += array_len
if accumulator > indexes_per_bin:
indexBinHash[rankIterator] = [lowBoundary, i]
# change the parameters
rankIterator += 1
lowBoundary = i
# empty the accumulator
accumulator = 0
soustractor += 1
if lowBoundary < i:
indexBinHash[binNo-1] = [lowBoundary, i]
print ">> Starting distance matrix creation:"
print ">> Clustering best solutions..."
else:
self.distanceMatrix = None
self.coordinateArray = None
indexBinHash = None
#synchronize all processers
comm.Barrier()
self.distanceMatrix=comm.bcast(self.distanceMatrix,root=0)
self.coordinateArray=comm.bcast(self.coordinateArray,root=0)
indexBinHash=comm.bcast(indexBinHash,root=0)
comm.Barrier()
exec 'import %s as constraint'%(self.constraint)
#clusters_file=open("%s/dist_matrix.dat"%self.params.output_folder,"w") # Xx this where you write the solution file
#generate a dummy multimer and extract the indexes of C alpha
# null_coordinate_array = np.zeros((len(self.data.structure_list)-1)*6)
# assembly = A.AssemblyHeteroMultimer(self.data.structure_list)
# assembly.place_all_mobile_structures(null_coordinate_array)
# Get the CA indexes already computed from the data class
self.CA_index_of_all_structures = self.data.CA_index_of_structures
#[m,index]=assembly.atomselect_of_structures(1,"*","*","CA",True) # -> extracting indexes of CA
#load the monomeric structure positions (needed for resetting atom position after displacement)
# s = Protein()
# s.import_pdb(self.params.pdb_file_name)
# coords=s.get_xyz()
if len(self.coordinateArray) > (size *3):
#----------------------------- first create the rmsd matrix
# creating variables to check for status of clustering of process 0
if rank == 0:
repetitions = indexBinHash[rank][1] - indexBinHash[rank][0]
totalIterations = len(self.coordinateArray) * repetitions
counter = 0
printresent = 1 # those are used not to repeat the state of the clustering
printPast = 0
counter = 0
#synchronize all processes (get current timestep and repeat from swarm state)
pieceOfCoordinateArray = np.array([])
if rank in indexBinHash.keys():
#Starting the creation with 2 loops
for n in xrange(indexBinHash[rank][0],len(self.coordinateArray),1):
if n == indexBinHash[rank][1]:
break
for m in xrange (n,len(self.coordinateArray),1):
# make sure you are not using the same structures against themselves
if n == m:
# # add a "wrong" distance in the matrix to only have half the matrix
pass
else:
# --------------------------------- MODIFY THE FLEXIBLE STRUCTURES FOR THE 1ST ASSEMBLY AND SET COORDS
for structure in self.data.structure_list:
if self.params.assembly_style=="flexible" and structure.flexibility != "NA":
len_rigid_dim = 6*(len(self.data.structure_list)-1)
i = 0
deform_coeffs = self.coordinateArray[n][len_rigid_dim : len_rigid_dim + i + len(structure.flexibility.eigenspace_size) ]
if self.params.mode=="seed":
pos_eig=structure.flexibility.proj[:,structure.flexibility.centroid]+deform_coeffs
code,min_dist=vq(structure.flexibility.proj.transpose(),np.array([pos_eig]))
target_frame=min_dist.argmin()
coords=structure.flexibility.all_coords[:,target_frame]
coords_reshaped=coords.reshape(len(coords)/3,3)
structure.monomer.set_xyz(coords_reshaped)
else:
coords=structure.monomer.get_xyz()
coords_reshaped=coords.reshape(len(coords)*3)
for n in xrange(0,len(deform_coeffs),1):
coords_reshaped+=deform_coeffs[n]*structure.flexibility.eigenvec[:,n]
structure.monomer.set_xyz(coords_reshaped.reshape(len(coords_reshaped)/3,3))
i += len(structure.flexibility.eigenspace_size)
else:
structure.monomer.set_xyz(structure.init_coords)
# ------------------- CREATING 1ST ASSEMBLY
assembly1 = A.AssemblyHeteroMultimer(self.data.structure_list_and_name)
assembly1.place_all_mobile_structures(self.coordinateArray[n][:len(self.coordinateArray[n])-1])
# get the coordinates of all the structures to get the coordinates of Assembly
coordinate_of_assembly1_structures = []
for structure_index in xrange(0,len(self.data.structure_list), 1):
coordinate_of_assembly1_structures.append(assembly1.get_structure_xyz(structure_index))
m1 = np.concatenate((coordinate_of_assembly1_structures),axis=0)
# --------------------------------- MODIFY THE FLEXIBLE STRUCTURES FOR THE 2ND ASSEMBLY
for structure in self.data.structure_list:
if self.params.assembly_style=="flexible" and structure.flexibility != "NA":
len_rigid_dim = 6*(len(self.data.structure_list)-1)
i = 0
deform_coeffs = self.coordinateArray[m][len_rigid_dim : len_rigid_dim + i + len(structure.flexibility.eigenspace_size) ]
if self.params.mode=="seed":
pos_eig=structure.flexibility.proj[:,structure.flexibility.centroid]+deform_coeffs
code,min_dist=vq(structure.flexibility.proj.transpose(),np.array([pos_eig]))
target_frame=min_dist.argmin()
coords=structure.flexibility.all_coords[:,target_frame]
coords_reshaped=coords.reshape(len(coords)/3,3)
structure.monomer.set_xyz(coords_reshaped)
else:
coords=structure.monomer.get_xyz()
coords_reshaped=coords.reshape(len(coords)*3)
for n in xrange(0,len(deform_coeffs),1):
coords_reshaped+=deform_coeffs[n]*structure.flexibility.eigenvec[:,n]
structure.monomer.set_xyz(coords_reshaped.reshape(len(coords_reshaped)/3,3))
i += len(structure.flexibility.eigenspace_size)
else:
structure.monomer.set_xyz(structure.init_coords)
# ------------------- CREATING 2ND ASSEMBLY
assembly2 = A.AssemblyHeteroMultimer(self.data.structure_list_and_name)
assembly2.place_all_mobile_structures(self.coordinateArray[m][:len(self.coordinateArray[m])-1])
# get the coordinates of all the structures to get the coordinates of Assembly
coordinate_of_assembly2_structures = []
for structure_index in xrange(0,len(self.data.structure_list), 1):
coordinate_of_assembly2_structures.append(assembly2.get_structure_xyz(structure_index))
m2 = np.concatenate((coordinate_of_assembly2_structures),axis=0)
# calculate RMSD between the 2
rmsd=self.align(m1,m2) # --> comes from Default.Postprocess.align()
self.distanceMatrix[n][m] = rmsd
if rank == 0:
counter += 1.0
printPresent = int((counter / totalIterations) * 100)
if (printPresent%10) == 0 and printPresent != printPast:
print "> ~"+str( printPresent )+" % structures clustered "
printPast = printPresent
pieceOfCoordinateArray = self.distanceMatrix[indexBinHash[rank][0]:indexBinHash[rank][1],:]
# print " Clustering process "+str(rank)+" finished"
comm.Barrier()
pieces = comm.gather(pieceOfCoordinateArray,root=0)
comm.Barrier()
if rank == 0:
self.distanceMatrix = []
for elem in pieces:
if len(elem) < 2:
pass
else:
for arrays in elem:
self.distanceMatrix.append(arrays)
lastRow = np.empty(len(self.coordinateArray))
lastRow.fill(100)
self.distanceMatrix.append(lastRow)
self.distanceMatrix = np.array(self.distanceMatrix)
np.transpose(self.distanceMatrix)
print len(self.distanceMatrix)
print len(self.distanceMatrix[0])
# np.savetxt('coordinateArray.txt', self.coordinateArray) # coordinateArray[0:50,0:50]
# np.savetxt('np_matrix.txt', self.distanceMatrix) # distanceMatrix[0:50]
else:
if rank == 0:
print ">> less than "+str(size*3)+" solutions, proceeding ..."
for n in xrange(0,len(self.coordinateArray),1):
for m in xrange (n,len(self.coordinateArray),1):
# make sure you are not using the same structures against themselves
if n == m:
# # add a "wrong" distance in the matrix to only have half the matrix
pass
else:
# --------------------------------- MODIFY THE FLEXIBLE STRUCTURES FOR THE 1ST ASSEMBLY
for structure in self.data.structure_list:
if self.params.assembly_style=="flexible" and structure.flexibility != "NA":
len_rigid_dim = 6*(len(self.data.structure_list)-1)
i = 0
deform_coeffs = self.coordinateArray[n][len_rigid_dim : len_rigid_dim + i + len(structure.flexibility.eigenspace_size) ]
if self.params.mode=="seed":
pos_eig=structure.flexibility.proj[:,structure.flexibility.centroid]+deform_coeffs
code,min_dist=vq(structure.flexibility.proj.transpose(),np.array([pos_eig]))
target_frame=min_dist.argmin()
coords=structure.flexibility.all_coords[:,target_frame]
coords_reshaped=coords.reshape(len(coords)/3,3)
structure.monomer.set_xyz(coords_reshaped)
else:
coords=structure.monomer.get_xyz()
coords_reshaped=coords.reshape(len(coords)*3)
for n in xrange(0,len(deform_coeffs),1):
coords_reshaped+=deform_coeffs[n]*structure.flexibility.eigenvec[:,n]
structure.monomer.set_xyz(coords_reshaped.reshape(len(coords_reshaped)/3,3))
i += len(structure.flexibility.eigenspace_size)
else:
structure.monomer.set_xyz(structure.init_coords)
# ------------------- CREATING 1ST ASSEMBLY
assembly1 = A.AssemblyHeteroMultimer(self.data.structure_list_and_name)
assembly1.place_all_mobile_structures(self.coordinateArray[n][:len(self.coordinateArray[n])-1])
# get the coordinates of all the structures to get the coordinates of Assembly
coordinate_of_assembly1_structures = []
for structure_index in xrange(0,len(self.data.structure_list), 1):
coordinate_of_assembly1_structures.append(assembly1.get_structure_xyz(structure_index))
m1 = np.concatenate((coordinate_of_assembly1_structures),axis=0)
# --------------------------------- MODIFY THE FLEXIBLE STRUCTURES FOR THE 2ND ASSEMBLY
for structure in self.data.structure_list:
if self.params.assembly_style=="flexible" and structure.flexibility != "NA":
len_rigid_dim = 6*(len(self.data.structure_list)-1)
i = 0
deform_coeffs = self.coordinateArray[n][len_rigid_dim : len_rigid_dim + i + len(structure.flexibility.eigenspace_size) ]
if self.params.mode=="seed":
pos_eig=structure.flexibility.proj[:,structure.flexibility.centroid]+deform_coeffs
code,min_dist=vq(structure.flexibility.proj.transpose(),np.array([pos_eig]))
target_frame=min_dist.argmin()
coords=structure.flexibility.all_coords[:,target_frame]
coords_reshaped=coords.reshape(len(coords)/3,3)
structure.monomer.set_xyz(coords_reshaped)
else:
coords=structure.monomer.get_xyz()
coords_reshaped=coords.reshape(len(coords)*3)
for n in xrange(0,len(deform_coeffs),1):
coords_reshaped+=deform_coeffs[n]*structure.flexibility.eigenvec[:,n]
structure.monomer.set_xyz(coords_reshaped.reshape(len(coords_reshaped)/3,3))
i += len(structure.flexibility.eigenspace_size)
else:
structure.monomer.set_xyz(structure.init_coords)
# ------------------- CREATING 2ND ASSEMBLY
assembly2 = A.AssemblyHeteroMultimer(self.data.structure_list_and_name)
assembly2.place_all_mobile_structures(self.coordinateArray[m][:len(self.coordinateArray[m])-1])
# get the coordinates of all the structures to get the coordinates of Assembly
coordinate_of_assembly2_structures = []
for structure_index in xrange(0,len(self.data.structure_list), 1):
coordinate_of_assembly2_structures.append(assembly2.get_structure_xyz(structure_index))
m2 = np.concatenate((coordinate_of_assembly2_structures),axis=0)
# calculate RMSD between the 2
rmsd=self.align(m1,m2) # --> comes from Default.Postprocess.align()
self.distanceMatrix[n][m] = rmsd
if rank == 0:
np.savetxt('coordinateArray.txt', self.coordinateArray)
np.savetxt('np_matrix.txt', self.distanceMatrix)
# launch the Clustering and tree drawing module
app = wx.App(False)
frame = CnD.MainFrame(None, "Clustering interface",self.OUTPUT_DIRECTORY ,self.params, self.data, self)
frame.RMSDPanel.computeMatrix()
if self.params.cluster_threshold == "NA":
frame.Show()
app.MainLoop()
else:
frame.RMSDPanel.convertCoordsAndExportPDB(self.params.cluster_threshold)
def write_pdb(self, centroidArray, average_RMSD_ARRAY):
iterant = 0 # this iterator is used (in a bad way :( ) to select the right average RMSD value when iterating over the centroids
clusters_file=open("%s/solutions.dat"%self.OUTPUT_DIRECTORY,"w")
# writing the tcl file:
tcl_file = open("%s/assembly.vmd"%self.OUTPUT_DIRECTORY,"w")
# import the constraint file:
self.constraint = self.params.constraint.split('.')[0]
#test if constraint file exists, and function constaint_check is available
try:
exec 'import %s as constraint'%(self.constraint)
except ImportError, e:
print "ERROR: load of user defined constraint function failed!"
sys.exit(1)
try: constraint.constraint_check
except NameError:
print 'ERROR: constraint_check function not found'
# HETEROMULTIMER ASSEMBLY
else:
print "extracting Complex multimer pdb"
for n in centroidArray:
for structure in self.data.structure_list:
if self.params.assembly_style=="flexible" and structure.flexibility != "NA":
len_rigid_dim = 6*(len(self.data.structure_list)-1) # careful here! the -1 is undecisive
i = 0
deform_coeffs = self.coordinateArray[n][len_rigid_dim : len_rigid_dim + i + len(structure.flexibility.eigenspace_size) ]
if self.params.mode=="seed":
pos_eig=structure.flexibility.proj[:,structure.flexibility.centroid]+deform_coeffs
code,min_dist=vq(structure.flexibility.proj.transpose(),np.array([pos_eig]))
target_frame=min_dist.argmin()
coords=structure.flexibility.all_coords[:,target_frame]
coords_reshaped=coords.reshape(len(coords)/3,3)
structure.monomer.set_xyz(coords_reshaped)
else:
coords=structure.monomer.get_xyz()
coords_reshaped=coords.reshape(len(coords)*3)
for n in xrange(0,len(deform_coeffs),1):
coords_reshaped+=deform_coeffs[n]*structure.flexibility.eigenvec[:,n]
structure.monomer.set_xyz(coords_reshaped.reshape(len(coords_reshaped)/3,3))
i += len(structure.flexibility.eigenspace_size)
else:
structure.monomer.set_xyz(structure.init_coords)
# ------------------------------ CREATE ASSEMBLY
print "creating PDB for centroid: "+str(iterant)
multimer1 = A.AssemblyHeteroMultimer(self.data.structure_list_and_name)
multimer1.place_all_mobile_structures(self.coordinateArray[n][:len(self.coordinateArray[n])-1])
# print the pdb file
multimer1.write_PDB("%s/assembly%s.pdb"%(self.OUTPUT_DIRECTORY,iterant))
# create the constraint:
measure = constraint.constraint_check(self.data, multimer1)
# ----------------------------- WRITING SOLUTION.DAT
l = []
f = []
# insert coordinates in the solution.dat file
f.append("assembly "+str(iterant)+" |")
for item in self.coordinateArray[n][: len(self.coordinateArray[n])-1]:
l.append(item)
f.append("%8.3f ")
#write constraint values
f.append("| ")
for item in measure:
l.append(item)
f.append("%8.3f ")
#write fitness
f.append("| %8.3f")
l.append(self.coordinateArray[n][-1])
# write average RMSD OF CLUSTER:
f.append("| %8.3f\n")
l.append(average_RMSD_ARRAY[iterant])
formatting=''.join(f)
clusters_file.write(formatting%tuple(l))
# --------------------------- WRITING TCL FILE
if iterant == 0:
tcl_file.write("mol new assembly"+str(iterant)+".pdb first 0 last -1 step 1 filebonds 1 autobonds 1 waitfor all \n")
else:
tcl_file.write("mol addfile assembly"+str(iterant)+".pdb type pdb first 0 last -1 step 1 filebonds 1 autobonds 1 waitfor all \n")
iterant += 1
tcl_file.write("mol delrep 0 top \n\
mol representation NewCartoon 0.300000 10.000000 4.100000 0 \n\
mol color Chain \n\
mol selection {all} \n\
mol material Opaque \n\
mol addrep top \n\
mol selupdate 0 top 0 \n\
mol colupdate 0 top 0 \n\
mol scaleminmax top 0 0.000000 0.000000 \n\
mol smoothrep top 0 0 \n\
mol drawframes top 0 {now}")
clusters_file.close()
tcl_file.close()
| degiacom/POW | HeteroMultimer.py | Python | gpl-3.0 | 51,166 | [
"VMD"
] | e59b71d15132d96f3695102b1fb9e0c0474d254759ba36f3f88dad46e8b62c5b |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'knossos_cuber_widgets.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    def setupUi(self, Dialog):
        """Build the Knossos Cuber dialog: a 4-tab QTabWidget (General,
        Dataset, Processing, Compression) above a centered Start button.

        Auto-generated by the PyQt5 UI code generator from
        'knossos_cuber_widgets.ui' — do not edit by hand; regenerate from
        the .ui file instead (manual changes will be lost).
        """
        Dialog.setObjectName("Dialog")
        Dialog.resize(500, 344)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.tabWidget = QtWidgets.QTabWidget(Dialog)
        self.tabWidget.setObjectName("tabWidget")
        # --- Tab 1: General (experiment name, source/target dirs, format) ---
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.label_target_dir = QtWidgets.QLabel(self.tab)
        self.label_target_dir.setObjectName("label_target_dir")
        self.gridLayout.addWidget(self.label_target_dir, 2, 0, 1, 1)
        self.push_button_choose_target_dir = QtWidgets.QPushButton(self.tab)
        self.push_button_choose_target_dir.setObjectName("push_button_choose_target_dir")
        self.gridLayout.addWidget(self.push_button_choose_target_dir, 2, 1, 1, 1)
        self.push_button_choose_source_dir = QtWidgets.QPushButton(self.tab)
        self.push_button_choose_source_dir.setObjectName("push_button_choose_source_dir")
        self.gridLayout.addWidget(self.push_button_choose_source_dir, 1, 1, 1, 1)
        self.label_experiment_name = QtWidgets.QLabel(self.tab)
        self.label_experiment_name.setObjectName("label_experiment_name")
        self.gridLayout.addWidget(self.label_experiment_name, 0, 0, 1, 1)
        self.label_source_dir = QtWidgets.QLabel(self.tab)
        self.label_source_dir.setObjectName("label_source_dir")
        self.gridLayout.addWidget(self.label_source_dir, 1, 0, 1, 1)
        self.line_edit_experiment_name = QtWidgets.QLineEdit(self.tab)
        self.line_edit_experiment_name.setObjectName("line_edit_experiment_name")
        self.gridLayout.addWidget(self.line_edit_experiment_name, 0, 1, 1, 1)
        self.label_source_format = QtWidgets.QLabel(self.tab)
        self.label_source_format.setObjectName("label_source_format")
        self.gridLayout.addWidget(self.label_source_format, 3, 0, 1, 1)
        self.combo_box_source_format = QtWidgets.QComboBox(self.tab)
        self.combo_box_source_format.setObjectName("combo_box_source_format")
        self.gridLayout.addWidget(self.combo_box_source_format, 3, 1, 1, 1)
        self.verticalLayout_3.addLayout(self.gridLayout)
        self.tabWidget.addTab(self.tab, "")
        # --- Tab 2: Dataset (datatype, scaling, boundaries, dimensions) ---
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.tab_2)
        self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.gridLayout_2 = QtWidgets.QGridLayout()
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.line_edit_scaling_z = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_scaling_z.setObjectName("line_edit_scaling_z")
        self.gridLayout_2.addWidget(self.line_edit_scaling_z, 1, 6, 1, 1)
        self.label_boundaries_z = QtWidgets.QLabel(self.tab_2)
        self.label_boundaries_z.setObjectName("label_boundaries_z")
        self.gridLayout_2.addWidget(self.label_boundaries_z, 2, 5, 1, 1)
        self.line_edit_boundaries_x = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_boundaries_x.setObjectName("line_edit_boundaries_x")
        self.gridLayout_2.addWidget(self.line_edit_boundaries_x, 2, 2, 1, 1)
        self.label_source_datatype = QtWidgets.QLabel(self.tab_2)
        self.label_source_datatype.setObjectName("label_source_datatype")
        self.gridLayout_2.addWidget(self.label_source_datatype, 0, 0, 1, 1)
        self.label_scaling_z = QtWidgets.QLabel(self.tab_2)
        self.label_scaling_z.setObjectName("label_scaling_z")
        self.gridLayout_2.addWidget(self.label_scaling_z, 1, 5, 1, 1)
        self.label_scaling_x = QtWidgets.QLabel(self.tab_2)
        self.label_scaling_x.setObjectName("label_scaling_x")
        self.gridLayout_2.addWidget(self.label_scaling_x, 1, 1, 1, 1)
        self.line_edit_scaling_x = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_scaling_x.setObjectName("line_edit_scaling_x")
        self.gridLayout_2.addWidget(self.line_edit_scaling_x, 1, 2, 1, 1)
        self.label_boundaries = QtWidgets.QLabel(self.tab_2)
        self.label_boundaries.setObjectName("label_boundaries")
        self.gridLayout_2.addWidget(self.label_boundaries, 2, 0, 1, 1)
        self.line_edit_scaling_y = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_scaling_y.setObjectName("line_edit_scaling_y")
        self.gridLayout_2.addWidget(self.line_edit_scaling_y, 1, 4, 1, 1)
        self.line_edit_boundaries_z = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_boundaries_z.setObjectName("line_edit_boundaries_z")
        self.gridLayout_2.addWidget(self.line_edit_boundaries_z, 2, 6, 1, 1)
        self.line_edit_boundaries_y = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_boundaries_y.setObjectName("line_edit_boundaries_y")
        self.gridLayout_2.addWidget(self.line_edit_boundaries_y, 2, 4, 1, 1)
        self.label_scaling_y = QtWidgets.QLabel(self.tab_2)
        self.label_scaling_y.setObjectName("label_scaling_y")
        self.gridLayout_2.addWidget(self.label_scaling_y, 1, 3, 1, 1)
        self.label_boundaries_y = QtWidgets.QLabel(self.tab_2)
        self.label_boundaries_y.setObjectName("label_boundaries_y")
        self.gridLayout_2.addWidget(self.label_boundaries_y, 2, 3, 1, 1)
        self.label_scaling = QtWidgets.QLabel(self.tab_2)
        self.label_scaling.setObjectName("label_scaling")
        self.gridLayout_2.addWidget(self.label_scaling, 1, 0, 1, 1)
        self.label_source_dimensions = QtWidgets.QLabel(self.tab_2)
        self.label_source_dimensions.setObjectName("label_source_dimensions")
        self.gridLayout_2.addWidget(self.label_source_dimensions, 3, 0, 1, 1)
        self.line_edit_source_dimensions_x = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_source_dimensions_x.setObjectName("line_edit_source_dimensions_x")
        self.gridLayout_2.addWidget(self.line_edit_source_dimensions_x, 3, 2, 1, 1)
        self.label_source_dimensions_y = QtWidgets.QLabel(self.tab_2)
        self.label_source_dimensions_y.setObjectName("label_source_dimensions_y")
        self.gridLayout_2.addWidget(self.label_source_dimensions_y, 3, 3, 1, 1)
        self.line_edit_source_dimensions_y = QtWidgets.QLineEdit(self.tab_2)
        self.line_edit_source_dimensions_y.setObjectName("line_edit_source_dimensions_y")
        self.gridLayout_2.addWidget(self.line_edit_source_dimensions_y, 3, 4, 1, 1)
        self.label_boundaries_x = QtWidgets.QLabel(self.tab_2)
        self.label_boundaries_x.setObjectName("label_boundaries_x")
        self.gridLayout_2.addWidget(self.label_boundaries_x, 2, 1, 1, 1)
        self.label_source_dimensions_x = QtWidgets.QLabel(self.tab_2)
        self.label_source_dimensions_x.setObjectName("label_source_dimensions_x")
        self.gridLayout_2.addWidget(self.label_source_dimensions_x, 3, 1, 1, 1)
        self.combo_box_source_datatype = QtWidgets.QComboBox(self.tab_2)
        self.combo_box_source_datatype.setObjectName("combo_box_source_datatype")
        self.combo_box_source_datatype.addItem("")
        self.combo_box_source_datatype.addItem("")
        self.gridLayout_2.addWidget(self.combo_box_source_datatype, 0, 1, 1, 6)
        self.check_box_swap_axes = QtWidgets.QCheckBox(self.tab_2)
        self.check_box_swap_axes.setEnabled(True)
        self.check_box_swap_axes.setChecked(True)
        self.check_box_swap_axes.setObjectName("check_box_swap_axes")
        self.gridLayout_2.addWidget(self.check_box_swap_axes, 3, 5, 1, 2)
        self.verticalLayout_4.addLayout(self.gridLayout_2)
        self.tabWidget.addTab(self.tab_2, "")
        # --- Tab 3: Processing (source kind, buffers, cube size, workers) ---
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.tab_3)
        self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.gridLayout_3 = QtWidgets.QGridLayout()
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.label_downsampling_workers = QtWidgets.QLabel(self.tab_3)
        self.label_downsampling_workers.setObjectName("label_downsampling_workers")
        self.gridLayout_3.addWidget(self.label_downsampling_workers, 4, 0, 1, 1)
        self.label_buffer_size_in_cubes = QtWidgets.QLabel(self.tab_3)
        self.label_buffer_size_in_cubes.setObjectName("label_buffer_size_in_cubes")
        self.gridLayout_3.addWidget(self.label_buffer_size_in_cubes, 2, 0, 1, 1)
        self.spin_box_downsampling_workers = QtWidgets.QSpinBox(self.tab_3)
        self.spin_box_downsampling_workers.setMaximum(10000000)
        self.spin_box_downsampling_workers.setProperty("value", 10)
        self.spin_box_downsampling_workers.setObjectName("spin_box_downsampling_workers")
        self.gridLayout_3.addWidget(self.spin_box_downsampling_workers, 4, 1, 1, 1)
        self.spin_box_compression_workers = QtWidgets.QSpinBox(self.tab_3)
        self.spin_box_compression_workers.setMaximum(1000000000)
        self.spin_box_compression_workers.setProperty("value", 20)
        self.spin_box_compression_workers.setObjectName("spin_box_compression_workers")
        self.gridLayout_3.addWidget(self.spin_box_compression_workers, 5, 1, 1, 1)
        self.label_cube_edge_length = QtWidgets.QLabel(self.tab_3)
        self.label_cube_edge_length.setObjectName("label_cube_edge_length")
        self.gridLayout_3.addWidget(self.label_cube_edge_length, 3, 0, 1, 1)
        self.spin_box_cube_edge_length = QtWidgets.QSpinBox(self.tab_3)
        self.spin_box_cube_edge_length.setMaximum(10000000)
        self.spin_box_cube_edge_length.setProperty("value", 128)
        self.spin_box_cube_edge_length.setObjectName("spin_box_cube_edge_length")
        self.gridLayout_3.addWidget(self.spin_box_cube_edge_length, 3, 1, 1, 1)
        # radio buttons form a mutually-exclusive pair: start from 2D images
        # (default) or from existing mag1 cubes
        self.radio_button_start_from_mag1 = QtWidgets.QRadioButton(self.tab_3)
        self.radio_button_start_from_mag1.setChecked(False)
        self.radio_button_start_from_mag1.setObjectName("radio_button_start_from_mag1")
        self.gridLayout_3.addWidget(self.radio_button_start_from_mag1, 0, 1, 1, 1)
        self.radio_button_start_from_2d_images = QtWidgets.QRadioButton(self.tab_3)
        self.radio_button_start_from_2d_images.setEnabled(True)
        self.radio_button_start_from_2d_images.setChecked(True)
        self.radio_button_start_from_2d_images.setObjectName("radio_button_start_from_2d_images")
        self.gridLayout_3.addWidget(self.radio_button_start_from_2d_images, 0, 0, 1, 1)
        self.label_compression_workers = QtWidgets.QLabel(self.tab_3)
        self.label_compression_workers.setObjectName("label_compression_workers")
        self.gridLayout_3.addWidget(self.label_compression_workers, 5, 0, 1, 1)
        self.check_box_downsample = QtWidgets.QCheckBox(self.tab_3)
        self.check_box_downsample.setChecked(True)
        self.check_box_downsample.setObjectName("check_box_downsample")
        self.gridLayout_3.addWidget(self.check_box_downsample, 1, 0, 1, 1)
        self.check_box_skip_already_generated = QtWidgets.QCheckBox(self.tab_3)
        self.check_box_skip_already_generated.setChecked(True)
        self.check_box_skip_already_generated.setObjectName("check_box_skip_already_generated")
        self.gridLayout_3.addWidget(self.check_box_skip_already_generated, 1, 1, 1, 1)
        self.spin_box_buffer_size_in_cubes = QtWidgets.QSpinBox(self.tab_3)
        self.spin_box_buffer_size_in_cubes.setMaximum(100000000)
        self.spin_box_buffer_size_in_cubes.setProperty("value", 1000)
        self.spin_box_buffer_size_in_cubes.setObjectName("spin_box_buffer_size_in_cubes")
        self.gridLayout_3.addWidget(self.spin_box_buffer_size_in_cubes, 2, 1, 1, 1)
        self.label_io_workers = QtWidgets.QLabel(self.tab_3)
        self.label_io_workers.setObjectName("label_io_workers")
        self.gridLayout_3.addWidget(self.label_io_workers, 6, 0, 1, 1)
        self.spin_box_io_workers = QtWidgets.QSpinBox(self.tab_3)
        self.spin_box_io_workers.setMaximum(100000000)
        self.spin_box_io_workers.setProperty("value", 20)
        self.spin_box_io_workers.setObjectName("spin_box_io_workers")
        self.gridLayout_3.addWidget(self.spin_box_io_workers, 6, 1, 1, 1)
        self.verticalLayout_5.addLayout(self.gridLayout_3)
        self.tabWidget.addTab(self.tab_3, "")
        # --- Tab 4: Compression (openjpeg path, algorithm, quality, filter) ---
        self.tab_4 = QtWidgets.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.tab_4)
        self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.gridLayout_4 = QtWidgets.QGridLayout()
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.label_path_to_openjpeg = QtWidgets.QLabel(self.tab_4)
        self.label_path_to_openjpeg.setObjectName("label_path_to_openjpeg")
        self.gridLayout_4.addWidget(self.label_path_to_openjpeg, 1, 0, 1, 1)
        self.line_edit_path_to_openjpeg = QtWidgets.QLineEdit(self.tab_4)
        self.line_edit_path_to_openjpeg.setObjectName("line_edit_path_to_openjpeg")
        self.gridLayout_4.addWidget(self.line_edit_path_to_openjpeg, 1, 1, 1, 1)
        self.push_button_path_to_openjpeg = QtWidgets.QPushButton(self.tab_4)
        self.push_button_path_to_openjpeg.setObjectName("push_button_path_to_openjpeg")
        self.gridLayout_4.addWidget(self.push_button_path_to_openjpeg, 1, 2, 1, 1)
        self.label_compression_quality = QtWidgets.QLabel(self.tab_4)
        self.label_compression_quality.setObjectName("label_compression_quality")
        self.gridLayout_4.addWidget(self.label_compression_quality, 3, 0, 1, 1)
        self.label_compression_algorithm = QtWidgets.QLabel(self.tab_4)
        self.label_compression_algorithm.setObjectName("label_compression_algorithm")
        self.gridLayout_4.addWidget(self.label_compression_algorithm, 2, 0, 1, 1)
        self.label_gauss_filter = QtWidgets.QLabel(self.tab_4)
        self.label_gauss_filter.setObjectName("label_gauss_filter")
        self.gridLayout_4.addWidget(self.label_gauss_filter, 4, 0, 1, 1)
        self.combo_box_compression_algorithm = QtWidgets.QComboBox(self.tab_4)
        self.combo_box_compression_algorithm.setObjectName("combo_box_compression_algorithm")
        self.combo_box_compression_algorithm.addItem("")
        self.combo_box_compression_algorithm.addItem("")
        self.gridLayout_4.addWidget(self.combo_box_compression_algorithm, 2, 1, 1, 2)
        self.spin_box_compression_quality = QtWidgets.QSpinBox(self.tab_4)
        self.spin_box_compression_quality.setProperty("value", 70)
        self.spin_box_compression_quality.setObjectName("spin_box_compression_quality")
        self.gridLayout_4.addWidget(self.spin_box_compression_quality, 3, 1, 1, 2)
        self.spin_box_double_gauss_filter = QtWidgets.QDoubleSpinBox(self.tab_4)
        self.spin_box_double_gauss_filter.setProperty("value", 0.5)
        self.spin_box_double_gauss_filter.setObjectName("spin_box_double_gauss_filter")
        self.gridLayout_4.addWidget(self.spin_box_double_gauss_filter, 4, 1, 1, 2)
        self.check_box_compress = QtWidgets.QCheckBox(self.tab_4)
        self.check_box_compress.setChecked(True)
        self.check_box_compress.setObjectName("check_box_compress")
        self.gridLayout_4.addWidget(self.check_box_compress, 0, 0, 1, 3)
        self.verticalLayout_6.addLayout(self.gridLayout_4)
        self.tabWidget.addTab(self.tab_4, "")
        self.verticalLayout.addWidget(self.tabWidget)
        # --- Bottom row: Start button centered between two expanding spacers ---
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.push_button_start_job = QtWidgets.QPushButton(Dialog)
        self.push_button_start_job.setObjectName("push_button_start_job")
        self.horizontalLayout.addWidget(self.push_button_start_job)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        # localize texts, select the first tab and auto-connect slots by name
        self.retranslateUi(Dialog)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        # explicit keyboard tab-traversal order across all four tabs
        Dialog.setTabOrder(self.line_edit_experiment_name, self.push_button_choose_source_dir)
        Dialog.setTabOrder(self.push_button_choose_source_dir, self.push_button_choose_target_dir)
        Dialog.setTabOrder(self.push_button_choose_target_dir, self.combo_box_source_format)
        Dialog.setTabOrder(self.combo_box_source_format, self.push_button_start_job)
        Dialog.setTabOrder(self.push_button_start_job, self.tabWidget)
        Dialog.setTabOrder(self.tabWidget, self.combo_box_source_datatype)
        Dialog.setTabOrder(self.combo_box_source_datatype, self.line_edit_scaling_x)
        Dialog.setTabOrder(self.line_edit_scaling_x, self.line_edit_scaling_y)
        Dialog.setTabOrder(self.line_edit_scaling_y, self.line_edit_scaling_z)
        Dialog.setTabOrder(self.line_edit_scaling_z, self.line_edit_boundaries_x)
        Dialog.setTabOrder(self.line_edit_boundaries_x, self.line_edit_boundaries_y)
        Dialog.setTabOrder(self.line_edit_boundaries_y, self.line_edit_boundaries_z)
        Dialog.setTabOrder(self.line_edit_boundaries_z, self.line_edit_source_dimensions_x)
        Dialog.setTabOrder(self.line_edit_source_dimensions_x, self.line_edit_source_dimensions_y)
        Dialog.setTabOrder(self.line_edit_source_dimensions_y, self.check_box_swap_axes)
        Dialog.setTabOrder(self.check_box_swap_axes, self.line_edit_path_to_openjpeg)
        Dialog.setTabOrder(self.line_edit_path_to_openjpeg, self.push_button_path_to_openjpeg)
        Dialog.setTabOrder(self.push_button_path_to_openjpeg, self.combo_box_compression_algorithm)
        Dialog.setTabOrder(self.combo_box_compression_algorithm, self.spin_box_compression_quality)
        Dialog.setTabOrder(self.spin_box_compression_quality, self.spin_box_double_gauss_filter)
def retranslateUi(self, Dialog):
    """Install (re-)translated UI strings on every widget.

    Auto-generated by Qt's pyuic5 from a .ui file; called once from
    setupUi and again whenever the application language changes. Do not
    edit by hand — regenerate from the Designer file instead.
    """
    _translate = QtCore.QCoreApplication.translate
    Dialog.setWindowTitle(_translate("Dialog", "Knossos Cuber"))
    # --- General tab: source/target directories and experiment name ---
    self.label_target_dir.setToolTip(_translate("Dialog", "<html><head/><body><p>Where to write the output cubes.</p></body></html>"))
    self.label_target_dir.setText(_translate("Dialog", "<no target dir>"))
    self.push_button_choose_target_dir.setToolTip(_translate("Dialog", "<html><head/><body><p>Where to write the output cubes.</p></body></html>"))
    self.push_button_choose_target_dir.setText(_translate("Dialog", "Choose Target Directory"))
    self.push_button_choose_source_dir.setToolTip(_translate("Dialog", "<html><head/><body><p>Path to 2D images or full-resolution (mag1) cubes.</p></body></html>"))
    self.push_button_choose_source_dir.setText(_translate("Dialog", "Choose Source Directory"))
    self.label_experiment_name.setToolTip(_translate("Dialog", "<html><head/><body><p>Arbitrary experiment name identifier</p></body></html>"))
    self.label_experiment_name.setText(_translate("Dialog", "Experiment Name"))
    self.label_source_dir.setToolTip(_translate("Dialog", "<html><head/><body><p>Path to 2D images or full-resolution (mag1) cubes.</p></body></html>"))
    self.label_source_dir.setText(_translate("Dialog", "<no source dir>"))
    self.line_edit_experiment_name.setToolTip(_translate("Dialog", "<html><head/><body><p>Arbitrary experiment name identifier.</p></body></html>"))
    self.line_edit_experiment_name.setText(_translate("Dialog", "test_stack"))
    self.label_source_format.setToolTip(_translate("Dialog", "<html><head/><body><p>Whatever image format PIL can read in, or \'raw\' for faster cubing. Must be identical to the file name extension of input data.</p></body></html>"))
    self.label_source_format.setText(_translate("Dialog", "Source Format"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "General"))
    # --- Dataset tab: scaling, boundaries, source dimensions ---
    self.line_edit_scaling_z.setToolTip(_translate("Dialog", "<html><head/><body><p>Voxel scaling, physical units per voxel.</p></body></html>"))
    self.line_edit_scaling_z.setText(_translate("Dialog", "1.0"))
    self.label_boundaries_z.setToolTip(_translate("Dialog", "<html><head/><body><p>Edge length of whole dataset, only required if not starting from 2D images.</p></body></html>"))
    self.label_boundaries_z.setText(_translate("Dialog", "z"))
    self.line_edit_boundaries_x.setToolTip(_translate("Dialog", "<html><head/><body><p>Edge length of whole dataset, only required if not starting from 2D images.</p></body></html>"))
    self.label_source_datatype.setToolTip(_translate("Dialog", "<html><head/><body><p>Input image pixel data type, required only for raw.</p></body></html>"))
    self.label_source_datatype.setText(_translate("Dialog", "Source Datatype"))
    self.label_scaling_z.setToolTip(_translate("Dialog", "<html><head/><body><p>Voxel scaling, physical units per voxel.</p></body></html>"))
    self.label_scaling_z.setText(_translate("Dialog", "z"))
    self.label_scaling_x.setToolTip(_translate("Dialog", "<html><head/><body><p>Voxel scaling, physical units per voxel.</p></body></html>"))
    self.label_scaling_x.setText(_translate("Dialog", "x"))
    self.line_edit_scaling_x.setToolTip(_translate("Dialog", "<html><head/><body><p>Voxel scaling, physical units per voxel.</p></body></html>"))
    self.line_edit_scaling_x.setText(_translate("Dialog", "1.0"))
    self.label_boundaries.setToolTip(_translate("Dialog", "<html><head/><body><p>Edge length of whole dataset, only required if not starting from 2D images.</p></body></html>"))
    self.label_boundaries.setText(_translate("Dialog", "Boundaries"))
    self.line_edit_scaling_y.setToolTip(_translate("Dialog", "<html><head/><body><p>Voxel scaling, physical units per voxel.</p></body></html>"))
    self.line_edit_scaling_y.setText(_translate("Dialog", "1.0"))
    self.line_edit_boundaries_z.setToolTip(_translate("Dialog", "<html><head/><body><p>Edge length of whole dataset, only required if not starting from 2D images.</p></body></html>"))
    self.line_edit_boundaries_y.setToolTip(_translate("Dialog", "<html><head/><body><p>Edge length of whole dataset, only required if not starting from 2D images.</p></body></html>"))
    self.label_scaling_y.setToolTip(_translate("Dialog", "<html><head/><body><p>Voxel scaling, physical units per voxel.</p></body></html>"))
    self.label_scaling_y.setText(_translate("Dialog", "y"))
    self.label_boundaries_y.setToolTip(_translate("Dialog", "<html><head/><body><p>Edge length of whole dataset, only required if not starting from 2D images.</p></body></html>"))
    self.label_boundaries_y.setText(_translate("Dialog", "y"))
    self.label_scaling.setToolTip(_translate("Dialog", "<html><head/><body><p>Voxel scaling, physical units per voxel.</p></body></html>"))
    self.label_scaling.setText(_translate("Dialog", "Scaling"))
    self.label_source_dimensions.setToolTip(_translate("Dialog", "<html><head/><body><p>Input image dimensions. Required for raw images, otherwise determined directly from input images.</p></body></html>"))
    self.label_source_dimensions.setText(_translate("Dialog", "Source Dimensions"))
    self.line_edit_source_dimensions_x.setToolTip(_translate("Dialog", "<html><head/><body><p>Input image dimensions. Required for raw images, otherwise determined directly from input images.</p></body></html>"))
    self.label_source_dimensions_y.setToolTip(_translate("Dialog", "<html><head/><body><p>Input image dimensions. Required for raw images, otherwise determined directly from input images.</p></body></html>"))
    self.label_source_dimensions_y.setText(_translate("Dialog", "y"))
    self.line_edit_source_dimensions_y.setToolTip(_translate("Dialog", "<html><head/><body><p>Input image dimensions. Required for raw images, otherwise determined directly from input images.</p></body></html>"))
    self.label_boundaries_x.setText(_translate("Dialog", "x"))
    self.label_source_dimensions_x.setText(_translate("Dialog", "x"))
    self.combo_box_source_datatype.setToolTip(_translate("Dialog", "<html><head/><body><p>Input image pixel data type, required only for raw.</p></body></html>"))
    self.combo_box_source_datatype.setItemText(0, _translate("Dialog", "uint8"))
    self.combo_box_source_datatype.setItemText(1, _translate("Dialog", "uint16"))
    self.check_box_swap_axes.setToolTip(_translate("Dialog", "<html><head/><body><p>Whether to swap axes. If yes, performs costly xy axes swap; this swap takes currently about as long as reading in each image file.</p></body></html>"))
    self.check_box_swap_axes.setText(_translate("Dialog", "Swap Axes"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", "Dataset"))
    # --- Processing tab: worker counts, buffering, cube geometry ---
    self.label_downsampling_workers.setToolTip(_translate("Dialog", "<html><head/><body><p>Number of CPU cores to use for downsampling.</p></body></html>"))
    self.label_downsampling_workers.setText(_translate("Dialog", "Downsampling workers"))
    self.label_buffer_size_in_cubes.setToolTip(_translate("Dialog", "<html><head/><body><p>How many cubes will be used. Ideally, this number should be greater than the number of cubes in a single z-layer of cubes.</p></body></html>"))
    self.label_buffer_size_in_cubes.setText(_translate("Dialog", "Buffer size in cubes"))
    self.spin_box_downsampling_workers.setToolTip(_translate("Dialog", "<html><head/><body><p>Number of CPU cores to use for downsampling.</p></body></html>"))
    self.spin_box_compression_workers.setToolTip(_translate("Dialog", "<html><head/><body><p>Numer of CPU cores to use for compression.</p></body></html>"))
    self.label_cube_edge_length.setToolTip(_translate("Dialog", "<html><head/><body><p>Use more than CPU cores available; 20 is good if you\'re on a local RAID array, higher numbers might help if you read / write from NFS.</p></body></html>"))
    self.label_cube_edge_length.setText(_translate("Dialog", "Cube edge length"))
    self.spin_box_cube_edge_length.setToolTip(_translate("Dialog", "<html><head/><body><p>Use more than CPU cores available; 20 is good if you\'re on a local RAID array, higher numbers might help if you read / write from NFS.</p></body></html>"))
    self.radio_button_start_from_mag1.setToolTip(_translate("Dialog", "<html><head/><body><p>If you already have full-resolution cubes, you can start from those cubes to generate downsampled and / or compressed cubes.</p></body></html>"))
    self.radio_button_start_from_mag1.setText(_translate("Dialog", "Start from full resolution cubes"))
    self.radio_button_start_from_2d_images.setToolTip(_translate("Dialog", "<html><head/><body><p>Whether to generate cubes from 2D images.</p></body></html>"))
    self.radio_button_start_from_2d_images.setText(_translate("Dialog", "Start from 2D images"))
    self.label_compression_workers.setToolTip(_translate("Dialog", "<html><head/><body><p>Numer of CPU cores to use for compression.</p></body></html>"))
    self.label_compression_workers.setText(_translate("Dialog", "Compression workers"))
    self.check_box_downsample.setToolTip(_translate("Dialog", "<html><head/><body><p>Create downsampled cubes for magnification pyramid.</p></body></html>"))
    self.check_box_downsample.setText(_translate("Dialog", "Create downsampled cubes"))
    self.check_box_skip_already_generated.setToolTip(_translate("Dialog", "<html><head/><body><p>Skip target cubes that already exist. Useful to continue a run that was aborted.</p></body></html>"))
    self.check_box_skip_already_generated.setText(_translate("Dialog", "Skip already generated cubes"))
    self.spin_box_buffer_size_in_cubes.setToolTip(_translate("Dialog", "<html><head/><body><p>How many cubes will be used. Ideally, this number should be greater than the number of cubes in a single z-layer of cubes.</p></body></html>"))
    self.label_io_workers.setToolTip(_translate("Dialog", "<html><head/><body><p>Use more than CPU cores available; 20 is good if you\'re on a local RAID array, higher numbers might help if you read / write from NFS.</p></body></html>"))
    self.label_io_workers.setText(_translate("Dialog", "IO Workers"))
    self.spin_box_io_workers.setToolTip(_translate("Dialog", "<html><head/><body><p>Use more than CPU cores available; 20 is good if you\'re on a local RAID array, higher numbers might help if you read / write from NFS.</p></body></html>"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("Dialog", "Processing"))
    # --- Compression tab: algorithm, quality, filtering, OpenJPEG path ---
    self.label_path_to_openjpeg.setToolTip(_translate("Dialog", "<html><head/><body><p>Path to JPEG2000 compression executable, only needed when using JPEG2000.</p></body></html>"))
    self.label_path_to_openjpeg.setText(_translate("Dialog", "Path to OpenJPEG"))
    self.line_edit_path_to_openjpeg.setToolTip(_translate("Dialog", "<html><head/><body><p>Path to JPEG2000 compression executable, only needed when using JPEG2000.</p></body></html>"))
    self.push_button_path_to_openjpeg.setText(_translate("Dialog", "..."))
    self.label_compression_quality.setToolTip(_translate("Dialog", "<html><head/><body><p>0-100 for jpg (100 is best), 1-10 for j2k (lowest is best).</p></body></html>"))
    self.label_compression_quality.setText(_translate("Dialog", "Compression Quality"))
    self.label_compression_algorithm.setToolTip(_translate("Dialog", "<html><head/><body><p>Which compression algorithm to use for cubes.</p></body></html>"))
    self.label_compression_algorithm.setText(_translate("Dialog", "Compression Algorithm"))
    self.label_gauss_filter.setToolTip(_translate("Dialog", "<html><head/><body><p>Gaussian filter before compression to reduce artefacts, this really helps a lot for noisy data. Do NOT use it for near-noise-free data (SNR larger than about 10).</p></body></html>"))
    self.label_gauss_filter.setText(_translate("Dialog", "Gauss filter "))
    self.combo_box_compression_algorithm.setToolTip(_translate("Dialog", "<html><head/><body><p>Which compression algorithm to use for cubes.</p></body></html>"))
    self.combo_box_compression_algorithm.setItemText(0, _translate("Dialog", "JPEG"))
    self.combo_box_compression_algorithm.setItemText(1, _translate("Dialog", "JPEG2000"))
    self.spin_box_compression_quality.setToolTip(_translate("Dialog", "<html><head/><body><p>0-100 for jpg (100 is best), 1-10 for j2k (lowest is best).</p></body></html>"))
    self.spin_box_double_gauss_filter.setToolTip(_translate("Dialog", "<html><head/><body><p>Gaussian filter before compression to reduce artefacts, this really helps a lot for noisy data. Do NOT use it for near-noise-free data (SNR larger than about 10).</p></body></html>"))
    self.check_box_compress.setToolTip(_translate("Dialog", "<html><head/><body><p>Create compressed cubes for efficient network streaming.</p></body></html>"))
    self.check_box_compress.setText(_translate("Dialog", "Create compressed cubes"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("Dialog", "Compression"))
    self.push_button_start_job.setToolTip(_translate("Dialog", "<html><head/><body><p>Start the cubing job. Depending on the size of the input data and the performance / number of compute cores on this machine, this operation may take a very long time.</p></body></html>"))
    self.push_button_start_job.setText(_translate("Dialog", "Start Job"))
| knossos-project/knossos_cuber | knossos_cuber/knossos_cuber_widgets.py | Python | gpl-2.0 | 32,759 | [
"Gaussian"
] | 4ca28870fd3c7f4ba9fea7956f76e604e1bc8887c20d6ec19645f5898010fc35 |
"""
Utility for converting Grow sites that use multiple locales in one repository
over to a format that uses separate files for each locale.
This is required for all versions of Grow after 0.1.0.
Usage:
grow convert --type=content_locale_split
"""
from boltons import iterutils
from collections import OrderedDict
import collections
import copy
import logging
import os
import re
import yaml
try:
from yaml import CLoader as yaml_Loader
except ImportError:
from yaml import Loader as yaml_Loader
class Error(Exception):
    """Base exception for the conversion module.

    Keeps the message on an attribute so callers can read it directly.
    """

    def __init__(self, message):
        super(Error, self).__init__(message)
        # Mirror the message onto an attribute for convenient access.
        self.message = message
class LocaleExistsError(Error):
    """Raised when a localized target file already exists in the pod."""
class LocaleMissingError(Error):
    """Raised when a front-matter section carries no locale and would be lost."""
# A line of three or more dashes: the front-matter / content boundary.
BOUNDARY_REGEX = re.compile(r'^-{3,}$', re.MULTILINE)
# 'default_locale:' entry indented 2-4 spaces (inside $localization), value captured.
DEFAULT_LOCALE_REGEX = re.compile(r'^[ ]{2,4}default_locale:[ ]+(.*)')
# Top-level '$locale:' key, value captured.
LOCALE_REGEX = re.compile(r'^\$locale:(.*)')
# Top-level '$locales:' list header.
LOCALES_REGEX = re.compile(r'^\$locales:$')
# 'key@locale' convention: group(1) = base key, group(2) = trailing locale.
LOCALIZED_KEY_REGEX = re.compile('(.*)@([^@]+)$')
# Top-level '$localization:' block header.
LOCALIZATION_REGEX = re.compile(r'^\$localization:$')
# YAML sequence item ('- value'), value captured.
ARRAY_ITEM_REGEX = re.compile(r'^[ ]*-[ ]+(.*)')
# A line indented 2-4 spaces (nested item).
SUB_ITEM_REGEX = re.compile(r'^[ ]{2,4}')
# Output templates: with and without a front-matter block.
COMBINED_TEMPLATE = '---\n{}\n---\n{}\n'
SINGLE_TEMPLATE = '{}\n'
def _update_deep(orig_dict, new_dict):
for k, v in new_dict.items():
if (k in orig_dict and isinstance(orig_dict[k], dict)
and isinstance(new_dict[k], collections.Mapping)):
_update_deep(orig_dict[k], new_dict[k])
else:
orig_dict[k] = new_dict[k]
class PlainText(object):
    """Opaque holder for a YAML node's tag and raw value.

    Used by the loader/representer pair below so that tagged nodes
    round-trip through load/dump without being resolved.
    """

    def __init__(self, tag, value):
        self.tag = tag
        self.value = value
class PlainTextYamlLoader(yaml_Loader):
    """YAML loader whose registered constructors keep tagged nodes opaque."""

    def construct_plaintext(self, node):
        # Wrap the node's tag and raw value instead of resolving the tag.
        tag = node.tag
        value = node.value
        return PlainText(tag, value)
class PlainTextYamlDumper(yaml.Dumper):
    """Dumper subclass serving as a registration point for custom representers."""
def dict_constructor(loader, node):
    # Build an OrderedDict so mapping key order survives a load/dump cycle.
    pairs = loader.construct_pairs(node)
    return OrderedDict(pairs)
def dict_representer(dumper, data):
    # Represent mappings via their item pairs to keep insertion order.
    items = data.items()
    return dumper.represent_dict(items)
def plain_text_representer(dumper, data):
    # Emit the stored tag and value exactly as they were read in.
    tag, value = data.tag, data.value
    return dumper.represent_scalar(tag, value)
# Don't want to actually process the constructors, just keep the values
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
# Round-trip mappings through OrderedDict so key order is preserved.
PlainTextYamlDumper.add_representer(OrderedDict, dict_representer)
# PlainText nodes dump back out with their original tag and raw value.
PlainTextYamlDumper.add_representer(PlainText, plain_text_representer)
PlainTextYamlLoader.add_constructor(_mapping_tag, dict_constructor)
# Register every Grow-specific tag to load as an opaque PlainText node.
PlainTextYamlLoader.add_constructor(
    '!_', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
    '!g.csv', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
    '!g.doc', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
    '!g.json', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
    '!g.static', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
    '!g.url', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
    '!g.yaml', PlainTextYamlLoader.construct_plaintext)
class ConversionDocument(object):
    """One content file being split into per-locale files.

    The raw file is read from the pod, split into (front matter, content)
    pairs on ``---`` boundaries, and rewritten as one ``name@locale.ext``
    file per non-default locale plus a pruned base file.
    """

    def __init__(self, pod, file_name, default_locale):
        self.default_locale = default_locale
        self.pod = pod
        self.file_name = file_name
        self.raw_content = pod.read_file(file_name)
        self.normalize_raw_content()

    @staticmethod
    def determine_default_locale(front_matter):
        """Return ``$localization.default_locale`` from front matter, or None."""
        parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)
        if '$localization' in parsed:
            return parsed['$localization'].get('default_locale', None)
        return None

    @staticmethod
    def determine_locales(front_matter, default_locale=None,
                          remove_default_locale=True, remove_locales=True):
        """Return ``(locales, cleaned_front_matter)`` for one section.

        :param remove_default_locale: drop the default locale from the
            returned list and from the parsed keys.
        :param remove_locales: strip the ``$locale``/``$locales`` keys
            from the returned front matter entirely.
        """
        if not front_matter:
            return [], None
        parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)
        if isinstance(parsed, str):
            parsed = OrderedDict()
        locales = parsed.get('$locales', [])
        if '$locale' in parsed:
            locales.append(parsed['$locale'])
        if remove_default_locale:
            if default_locale in locales:
                locales.pop(locales.index(default_locale))
            if '$locales' in parsed and default_locale in parsed['$locales']:
                parsed['$locales'].pop(
                    parsed['$locales'].index(default_locale))
            if '$locale' in parsed and parsed['$locale'] == default_locale:
                del parsed['$locale']
        if remove_locales:
            if '$locales' in parsed:
                del parsed['$locales']
            if '$locale' in parsed:
                del parsed['$locale']
        return locales, yaml.dump(
            parsed, Dumper=PlainTextYamlDumper,
            allow_unicode=True, default_flow_style=False).strip() if parsed else ''

    @staticmethod
    def convert_for_locale(front_matter, locale, base=None):
        """Collapse ``key@locale`` fields into plain keys for ``locale``.

        Keys tagged with other locales are dropped; ``base`` (if given)
        seeds the result and is deep-merged with the converted values.
        """
        if not front_matter:
            parsed = {}
        else:
            parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)

        def visit(path, key, value):
            if not isinstance(key, str):
                return key, value
            if key.endswith('@#'):
                # '@#' keys are comment-style keys; keep untouched.
                return key, value
            match = LOCALIZED_KEY_REGEX.match(key)
            if not match:
                return key, value
            base_key = match.group(1)
            locale_from_key = match.group(2)
            if locale_from_key == locale:
                # If there is a key without the trailing @ then override it.
                parent = parsed
                for path_key in path:
                    parent = parent[path_key]
                if base_key in parent:
                    return base_key, value
                return '{}@'.format(base_key), value
            # Tagged for a different locale: remove from this file.
            return False

        parsed = iterutils.remap(parsed, visit=visit)
        # If there are pre-existing fields, use them as a base for the locale
        # specific values.
        result = base or {}
        _update_deep(result, parsed)
        return result

    @staticmethod
    def format_file(front_matter=None, content=None):
        """Join front matter and content back into on-disk file text."""
        if front_matter is None or front_matter.strip() == '':
            return SINGLE_TEMPLATE.format(content.lstrip())
        if content is None or content.strip() == '':
            return SINGLE_TEMPLATE.format(front_matter.lstrip())
        return COMBINED_TEMPLATE.format(front_matter.lstrip(), content.lstrip())

    @staticmethod
    def gather_for_locale(front_matter, locale):
        """Extract ``key@locale`` fields out of the base front matter.

        Returns ``(pruned_front_matter, locale_extra)`` where the second
        element mirrors the nested structure holding that locale's values.
        """
        if not front_matter:
            # BUG FIX: callers unpack two values; previously a bare '' was
            # returned here, which raised ValueError at the call site.
            return '', OrderedDict()
        parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)
        locale_extra = OrderedDict()

        def visit(path, key, value):
            if not isinstance(key, str):
                return key, value
            if key.endswith('@#'):
                return key, value
            match = LOCALIZED_KEY_REGEX.match(key)
            if not match:
                return key, value
            base_key = match.group(1)
            locale_from_key = match.group(2)
            if locale_from_key == locale:
                # If there is a key without the trailing @ then override it.
                parent = parsed
                locale_parent = locale_extra
                for path_key in path:
                    parent = parent[path_key]
                    if isinstance(locale_parent, list):
                        locale_parent = locale_parent[path_key]
                    elif path_key not in locale_parent:
                        # Mirror the base structure lazily as we descend.
                        if isinstance(parent, list):
                            locale_parent[path_key] = copy.deepcopy(parent)
                        else:
                            locale_parent[path_key] = OrderedDict()
                        locale_parent = locale_parent[path_key]
                    else:
                        locale_parent = locale_parent[path_key]
                if base_key in parent:
                    locale_parent[base_key] = value
                else:
                    locale_parent['{}@'.format(base_key)] = value
                if key in locale_parent:
                    locale_parent.pop(key, None)
                return False
            return key, value

        parsed = iterutils.remap(parsed, visit=visit)
        return (yaml.dump(
            parsed, Dumper=PlainTextYamlDumper,
            allow_unicode=True, default_flow_style=False).strip() if parsed else '',
            locale_extra)

    def convert(self):
        """Convert this document, writing one file per non-default locale.

        Raises :class:`LocaleMissingError` for untagged sections and
        :class:`LocaleExistsError` if a target locale file already exists.
        """
        # Files with @ in them should already be converted.
        if '@' in self.file_name:
            logging.info(
                'Filename contains a @, skipping: {}'.format(self.file_name))
            return
        # Ignore hidden files.
        if self.file_name.startswith('.'):
            logging.info(
                'Filename starts with ., skipping: {}'.format(self.file_name))
            return
        # Ignore files that don't have an extension.
        _, file_extension = os.path.splitext(self.file_name)
        if not file_extension:
            logging.info(
                'Filename does not have an extension, skipping: {}'.format(self.file_name))
            return
        pairs = list(self.split())
        if len(pairs) <= 1:
            logging.info(
                'Single locale detected, skipping: {}'.format(self.file_name))
            return
        logging.info('Converting: {}'.format(self.file_name))
        logging.info(' - Number of content pairs: {}'.format(len(pairs)))
        # Determine if there is a file specific default_locale in first pair.
        default_locale = ConversionDocument.determine_default_locale(
            pairs[0][0]) or self.default_locale
        logging.info(' - Using default_locale: {}'.format(default_locale))
        # Base content will be pruned of localized values that belong in files.
        base_front_matter = pairs[0][0]
        for pair in pairs[1:]:
            locales, _ = ConversionDocument.determine_locales(
                pair[0], default_locale, remove_locales=False,
                remove_default_locale=False)
            if not locales:
                raise LocaleMissingError(
                    'A section in {} is missing a locale and would be lost.'.format(self.file_name))
            # Ensure that there are not existing files for the Locales.
            for locale in locales:
                locale_filename = self.file_name_for_locale(locale)
                if self.pod.file_exists(locale_filename):
                    raise LocaleExistsError(
                        '{} locale section (defined in {}) already has a localized file ({}).\nPlease resolve this conflict and re-run the conversion.'.format(
                            locale, self.file_name, locale_filename))
        # Store each locale contents until the end so we can combine multiple
        # sections that may use the same locale.
        locale_to_content = {}
        locale_to_front_matter = {}
        for pair in pairs[1:]:
            locales, front_matter = ConversionDocument.determine_locales(
                pair[0], default_locale, remove_locales=True,
                remove_default_locale=False)
            for locale in locales:
                locale_to_content[locale] = pair[1]
                if locale in locale_to_front_matter:
                    locale_extra = locale_to_front_matter[locale]
                else:
                    base_front_matter, locale_extra = ConversionDocument.gather_for_locale(
                        base_front_matter, locale)
                # Combine the extra front_matter from the base document with
                # the pair specific front_matter.
                locale_front_matter = ConversionDocument.convert_for_locale(
                    front_matter, locale, base=locale_extra)
                # Store the front matter in case another section adds to it.
                locale_to_front_matter[locale] = locale_front_matter
        # Write the final locale files.
        for locale, locale_front_matter in locale_to_front_matter.items():
            content = locale_to_content.get(locale, None)
            locale_filename = self.file_name_for_locale(locale)
            logging.info('Writing: {}'.format(locale_filename))
            locale_front_matter_dump = yaml.dump(
                locale_front_matter, Dumper=PlainTextYamlDumper, allow_unicode=True,
                default_flow_style=False).strip() if locale_front_matter else ''
            output = ConversionDocument.format_file(
                locale_front_matter_dump, content)
            self.pod.write_file(locale_filename, output)
        # Do the base file after specific tagged fields are removed.
        pair = pairs[0]
        content = pair[1]
        _, base_front_matter = ConversionDocument.determine_locales(
            base_front_matter, default_locale, remove_locales=False,
            remove_default_locale=True)
        logging.info('Writing: {}'.format(self.file_name))
        output = ConversionDocument.format_file(base_front_matter, content)
        self.pod.write_file(self.file_name, output)

    def file_name_for_locale(self, locale_identifier):
        """Return the locale-specific path (``name@locale.ext``)."""
        if locale_identifier is None:
            return self.file_name
        file_parts = self.file_name.split('.')
        return '{}@{}.{}'.format(
            '.'.join(file_parts[:-1]), locale_identifier, file_parts[-1])

    def normalize_raw_content(self):
        """Strip a leading empty ``---`` section from yaml files, in place and on disk."""
        # Clean and rewrite the yaml files that start with an empty section.
        if self.file_name.endswith('.yaml') and self.raw_content.lstrip().startswith('---'):
            logging.info('Normalizing: {}'.format(self.file_name))
            self.raw_content = self.raw_content.lstrip()[3:].lstrip()
            self.pod.write_file(self.file_name, self.raw_content)

    def split(self):
        """Yield ``(front_matter, content)`` pairs split on ``---`` boundaries.

        yaml files yield ``(section, None)``; other files alternate
        front-matter/content sections.
        """
        parts = BOUNDARY_REGEX.split(self.raw_content)
        # Remove the first, empty list item.
        if parts[0].strip() == '':
            parts.pop(0)
        # Yaml files have no 'content'
        if self.file_name.endswith('.yaml'):
            while parts:
                yield parts.pop(0).strip(), None
        else:
            while parts:
                if len(parts) == 1:
                    # Trailing lone section is treated as content only.
                    yield None, parts.pop(0).strip()
                    break
                front_matter = None
                content = ''
                if parts:
                    front_matter = parts.pop(0).strip() or None
                if parts:
                    content = parts.pop(0).strip() or None
                yield front_matter, content
class Converter(object):
    """Walks the pod's /content tree and converts every document."""

    @staticmethod
    def convert(pod):
        """Convert all content files in ``pod`` to the locale-split format."""
        default_locale = pod.podspec.default_locale
        logging.info('Using default locale: {}'.format(default_locale))
        # Go through each document and convert to the updated format.
        for root, _dirs, file_names in pod.walk('/content'):
            pod_dir = root.replace(pod.root, '')
            for file_name in file_names:
                doc_path = os.path.join(pod_dir, file_name)
                doc = ConversionDocument(pod, doc_path, default_locale)
                try:
                    doc.convert()
                except:
                    # Report which file failed, then let the error propagate.
                    print('Error trying to convert: {}'.format(doc_path))
                    raise
| grow/grow | grow/conversion/content_locale_split.py | Python | mit | 15,529 | [
"VisIt"
] | 207d35dbf5724f53ce4d71c665da5cda0a968e2d6f338d0b0d88e25a1243ff38 |
import numpy as np
import multiprocessing as mp
import Starfish.constants as C
import csv
import h5py
from astropy.table import Table
from astropy.io import ascii
def multivariate_normal(cov):
    """Draw one zero-mean multivariate normal sample with covariance ``cov``.

    Reseeds the RNG first so forked worker processes do not produce
    identical draws.
    """
    np.random.seed()
    dim = cov.shape[0]
    draw = np.random.multivariate_normal(np.zeros(dim), cov)
    print("Generated residual")
    return draw
def random_draws(cov, num, nprocesses=mp.cpu_count()):
    '''
    Return random multivariate Gaussian draws from the covariance matrix.

    :param cov: covariance matrix
    :param num: number of draws
    :param nprocesses: number of worker processes (default: all cores)
    :returns: (num, N) array of random draws
    '''
    # BUG FIX: the pool was never closed/joined, leaking worker processes
    # on every call; the context manager terminates it on exit.
    with mp.Pool(nprocesses) as pool:
        result = pool.map(multivariate_normal, [cov] * num)
    return np.array(result)
def envelope(spectra):
    '''
    Given a 2D array of spectra, shape (Nspectra, Npix), return the minimum/maximum envelope of these as two spectra.
    '''
    arr = np.asarray(spectra)
    return arr.min(axis=0), arr.max(axis=0)
def std_envelope(spectra):
    '''
    Given a 2D array of spectra, shape (Nspectra, Npix), return the std envelope of these as two spectra.
    '''
    spread = np.std(spectra, axis=0)
    return -spread, spread
def visualize_draws(spectra, num=20):
    '''
    Given a 2D array of spectra, shape (Nspectra, Npix), visualize them to choose the most illustrative "random"
    samples.

    Plots up to ``num`` spectra stacked with a vertical offset and an index
    annotation, then blocks on ``plt.show()``.
    '''
    import matplotlib.pyplot as plt
    # Offset between stacked spectra; spectra[0] is 1-D so this std is a
    # scalar spacing derived from the first spectrum's spread.
    offset = 6 * np.std(spectra[0], axis=0)
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(111)
    for i, (spectrum, off) in enumerate(zip(spectra[:num], offset * np.arange(0, num))):
        # Dotted baseline, the offset spectrum, and its index label.
        ax.axhline(off, ls=":", color="0.5")
        ax.plot(spectrum + off, "k")
        ax.annotate(i, (1, off))
    plt.show()
def saveall(fig, fname, formats=(".png", ".pdf", ".svg")):
    '''
    Save a matplotlib figure instance to many different formats.

    :param fig: figure-like object exposing ``savefig``
    :param fname: output path without extension
    :param formats: iterable of file extensions (including the dot)
    '''
    # Use an immutable tuple default (a mutable list default is shared
    # across calls) and avoid shadowing the builtin ``format``.
    for ext in formats:
        fig.savefig(fname + ext)
#Set of kernels *exactly* as defined in extern/cov.h
@np.vectorize
def k_global(r, a, l):
    """Tapered Matern-3/2 global kernel.

    Amplitude ``a``, length scale ``l``; a cosine taper forces the kernel
    to exactly zero at distance ``r >= 6*l``.
    """
    cutoff = 6. * l
    if r >= cutoff:
        return 0.
    taper = 0.5 + 0.5 * np.cos(np.pi * r / cutoff)
    scaled = np.sqrt(3) * r / l
    return taper * a**2 * (1 + scaled) * np.exp(-scaled)
@np.vectorize
def k_local(x0, x1, a, mu, sigma):
    """Gaussian local kernel centered at wavelength ``mu``.

    Distances are measured in velocity space via ``C.c_kms``; a cosine
    taper forces the kernel to zero beyond ``4*sigma``.
    """
    cutoff = 4.0 * sigma  # spot where kernel goes to 0
    v0 = C.c_kms / mu * np.abs(x0 - mu)
    v1 = C.c_kms / mu * np.abs(x1 - mu)
    # Taper on the larger of the two velocity distances.
    r_tap = max(v0, v1)
    if r_tap >= cutoff:
        return 0.
    taper = 0.5 + 0.5 * np.cos(np.pi * r_tap / cutoff)
    gauss = np.exp(-0.5 * C.c_kms**2 / mu**2 * ((x0 - mu)**2 + (x1 - mu)**2) / sigma**2)
    return taper * a**2 * gauss
def k_global_func(x0i, x1i, x0v=None, x1v=None, a=None, l=None):
    """Index-based wrapper over ``k_global`` for use with ``np.fromfunction``."""
    wl0 = x0v[x0i]
    wl1 = x1v[x1i]
    # Convert the wavelength separation to a velocity distance.
    velocity_r = np.abs(wl1 - wl0) * C.c_kms / wl0
    return k_global(r=velocity_r, a=a, l=l)
def k_local_func(x0i, x1i, x0v=None, x1v=None, a=None, mu=None, sigma=None):
    """Index-based wrapper over ``k_local`` for use with ``np.fromfunction``."""
    wl0 = x0v[x0i]
    wl1 = x1v[x1i]
    return k_local(x0=wl0, x1=wl1, a=a, mu=mu, sigma=sigma)
#All of these return *dense* covariance matrices as defined in the paper
def Poisson_matrix(wl, sigma):
    '''
    Diagonal (Poisson) noise covariance matrix over the wavelength grid.

    Sigma can be an array (per-pixel) or a single float.
    '''
    return np.square(sigma) * np.eye(len(wl))
def k_global_matrix(wl, a, l):
    """Dense global covariance matrix over the wavelength grid ``wl``."""
    N = len(wl)
    # BUG FIX: the ``np.int`` alias was removed in NumPy 1.24; the builtin
    # ``int`` is the documented replacement.
    matrix = np.fromfunction(k_global_func, (N, N), x0v=wl, x1v=wl, a=a, l=l, dtype=int)
    return matrix
def k_local_matrix(wl, a, mu, sigma):
    """Dense local covariance matrix over the wavelength grid ``wl``."""
    N = len(wl)
    # BUG FIX: the ``np.int`` alias was removed in NumPy 1.24; the builtin
    # ``int`` is the documented replacement.
    matrix = np.fromfunction(k_local_func, (N, N), x0v=wl, x1v=wl, a=a, mu=mu, sigma=sigma, dtype=int)
    return matrix
# Tools to examine Markov Chain Runs
def h5read(fname, burn=0, thin=1):
    '''
    Read the flatchain from the HDF5 file and return it.

    :param fname: path to an HDF5 file with a "samples" dataset
    :param burn: number of leading samples to discard
    :param thin: stride between kept samples
    '''
    # Context manager guarantees the file is closed even when the burn-in
    # assertion fails; the original leaked the handle in that case.
    with h5py.File(fname, "r") as fid:
        assert burn < fid["samples"].shape[0]
        print("{} burning by {} and thinning by {}".format(fname, burn, thin))
        flatchain = fid["samples"][burn::thin]
    return flatchain
def csvread(fname, burn=0, thin=1):
    '''
    Read the flatchain from a CSV file and return it.

    Skips the single header row, then discards ``burn`` samples and
    keeps every ``thin``-th one.
    '''
    samples = np.genfromtxt(fname, skip_header=1, dtype=float, delimiter=",")
    return samples[burn::thin]
def gelman_rubin(samplelist):
    '''
    Given a list of flatchains from separate runs (that already have burn in cut
    and have been trimmed, if desired), compute the Gelman-Rubin statistics in
    Bayesian Data Analysis 3, pg 284. If you want to compute this for fewer
    parameters, then slice the list before feeding it in.
    '''
    # BUG FIX: ``sys.stdout`` is used below but ``sys`` was never imported
    # anywhere in the module, so this function raised NameError.
    import sys
    full_iterations = len(samplelist[0])
    assert full_iterations % 2 == 0, "Number of iterations must be even. Try cutting off a different number of burn in samples."
    shape = samplelist[0].shape
    # Make sure all the chains have the same number of iterations.
    for flatchain in samplelist:
        assert len(flatchain) == full_iterations, "Not all chains have the same number of iterations!"
        assert flatchain.shape == shape, "Not all flatchains have the same shape!"
    # Make sure all chains have the same number of parameters.
    # Following Gelman,
    # n = length of split chains
    # i = index of iteration in chain
    # m = number of split chains
    # j = index of which chain
    n = full_iterations//2
    m = 2 * len(samplelist)
    nparams = samplelist[0].shape[-1] # the trailing dimension of a flatchain
    # Block the chains up into a 3D array, splitting each chain in half.
    chains = np.empty((n, m, nparams))
    for k, flatchain in enumerate(samplelist):
        chains[:,2*k,:] = flatchain[:n] # first half of chain
        chains[:,2*k + 1,:] = flatchain[n:] # second half of chain
    # Now compute statistics.
    # Average value of each chain.
    avg_phi_j = np.mean(chains, axis=0, dtype="f8") # average over iterations, now a (m, nparams) array
    # Average value of all chains.
    avg_phi = np.mean(chains, axis=(0,1), dtype="f8") # average over iterations and chains, now a (nparams,) array
    # Between-chain and within-chain variances (BDA3 notation).
    B = n/(m - 1.0) * np.sum((avg_phi_j - avg_phi)**2, axis=0, dtype="f8") # now a (nparams,) array
    s2j = 1./(n - 1.) * np.sum((chains - avg_phi_j)**2, axis=0, dtype="f8") # now a (m, nparams) array
    W = 1./m * np.sum(s2j, axis=0, dtype="f8") # now a (nparams,) array
    var_hat = (n - 1.)/n * W + B/n # still a (nparams,) array
    std_hat = np.sqrt(var_hat)
    R_hat = np.sqrt(var_hat/W) # still a (nparams,) array
    data = Table({ "Value": avg_phi,
                   "Uncertainty": std_hat},
                 names=["Value", "Uncertainty"])
    print(data)
    ascii.write(data, sys.stdout, Writer = ascii.Latex, formats={"Value":"%0.3f", "Uncertainty":"%0.3f"})
    print("R_hat: {}".format(R_hat))
    if np.any(R_hat >= 1.1):
        print("You might consider running the chain for longer. Not all R_hats are less than 1.1.")
def plot(flatchain, base, format=".png"):
    '''
    Make a triangle (corner) plot of the flatchain and save it as
    ``<base>triangle<format>``.
    '''
    import triangle
    axis_labels = [r"$T_\mathrm{eff}$ [K]", r"$\log g$ [dex]", r"$Z$ [dex]",
        r"$v_z$ [km/s]", r"$v \sin i$ [km/s]", r"$\log_{10} \Omega$"]
    fig = triangle.corner(
        flatchain,
        quantiles=[0.16, 0.5, 0.84],
        plot_contours=True,
        plot_datapoints=False,
        labels=axis_labels,
        show_titles=True)
    fig.savefig(base + "triangle" + format)
def paper_plot(flatchain, base, format=".pdf"):
    '''
    Make a triangle plot of just M vs i

    Applies publication-sized matplotlib rc settings, corner-plots columns
    0 and 6 of the flatchain, and saves ``<base>ptriangle<format>``.
    '''
    import matplotlib
    # Publication styling: small fonts, thin lines. NOTE(review): these rc
    # changes are global and persist after this call.
    matplotlib.rc("font", size=8)
    matplotlib.rc("lines", linewidth=0.5)
    matplotlib.rc("axes", linewidth=0.8)
    matplotlib.rc("patch", linewidth=0.7)
    import matplotlib.pyplot as plt
    #matplotlib.rc("axes", labelpad=10)
    from matplotlib.ticker import FormatStrFormatter as FSF
    from matplotlib.ticker import MaxNLocator
    import triangle
    labels = [r"$M_\ast\enskip [M_\odot]$", r"$i_d \enskip [{}^\circ]$"]
    #r"$r_c$ [AU]", r"$T_{10}$ [K]", r"$q$", r"$\log M_\textrm{CO} \enskip [\log M_\oplus]$",
    #r"$\xi$ [km/s]"]
    # Column indices of the two plotted parameters (mass and inclination).
    inds = np.array([0, 6, ]) #1, 2, 3, 4, 5])
    K = len(labels)
    fig, axes = plt.subplots(K, K, figsize=(3., 2.5))
    # Draw the corner plot into the pre-made figure so we keep the axes handles.
    figure = triangle.corner(flatchain[:, inds], plot_contours=True,
        plot_datapoints=False, labels=labels, show_titles=False,
        fig=fig)
    # Pull axis labels in closer for the small figure size.
    for ax in axes[:, 0]:
        ax.yaxis.set_label_coords(-0.4, 0.5)
    for ax in axes[-1, :]:
        ax.xaxis.set_label_coords(0.5, -0.4)
    figure.subplots_adjust(left=0.2, right=0.8, top=0.95, bottom=0.2)
    figure.savefig(base + "ptriangle" + format)
def plot_walkers(flatchain, base, start=0, end=-1, labels=None):
    """
    Plot the trace of every parameter in ``flatchain`` as a stack of
    panels sharing the sample-number axis, and save the figure to
    ``base + "walkers.png"``.
    """
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    # majorLocator = MaxNLocator(nbins=4)

    nsamples, nparams = flatchain.shape
    steps = np.arange(nsamples)[start:end]
    trace = flatchain[start:end]

    plt.rc("ytick", labelsize="x-small")
    fig, axes = plt.subplots(nrows=nparams, sharex=True)
    for idx, panel in enumerate(axes):
        panel.plot(steps, trace[:, idx])
        # Trim crowded tick labels on stacked panels.
        panel.yaxis.set_major_locator(MaxNLocator(nbins=6, prune="both"))
        if labels is not None:
            panel.set_ylabel(labels[idx])

    axes[-1].set_xlabel("Sample number")
    fig.subplots_adjust(hspace=0)
    fig.savefig(base + "walkers.png")
    plt.close(fig)
def estimate_covariance(flatchain, base, ndim=0):
    """
    Print the sample covariance/correlation of the chain, save an image of
    the correlation matrix to ``cor_coefficient.png``, and write the
    'optimal' Metropolis jump covariance (2.38**2 / d scaling) to
    ``base + "opt_jump.npy"``.

    :param ndim: override for the parameter dimension; 0 means use the
        number of columns of ``flatchain``.
    """
    import matplotlib.pyplot as plt
    #print("Parameters {}".format(flatchain.param_tuple))
    #samples = flatchain.samples

    d = flatchain.shape[1] if ndim == 0 else ndim

    cov = np.cov(flatchain, rowvar=0)

    # Now try correlation coefficient
    cor = np.corrcoef(flatchain, rowvar=0)
    print("Correlation coefficient")
    print(cor)

    # Make a plot of correlation coefficient.
    fig, ax = plt.subplots(figsize=(0.5 * d, 0.5 * d), nrows=1, ncols=1)
    extent = (0.5, d + 0.5, 0.5, d + 0.5)
    ax.imshow(cor, origin="upper", vmin=-1, vmax=1, cmap="bwr",
              interpolation="none", extent=extent)
    fig.savefig("cor_coefficient.png")

    print("'Optimal' jumps with covariance (units squared)")
    opt_jump = 2.38**2/d * cov
    # opt_jump = 1.7**2/d * cov # gives about ??
    print(opt_jump)

    print("Standard deviation")
    std_dev = np.sqrt(np.diag(cov))
    print(std_dev)

    print("'Optimal' jumps")
    print(2.38/np.sqrt(d) * std_dev)

    np.save(base + "opt_jump.npy", opt_jump)
def cat_list(file, flatchainList):
    '''
    Given a list of flatchains, concatenate all of these and write them to a
    single HDF5 file.
    '''
    # Write this out to the new file
    print("Opening", file)
    merged = np.concatenate(flatchainList, axis=0)

    out = h5py.File(file, "w")
    samples = out.create_dataset("samples", merged.shape, compression='gzip',
                                 compression_opts=9)
    samples[:] = merged
    # dset.attrs["parameters"] = "{}".format(param_tuple)
    out.close()
def main():
    """Smoke test: draw 5 random samples from a 20-D identity covariance."""
    covariance = np.eye(20)
    samples = random_draws(covariance, 5)
    print(samples)


if __name__ == '__main__':
    main()
| gully/Starfish | Starfish/utils.py | Python | bsd-3-clause | 11,039 | [
"Gaussian"
] | ac699431019c5bddd00a6246982bc9572546600543898be629462f0d51fd14ad |
#!/usr/bin/env python
"""This tool runs free energy calculations with Amber MMPBSA.py.
"""
__author__ = "Ole Weidner"
__email__ = "ole.weidner@rutgers.edu"
__copyright__ = "Copyright 2013-2014, The RADICAL Project at Rutgers"
__license__ = "MIT"
import imp
import os, sys, uuid
import optparse
#from radical.ensemblemd.htbac.simchain import run_benchmark
from radical.ensemblemd.htbac.simchain import run_workload
from radical.ensemblemd.htbac.simchain import run_checkenv
from radical.ensemblemd.htbac.simchain import run_testjob
# ----------------------------------------------------------------------------
#
def main():
    """Parse the command line and dispatch to the requested run mode.

    Exactly one of --checkenv, --testjob, --benchmark or --workload must be
    given, together with the mandatory -c/--config file. Exits with the
    result code of the selected runner (or status 2 on a usage error).
    """
    usage = "usage: %prog --config [--checkenv, --testjob, --workload --benchmark]"

    parser = optparse.OptionParser(usage=usage)

    parser.add_option('-c', '--config',
                      metavar='CONFIG',
                      dest='config',
                      help='The user-specific configuration file. (REQUIRED)')

    parser.add_option('--checkenv',
                      dest='checkenv',
                      action="store_true",
                      help='Launches a test job to check the remote execution environment.')

    parser.add_option('--testjob',
                      dest='testjob',
                      action="store_true",
                      help='Launches a test job with a single task on the remote site.')

    parser.add_option('--benchmark',
                      dest='benchmark',
                      action="store_true",
                      help='Launches a series of test jobs to test performance and scalability.')

    parser.add_option('-w', '--workload',
                      metavar='WORKLOAD',
                      dest='workload',
                      help='Launches the tasks defined in the provided workload description file.')

    # PARSE THE CMD LINE OPTIONS
    (options, args) = parser.parse_args()

    if options.config is None:
        parser.error("You must define a configuration (-c/--config). Try --help for help.")
    config = imp.load_source('config', options.config)

    if options.checkenv is True:
        # RUN THE CHECK ENVIRONMENT JOB
        result = run_checkenv(config=config)
        sys.exit(result)

    elif options.testjob is True:
        # RUN THE SIM TEST JOB
        result = run_testjob(config=config)
        sys.exit(result)

    elif options.benchmark is True:
        # RUN THE SIM BENCHMARK JOBS
        # BUG FIX: the module-level `run_benchmark` import is commented out,
        # so this branch previously died with a NameError. Import it lazily
        # and fail with a clear usage error if it is unavailable.
        try:
            from radical.ensemblemd.htbac.simchain import run_benchmark
        except ImportError:
            parser.error("Benchmarking is not available in this installation.")
        result = run_benchmark(config=config)
        sys.exit(result)

    elif options.workload is not None:
        # RUN A WORKLOAD
        # Read WORKLOAD from the module object returned by imp.load_source()
        # instead of a second `from workload import WORKLOAD`, which only
        # worked because load_source registers the module in sys.modules.
        workload = imp.load_source('workload', options.workload)
        result = run_workload(config=config, workload=workload.WORKLOAD)
        sys.exit(result)

    else:
        # ERROR - INVALID PARAMETERS
        # (parser.error() already exits with status 2; sys.exit is a safety net.)
        parser.error("You must run either --checkenv, --testjob, --workload or --benchmark. Try --help for help.")
        sys.exit(1)
| radical-cybertools/HT-BAC | src/radical/ensemblemd/htbac/bin/simchain.py | Python | mit | 2,959 | [
"Amber"
] | a5affdd87e7bdde1c1128bc6efddaf5e6917f79d9f72149d2466ae511293f765 |
"""
Add-on to make the MonitoringClient works with an IOLoop from a Tornado Server
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import tornado.ioloop
from DIRAC import gLogger
class MonitoringFlusherTornado(object):
    """
    This class flushes all monitoring clients registered
    Works with the Tornado IOLoop
    """

    def __init__(self):
        self.__mcList = []
        gLogger.info("Using MonitoringClient in IOLoop mode")
        # Tornado attaches the periodic callback to the current IOLoop on
        # its own, so there is no need to call IOLoop.current() here.
        # Flush every 5 minutes (300000 ms).
        tornado.ioloop.PeriodicCallback(self.flush, 300000).start()

    def flush(self, allData=False):
        """Flush every registered monitoring client."""
        gLogger.info("Flushing monitoring")
        for client in self.__mcList:
            client.flush(allData)

    def registerMonitoringClient(self, mc):
        """Register a client, ignoring duplicates so each one is flushed once."""
        if mc in self.__mcList:
            return
        self.__mcList.append(mc)
| ic-hep/DIRAC | src/DIRAC/FrameworkSystem/Client/MonitoringClientIOLoop.py | Python | gpl-3.0 | 1,027 | [
"DIRAC"
] | f6bfa93b9429fa29a46f4f3bf5ab9238b5d4c5d96ff9b86a1bee0833b667b8b3 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""
A pass for manifesting explicit memory allocations.
"""
import numpy as np
from tvm.ir.transform import PassContext, module_pass
from tvm.relay.transform import InferType
from tvm import nd, container
from ..function import Function
from ..expr_functor import ExprVisitor, ExprMutator
from ..scope_builder import ScopeBuilder
from .. import op
from ... import DataType, register_func
from .. import ty, expr
from ..backend import compile_engine
from ..op.memory import flatten_tuple_type, from_tuple_type, to_tuple_type
from ... import cpu
from ..op.memory import alloc_storage
from ..analysis import context_analysis
from ..._ffi.runtime_ctypes import TVMContext
def alloc_tensor(storage, shape, dtype="float32", assert_shape=None):
    """Allocate a tensor at offset zero within an existing storage."""
    zero_offset = expr.const(0, dtype="int64")
    return op.memory.alloc_tensor(storage, zero_offset, shape, dtype, assert_shape)
def is_primitive(call):
    """Return True when ``call`` targets a fused primitive function,
    i.e. ``call.op.attrs.Primitive`` exists and equals 1."""
    try:
        return int(call.op.attrs.Primitive) == 1
    except AttributeError:
        # Any missing link in the attribute chain means "not primitive".
        return False
def is_device_copy(func):
    """
    Check if the current relay expression is a device copy call. We can simply check
    the body of it if it is a function becase the device_copy op is opaque.
    """
    if isinstance(func, Function):
        inner = func.body
        if not isinstance(inner, expr.Call):
            return False
        return inner.op == op.get("device_copy")
    if isinstance(func, expr.Call):
        return func.op == op.get("device_copy")
    return False
class CheckReshapeOnly(ExprVisitor):
    """A pass to check if the fused op contains only reshape ops."""

    def __init__(self):
        super().__init__()
        # Every operator that merely reinterprets the data layout.
        reshape_like = ("reshape", "contrib_reverse_reshape", "dyn.reshape")
        self._reshape_ops = [op.get(name) for name in reshape_like]
        # Flips to False the first time a non-reshape call is seen.
        self.reshape_only = True

    def visit_call(self, call):
        if not self.reshape_only:
            # Verdict is already negative; stop inspecting this subtree.
            return
        if call.op not in self._reshape_ops:
            self.reshape_only = False
        for arg in call.args:
            self.visit(arg)
def is_reshape_only(func):
    """Check if the primitive function contains only reshape ops."""
    visitor = CheckReshapeOnly()
    visitor.visit(func)
    return visitor.reshape_only
class ManifestAllocPass(ExprMutator):
    """A pass for explicitly manifesting all memory allocations in Relay.

    Rewrites every primitive call into explicit alloc_storage/alloc_tensor
    plus an invoke_tvm_op, inserting shape functions for dynamically shaped
    outputs. Device placement for each expression comes from
    ``context_analysis_map``.
    """

    def __init__(self, target_host, context_analysis_map):
        # Relay VM ops used to stitch the lowered program together.
        self.invoke_tvm = op.vm.invoke_tvm_op
        self.shape_func = op.vm.shape_func
        self.shape_of = op.vm.shape_of
        self.reshape_tensor = op.vm.reshape_tensor
        # Stack of let-scopes; a fresh scope is pushed per let-chain.
        self.scopes = [ScopeBuilder()]
        self.target_host = target_host
        self.default_context = cpu(0)
        # All shape/size arithmetic is done in int64.
        self.compute_dtype = "int64"
        # Maps expression -> (device_type, device_id).
        self.context_analysis_map = context_analysis_map
        super().__init__()

    def get_context(self, exp):
        """Get the context of a given expression"""
        assert exp in self.context_analysis_map, exp.astext(False)
        val = self.context_analysis_map[exp]
        # val[0], val[1] are device_type and device_id, respectively.
        # We don't need to unpack after porting this pass to C++.
        assert len(val) == 2
        return TVMContext(val[0].value, val[1].value)

    def device_copy(self, inp, src_ctx, dst_ctx):
        """Insert a device copy node."""
        return self.visit(op.tensor.device_copy(inp, src_ctx, dst_ctx))

    def current_scope(self):
        # The innermost (most recently pushed) scope.
        return self.scopes[-1]

    def visit_tuple(self, tup):
        """Rewrite tuple fields, let-binding constants so they are named."""
        scope = self.current_scope()
        new_fields = []
        for field in tup.fields:
            field = self.visit(field)
            if isinstance(field, expr.Constant):
                field = scope.let("const", field)
            new_fields.append(field)
        return expr.Tuple(new_fields)

    def compute_alignment(self, dtype):
        """Byte alignment for a buffer of ``dtype`` (minimum 64 bytes)."""
        dtype = DataType(dtype)
        align = (dtype.bits // 8) * dtype.lanes
        # MAGIC CONSTANT FROM device_api.h
        if align < 64:
            align = 64
        return expr.const(align, dtype="int64")

    def compute_storage_in_relay(self, shape, dtype):
        """Build a Relay expression for the buffer size in bytes.

        Size = prod(shape) * ceil(bits * lanes / 8), computed symbolically
        so it works for dynamic shapes.
        """
        dtype = DataType(dtype)
        els = op.prod(shape)
        num = expr.const(dtype.bits * dtype.lanes, self.compute_dtype)
        num = num + expr.const(7, self.compute_dtype)
        div = expr.const(8, self.compute_dtype)
        return els * (num / div)

    def compute_storage(self, tensor_type):
        """Compute the byte size of a statically shaped tensor as a constant."""
        dtype = DataType(tensor_type.dtype)
        shape = [int(sh) for sh in tensor_type.shape]
        size = 1
        for sh in shape:
            size *= sh
        # Round bits up to whole bytes per element.
        size *= (dtype.bits * dtype.lanes + 7) // 8
        return expr.const(size, dtype=self.compute_dtype)

    def make_static_allocation(self, scope, tensor_type, ctx, name_hint):
        """Allocate a tensor with a statically known shape."""
        shape = [int(sh) for sh in tensor_type.shape]
        if len(shape) == 0:
            # Scalar: represent the shape as a rank-0 constant.
            shape = expr.const(np.empty((), dtype=self.compute_dtype), dtype=self.compute_dtype)
        else:
            shape = expr.const(np.array(shape), dtype=self.compute_dtype)
        size = self.compute_storage(tensor_type)
        alignment = self.compute_alignment(tensor_type.dtype)
        dtype = tensor_type.dtype
        sto = scope.let("storage_{0}".format(name_hint), alloc_storage(size, alignment, ctx, dtype))
        # TODO(@jroesch): There is a bug with typing based on the constant shape.
        tensor = alloc_tensor(sto, shape, dtype, tensor_type.shape)
        return scope.let("tensor_{0}".format(name_hint), tensor)

    def visit_let(self, let):
        """Flatten a chain of lets into one ScopeBuilder scope."""
        scope = ScopeBuilder()

        self.scopes.append(scope)
        while isinstance(let, expr.Let):
            new_val = self.visit(let.value)
            scope.let(let.var, new_val)
            let = let.body

        new_body = self.visit(let)
        scope.ret(new_body)
        self.scopes.pop()

        return scope.get()

    def emit_shape_func(self, scope, func, new_args):
        """Insert the shape function given a primitive function."""
        shape_func_ins = []
        engine = compile_engine.get()
        cfunc = engine.lower_shape_func(func, self.target_host)
        input_states = cfunc.shape_func_param_states

        is_inputs = []
        input_pos = 0
        # Shape functions always run on the CPU.
        cpu_ctx = nd.cpu(0)
        for i, (arg, state) in enumerate(zip(new_args, input_states)):
            state = int(state)
            # Pass Shapes
            if state == 2:
                for j, subexp in enumerate(from_tuple_type(arg.type_annotation, arg)):
                    sh_of = self.visit(self.shape_of(subexp))
                    shape_func_ins.append(scope.let("in_shape_{0}".format(input_pos + j), sh_of))
                    input_pos += 1
                is_inputs.append(0)
            # Pass Inputs
            elif state == 1:
                new_arg = self.visit(arg)
                ctx = self.get_context(arg)
                # Data-dependent shape functions need the value itself, so
                # copy it to the CPU if it lives on another device.
                if ctx.device_type != cpu_ctx.device_type:
                    new_arg = self.device_copy(new_arg, ctx, cpu_ctx)
                shape_func_ins.append(scope.let("in_shape_{0}".format(input_pos), new_arg))
                input_pos += 1
                is_inputs.append(1)
            else:
                # TODO(@jroesch): handle 3rd case
                raise Exception("unsupported shape function input state")

        out_shapes = []
        for i, out in enumerate(cfunc.outputs):
            tt = ty.TensorType(out.shape, out.dtype)
            # Put shape func on CPU. This also ensures that everything between
            # shape_of and shape_func are on CPU.
            alloc = self.make_static_allocation(scope, tt, cpu_ctx, i)
            alloc = scope.let("shape_func_out_{0}".format(i), alloc)
            out_shapes.append(alloc)

        shape_call = self.shape_func(
            func, expr.Tuple(shape_func_ins), expr.Tuple(out_shapes), is_inputs
        )
        scope.let("shape_func", shape_call)
        return out_shapes

    def dynamic_invoke(self, scope, func, ins, new_args, out_types, ret_type):
        """Generate the code for invoking a TVM op with a dynamic shape."""
        out_shapes = self.emit_shape_func(scope, func, new_args)

        storages = []
        func_ctx = self.get_context(func)
        for i, (out_shape, out_type) in enumerate(zip(out_shapes, out_types)):
            size = self.compute_storage_in_relay(out_shape, out_type.dtype)
            alignment = self.compute_alignment(out_type.dtype)
            sto = scope.let(
                "storage_{i}".format(i=i), alloc_storage(size, alignment, func_ctx, out_type.dtype)
            )
            storages.append(sto)

        outs = []
        sh_ty_storage = zip(out_shapes, out_types, storages)
        for i, (out_shape, out_type, storage) in enumerate(sh_ty_storage):
            alloc = alloc_tensor(storage, out_shape, out_type.dtype, out_type.shape)
            alloc = scope.let("out_{i}".format(i=i), alloc)
            outs.append(alloc)

        tuple_outs = expr.Tuple(outs)
        invoke = self.invoke_tvm(func, ins, tuple_outs)
        # Bind the invocation for its side effect; its value is unused.
        scope.let("", invoke)
        return to_tuple_type(ret_type, tuple_outs.fields)

    def emit_reshape_tensor(self, scope, func, new_args, ret_type):
        """Lower a reshape-only fused function to the reshape_tensor intrinsic."""
        if self.is_dynamic(ret_type):
            out_shapes = self.emit_shape_func(scope, func, new_args)
            shape_expr = out_shapes[0]
        else:
            # constant output shape
            shape = [int(dim) for dim in ret_type.shape]
            shape_expr = expr.const(shape, dtype=self.compute_dtype)
        return self.reshape_tensor(new_args[0], shape_expr, ret_type.shape)

    def is_dynamic(self, ret_type):
        """Whether the (possibly nested) return type has any dynamic dims."""
        is_dynamic = ty.is_dynamic(ret_type)
        # TODO(@jroesch): restore this code, more complex then it seems
        # for arg in call.args:
        #     is_dynamic = is_dynamic or arg.checked_type.is_dynamic()
        return is_dynamic

    def visit_call(self, call):
        """Rewrite a primitive call into explicit allocation + invocation."""
        if is_primitive(call):
            # Because we are in ANF we do not need to visit the arguments.
            scope = self.current_scope()
            new_args = [self.visit(arg) for arg in call.args]

            ins = expr.Tuple(new_args)
            ret_type = call.checked_type
            out_types = flatten_tuple_type(ret_type)

            if is_reshape_only(call.op):
                # Handle fused op that only contains reshape op
                return self.emit_reshape_tensor(scope, call.op, new_args, ret_type)

            if is_device_copy(call.op):
                # Handle device copy op
                if isinstance(call.op, Function):
                    attr = call.op.body.attrs
                else:
                    attr = call.attr
                return self.device_copy(
                    new_args[0], TVMContext(attr.src_dev_type, 0), TVMContext(attr.dst_dev_type, 0)
                )

            if self.is_dynamic(ret_type):
                # Handle dynamic case.
                return self.dynamic_invoke(scope, call.op, ins, new_args, out_types, ret_type)

            # Handle static case.
            outs = []
            for i, out_ty in enumerate(out_types):
                ctx = self.get_context(call)
                assert isinstance(ctx, TVMContext)
                out = self.make_static_allocation(scope, out_ty, ctx, i)
                outs.append(out)

            output = expr.Tuple(outs)
            invoke = self.invoke_tvm(call.op, ins, output)
            # Bind the invocation for its side effect; its value is unused.
            scope.let("", invoke)
            return to_tuple_type(ret_type, output.fields)
        return super().visit_call(call)
def mk_analysis_annotator(results):
    """Pretty print the annotated relay program with device info"""

    def _annotator(exp):
        # Expressions the analysis did not place get no annotation.
        if exp not in results:
            return ""
        val = results[exp]
        assert len(val) == 2
        ctx = TVMContext(val[0].value, val[1].value)
        return f"<{ctx}>"

    return _annotator
@module_pass(opt_level=0)
class ManifestAlloc:
    """The explicit pass wrapper around ManifestAlloc."""

    # TODO(zhiics, jroesch) Port this pass to C++.
    def __init__(self, target_host, targets):
        self.target_host = target_host
        # Mapping of device -> target (dict or tvm container.Map).
        self.targets = targets

    def transform_module(self, mod, _):
        """Invokes the pass"""
        # TODO(@jroesch): Is there a way to do one shot initialization?
        # can we have def pass_init?
        mod.import_from_std("core.rly")
        mod = InferType()(mod)

        assert isinstance(self.targets, (dict, container.Map))
        if len(self.targets) > 1:
            # Heterogeneous build: run context analysis with a fallback
            # device taken from the pass config (or CPU by default).
            pass_ctx = PassContext.current()
            if "relay.fallback_device_type" in pass_ctx.config:
                fallback_ctx = nd.context(pass_ctx.config["relay.fallback_device_type"])
            else:
                fallback_ctx = cpu(0)
            ca = context_analysis(mod, TVMContext(fallback_ctx.device_type, 0))
        else:
            # Homogeneous build: every expression lives on the sole target.
            if isinstance(self.targets, dict):
                dev = list(self.targets.keys())[0]
            else:
                dev, _ = self.targets.items()[0]
            ca = context_analysis(mod, nd.context(dev.value))

        # The following code can be used for debugging the module after
        # annotation.
        # print(mod.astext(show_meta_data=False, annotate=mk_analysis_annotator(ca)))

        # Rewrite every function with explicit allocations.
        gv_funcs = mod.functions
        for gv, f in gv_funcs.items():
            ea = ManifestAllocPass(self.target_host, ca)
            f = ea.visit(f)
            mod.update_func(gv, f)
        return mod
# Expose the pass to the C++ side under its canonical transform name.
register_func("relay.transform.ManifestAlloc", ManifestAlloc)
| tqchen/tvm | python/tvm/relay/transform/memory_alloc.py | Python | apache-2.0 | 14,547 | [
"VisIt"
] | bfb30f09622194e4606049df269a6e125e12060302054ac6e1d15cc59975a242 |
class BaseVisitor(object):
    """
    Common base for AST visitors: keeps a reference to the compiler and
    dispatches through each node's double-dispatch ``accept`` hook.
    """

    def __init__(self, compiler):
        # Compiler instance that concrete visitors may consult.
        self._compiler = compiler

    def _visit_list(self, nodes, *args, **kwargs):
        # Visit the nodes in order and collect each result.
        results = []
        for node in nodes:
            results.append(self.visit(node, *args, **kwargs))
        return results

    def visit(self, node, *args, **kwargs):
        # Double dispatch: the node selects the visitor method to run.
        return node.accept(self, *args, **kwargs)
| helgefmi/Easy | src/easy/visitors/base.py | Python | mit | 311 | [
"VisIt"
] | 3fe0a9f2d21aaaeaaafa4de4f705205fa77f4ab84962f68f2a176ac77e947a1d |
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes classification examples
"""
import GPy
default_seed = 10000
def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
    """
    Run a Gaussian process classification on the three phase oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.

    :param num_inducing: number of inducing points for the sparse GP.
    :param max_iters: total optimization iterations, split over 5 restarts.
    :param kernel: optional GPy kernel; GPy chooses a default when None.
    :param optimize: whether to optimize the model hyperparameters.
    :param plot: unused here; kept for interface consistency with the
        other examples in this module.
    """
    try:import pods
    except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
    data = pods.datasets.oil()

    X = data['X']
    Xtest = data['Xtest']
    Y = data['Y'][:, 0:1]
    Ytest = data['Ytest'][:, 0:1]
    # Relabel targets from {-1, 1} to {0, 1} for the Bernoulli likelihood.
    Y[Y.flatten()==-1] = 0
    Ytest[Ytest.flatten()==-1] = 0

    # Create GP model
    m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, num_inducing=num_inducing)

    # Constrain all parameters to be positive
    #m.tie_params('.*len')
    m['.*len'] = 10.

    # Optimize
    if optimize:
        for _ in range(5):
            m.optimize(max_iters=int(max_iters/5))
            print(m)

    #Test
    probs = m.predict(Xtest)[0]
    GPy.util.classification.conf_matrix(probs, Ytest)

    return m
def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
    """
    Simple 1D classification example using EP approximation

    :param seed: seed value for data generation (default is 4).
    :type seed: int
    :param optimize: whether to optimize the model hyperparameters.
    :param plot: whether to plot the latent function and predictions.
    """
    try:import pods
    except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
    data = pods.datasets.toy_linear_1d_classification(seed=seed)
    Y = data['Y'][:, 0:1]
    # Relabel targets from {-1, 1} to {0, 1}.
    Y[Y.flatten() == -1] = 0

    # Model definition
    m = GPy.models.GPClassification(data['X'], Y)

    # Optimize
    if optimize:
        #m.update_likelihood_approximation()
        # Parameters optimization:
        m.optimize()
        #m.update_likelihood_approximation()
        #m.pseudo_EM()

    # Plot
    if plot:
        from matplotlib import pyplot as plt
        fig, axes = plt.subplots(2, 1)
        m.plot_f(ax=axes[0])  # latent function
        m.plot(ax=axes[1])    # predictive probability

    print m
    return m
def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True):
    """
    Simple 1D classification example using Laplace approximation

    :param seed: seed value for data generation (default is 4).
    :type seed: int
    :param optimize: whether to optimize with scaled conjugate gradients.
    :param plot: whether to plot the latent function and predictions.
    """
    try:import pods
    except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
    data = pods.datasets.toy_linear_1d_classification(seed=seed)
    Y = data['Y'][:, 0:1]
    # Relabel targets from {-1, 1} to {0, 1}.
    Y[Y.flatten() == -1] = 0

    # Bernoulli likelihood with a Laplace approximation to the posterior.
    likelihood = GPy.likelihoods.Bernoulli()
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    kernel = GPy.kern.RBF(1)

    # Model definition
    m = GPy.core.GP(data['X'], Y, kernel=kernel, likelihood=likelihood, inference_method=laplace_inf)

    # Optimize
    if optimize:
        try:
            m.optimize('scg', messages=1)
        except Exception as e:
            # NOTE(review): optimization failures are silently swallowed and
            # the unoptimized model is returned; `e` is never inspected.
            return m

    # Plot
    if plot:
        from matplotlib import pyplot as plt
        fig, axes = plt.subplots(2, 1)
        m.plot_f(ax=axes[0])  # latent function
        m.plot(ax=axes[1])    # predictive probability

    print m
    return m
def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, optimize=True, plot=True):
    """
    Sparse 1D classification example

    :param num_inducing: number of inducing points for the sparse GP.
    :param seed: seed value for data generation (default is 4).
    :type seed: int
    :param optimize: whether to optimize the model hyperparameters.
    :param plot: whether to plot the latent function and predictions.
    """
    try:import pods
    except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
    data = pods.datasets.toy_linear_1d_classification(seed=seed)
    Y = data['Y'][:, 0:1]
    # Relabel targets from {-1, 1} to {0, 1}.
    Y[Y.flatten() == -1] = 0

    # Model definition
    m = GPy.models.SparseGPClassification(data['X'], Y, num_inducing=num_inducing)
    # Start from a moderately long lengthscale.
    m['.*len'] = 4.

    # Optimize
    if optimize:
        m.optimize()

    # Plot
    if plot:
        from matplotlib import pyplot as plt
        fig, axes = plt.subplots(2, 1)
        m.plot_f(ax=axes[0])  # latent function
        m.plot(ax=axes[1])    # predictive probability

    print m
    return m
def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
    """
    Simple 1D classification example using a heavy side gp transformation

    :param seed: seed value for data generation (default is 4).
    :type seed: int
    :param max_iters: total optimization iterations, split over 5 restarts.
    :param optimize: whether to optimize the model hyperparameters.
    :param plot: whether to plot the latent function and predictions.
    """
    try:import pods
    except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
    data = pods.datasets.toy_linear_1d_classification(seed=seed)
    Y = data['Y'][:, 0:1]
    # Relabel targets from {-1, 1} to {0, 1}.
    Y[Y.flatten() == -1] = 0

    # Model definition: Bernoulli likelihood through a Heaviside link,
    # with EP inference.
    kernel = GPy.kern.RBF(1)
    likelihood = GPy.likelihoods.Bernoulli(gp_link=GPy.likelihoods.link_functions.Heaviside())
    ep = GPy.inference.latent_function_inference.expectation_propagation.EP()
    m = GPy.core.GP(X=data['X'], Y=Y, kernel=kernel, likelihood=likelihood, inference_method=ep, name='gp_classification_heaviside')
    #m = GPy.models.GPClassification(data['X'], likelihood=likelihood)

    # Optimize
    if optimize:
        # Parameters optimization:
        for _ in range(5):
            m.optimize(max_iters=int(max_iters/5))
        print m

    # Plot
    if plot:
        from matplotlib import pyplot as plt
        fig, axes = plt.subplots(2, 1)
        m.plot_f(ax=axes[0])  # latent function
        m.plot(ax=axes[1])    # predictive probability

    print m
    return m
def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
:param inducing: number of inducing variables (only used for 'FITC' or 'DTC').
:type inducing: int
:param seed: seed value for data generation.
:type seed: int
:param kernel: kernel to use in the model
:type kernel: a GPy kernel
"""
try:import pods
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
data = pods.datasets.crescent_data(seed=seed)
Y = data['Y']
Y[Y.flatten()==-1] = 0
if model_type == 'Full':
m = GPy.models.GPClassification(data['X'], Y, kernel=kernel)
elif model_type == 'DTC':
m = GPy.models.SparseGPClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 10.
elif model_type == 'FITC':
m = GPy.models.FITCClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 3.
if optimize:
m.pseudo_EM()
if plot:
m.plot()
print m
return m
| TianpeiLuke/GPy | GPy/examples/classification.py | Python | bsd-3-clause | 6,767 | [
"Gaussian"
] | e0ef5b35df6253d2603fb68c1539e50ca85f4f7f86bd6a7f69f39099d35059c0 |
from __future__ import print_function, division, unicode_literals
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.operations import SymmOp
from pymatgen.io.vasp.outputs import Vasprun, Locpot, VolumetricData
from pymatgen.io.vasp.inputs import Incar
from pymatgen.electronic_structure.plotter import BSPlotter, BSPlotterProjected
from pymatgen.electronic_structure.bandstructure import BandStructure
from pymatgen.electronic_structure.core import Spin
from twod_materials.utils import is_converged
def get_band_edges():
    """
    Calculate the band edge locations relative to the vacuum level
    for a semiconductor. If spin-polarized, returns all 4 band edges.
    Reads LOCPOT and vasprun.xml from the current directory.
    """
    # Vacuum level = maximum of the planar-averaged potential along z.
    vacuum_level = max(Locpot.from_file('LOCPOT').get_average_along_axis(2))

    vasprun = Vasprun('vasprun.xml')
    fermi_energy = vasprun.efermi - vacuum_level

    if vasprun.get_band_structure().is_spin_polarized:
        # Collect (energy, occupation) eigenvalue pairs per spin channel.
        eigenvals = {Spin.up: [], Spin.down: []}
        for band in vasprun.eigenvalues:
            for eigenvalue in vasprun.eigenvalues[band]:
                eigenvals[band[0]].append(eigenvalue)

        def _cbm(channel):
            # Lowest unoccupied eigenvalue, referenced to vacuum.
            return min([e[0] for e in eigenvals[channel] if not e[1]]) - vacuum_level

        def _vbm(channel):
            # Highest occupied eigenvalue, referenced to vacuum.
            return max([e[0] for e in eigenvals[channel] if e[1]]) - vacuum_level

        return {'up_cbm': _cbm(Spin.up), 'up_vbm': _vbm(Spin.up),
                'dn_cbm': _cbm(Spin.down), 'dn_vbm': _vbm(Spin.down),
                'efermi': fermi_energy}

    band_structure = vasprun.get_band_structure()
    return {'cbm': band_structure.get_cbm()['energy'] - vacuum_level,
            'vbm': band_structure.get_vbm()['energy'] - vacuum_level,
            'efermi': fermi_energy}
def plot_band_alignments(directories, run_type='PBE', fmt='pdf'):
    """
    Plot CBM's and VBM's of all compounds together, relative to the band
    edges of H2O.

    Args:
        directories (list): list of the directory paths for materials
            to include in the plot.
        run_type (str): 'PBE' or 'HSE', so that the function knows which
            subdirectory to go into (pbe_bands or hse_bands).
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """

    if run_type == 'HSE':
        subdirectory = 'hse_bands'
    else:
        subdirectory = 'pbe_bands'

    # Gather CBM/VBM/metallicity data per converged compound directory.
    band_gaps = {}

    for directory in directories:
        if is_converged('{}/{}'.format(directory, subdirectory)):
            os.chdir('{}/{}'.format(directory, subdirectory))
            band_structure = Vasprun('vasprun.xml').get_band_structure()
            band_gap = band_structure.get_band_gap()

            # Vacuum level energy from LOCPOT.
            locpot = Locpot.from_file('LOCPOT')
            evac = max(locpot.get_average_along_axis(2))

            if not band_structure.is_metal():
                is_direct = band_gap['direct']
                cbm = band_structure.get_cbm()
                vbm = band_structure.get_vbm()

            else:
                cbm = None
                vbm = None
                # NOTE(review): this local is never read; the 'Metal' entry
                # below is recomputed via band_structure.is_metal().
                is_metal = True
                is_direct = False

            band_gaps[directory] = {'CBM': cbm, 'VBM': vbm,
                                    'Direct': is_direct,
                                    'Metal': band_structure.is_metal(),
                                    'E_vac': evac}

            os.chdir('../../')

    ax = plt.figure(figsize=(16, 10)).gca()

    # Leave room on the right for the legend boxes.
    x_max = len(band_gaps) * 1.315
    ax.set_xlim(0, x_max)

    # Rectangle representing band edges of water.
    ax.add_patch(plt.Rectangle((0, -5.67), height=1.23, width=len(band_gaps),
                               facecolor='#00cc99', linewidth=0))
    ax.text(len(band_gaps) * 1.01, -4.44, r'$\mathrm{H+/H_2}$', size=20,
            verticalalignment='center')
    ax.text(len(band_gaps) * 1.01, -5.67, r'$\mathrm{O_2/H_2O}$', size=20,
            verticalalignment='center')

    x_ticklabels = []
    y_min = -8
    i = 0

    # Nothing but lies.
    are_directs, are_indirects, are_metals = False, False, False

    for compound in [cpd for cpd in directories if cpd in band_gaps]:
        x_ticklabels.append(compound)

        # Plot all energies relative to their vacuum level.
        evac = band_gaps[compound]['E_vac']
        if band_gaps[compound]['Metal']:
            # Metals get a filler column spanning the whole axis.
            cbm = -8
            vbm = -2
        else:
            cbm = band_gaps[compound]['CBM']['energy'] - evac
            vbm = band_gaps[compound]['VBM']['energy'] - evac

        # Add a box around direct gap compounds to distinguish them.
        if band_gaps[compound]['Direct']:
            are_directs = True
            linewidth = 5
        elif not band_gaps[compound]['Metal']:
            are_indirects = True
            linewidth = 0

        # Metals are grey.
        if band_gaps[compound]['Metal']:
            are_metals = True
            linewidth = 0
            color_code = '#404040'
        else:
            color_code = '#002b80'

        # CBM
        ax.add_patch(plt.Rectangle((i, cbm), height=-cbm, width=0.8,
                                   facecolor=color_code, linewidth=linewidth,
                                   edgecolor="#e68a00"))
        # VBM
        ax.add_patch(plt.Rectangle((i, y_min),
                                   height=(vbm - y_min), width=0.8,
                                   facecolor=color_code, linewidth=linewidth,
                                   edgecolor="#e68a00"))

        i += 1

    ax.set_ylim(y_min, 0)

    # Set tick labels
    ax.set_xticks([n + 0.4 for n in range(i)])
    ax.set_xticklabels(x_ticklabels, family='serif', size=20, rotation=60)
    ax.set_yticklabels(ax.get_yticks(), family='serif', size=20)

    # Add a legend (one swatch per category actually present).
    height = y_min
    if are_directs:
        ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
                                   height=(-y_min*0.1), facecolor='#002b80',
                                   edgecolor='#e68a00', linewidth=5))
        ax.text(i*1.24, height - y_min * 0.05, 'Direct', family='serif',
                color='w', size=20, horizontalalignment='center',
                verticalalignment='center')
        height -= y_min * 0.15

    if are_indirects:
        ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
                                   height=(-y_min*0.1), facecolor='#002b80',
                                   linewidth=0))
        ax.text(i*1.24, height - y_min * 0.05, 'Indirect', family='serif',
                size=20, color='w', horizontalalignment='center',
                verticalalignment='center')
        height -= y_min * 0.15

    if are_metals:
        ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
                                   height=(-y_min*0.1), facecolor='#404040',
                                   linewidth=0))
        ax.text(i*1.24, height - y_min * 0.05, 'Metal', family='serif',
                size=20, color='w', horizontalalignment='center',
                verticalalignment='center')

    # Who needs axes?
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)

    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')

    ax.set_ylabel('eV', family='serif', size=24)

    plt.savefig('band_alignments.{}'.format(fmt), transparent=True)

    plt.close()
def plot_local_potential(axis=2, ylim=(-20, 0), fmt='pdf'):
    """
    Plot data from the LOCPOT file along any of the 3 primary axes.
    Useful for determining surface dipole moments and electric
    potentials on the interior of the material.

    Requires LOCPOT, CONTCAR, and vasprun.xml in the current directory.

    Args:
        axis (int): 0 = x, 1 = y, 2 = z
        ylim (tuple): minimum and maximum potentials for the plot's
            y-axis.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """
    ax = plt.figure(figsize=(16, 10)).gca()

    locpot = Locpot.from_file('LOCPOT')
    structure = Structure.from_file('CONTCAR')
    vd = VolumetricData(structure, locpot.data)
    abs_potentials = vd.get_average_along_axis(axis)
    # Vacuum level = maximum of the planar-averaged potential.
    vacuum_level = max(abs_potentials)

    vasprun = Vasprun('vasprun.xml')
    bs = vasprun.get_band_structure()
    if not bs.is_metal():
        cbm = bs.get_cbm()['energy'] - vacuum_level
        vbm = bs.get_vbm()['energy'] - vacuum_level

    # Reference all potentials to the vacuum level.
    potentials = [potential - vacuum_level for potential in abs_potentials]
    axis_length = structure.lattice._lengths[axis]
    positions = np.arange(0, axis_length, axis_length / len(potentials))

    ax.plot(positions, potentials, linewidth=2, color='k')

    ax.set_xlim(0, axis_length)
    ax.set_ylim(ylim[0], ylim[1])

    ax.set_xticklabels([r'$\mathrm{%s}$' % tick for tick in ax.get_xticks()],
                       size=20)
    ax.set_yticklabels([r'$\mathrm{%s}$' % tick for tick in ax.get_yticks()],
                       size=20)
    ax.set_xlabel(r'$\mathrm{\AA}$', size=24)
    ax.set_ylabel(r'$\mathrm{V\/(eV)}$', size=24)

    # BUG FIX: this previously called bs.ismetal(), which does not exist
    # (the method used above is is_metal()), so the function raised
    # AttributeError here on every run.
    if not bs.is_metal():
        ax.text(ax.get_xlim()[1], cbm, r'$\mathrm{CBM}$',
                horizontalalignment='right', verticalalignment='bottom',
                size=20)
        ax.text(ax.get_xlim()[1], vbm, r'$\mathrm{VBM}$',
                horizontalalignment='right', verticalalignment='top', size=20)
        ax.fill_between(ax.get_xlim(), cbm, ax.get_ylim()[1],
                        facecolor=plt.cm.jet(0.3), zorder=0, linewidth=0)
        ax.fill_between(ax.get_xlim(), ax.get_ylim()[0], vbm,
                        facecolor=plt.cm.jet(0.7), zorder=0, linewidth=0)
    plt.savefig('locpot.{}'.format(fmt))
    plt.close()
def plot_band_structure(ylim=(-5, 5), draw_fermi=False, fmt='pdf'):
    """
    Plot a standard band structure with no projections.

    Args:
        ylim (tuple): minimum and maximum potentials for the plot's
            y-axis.
        draw_fermi (bool): whether or not to draw a dashed line at
            E_F.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """
    vasprun = Vasprun('vasprun.xml')
    plotter = BSPlotter(vasprun.get_band_structure(
        'KPOINTS', line_mode=True, efermi=vasprun.efermi))

    figure = plotter.get_plot(ylim=ylim).gcf()
    axes = figure.gca()
    # Wrap the k-point labels in mathtext so they render consistently.
    axes.set_xticklabels(
        [r'$\mathrm{%s}$' % label for label in axes.get_xticklabels()])
    if draw_fermi:
        # Dashed line at E_F (zero after the plotter's energy shift).
        xlim = axes.get_xlim()
        axes.plot([xlim[0], xlim[1]], [0, 0], 'k--')
    figure.savefig('band_structure.{}'.format(fmt), transparent=True)
    plt.close()
def plot_color_projected_bands(ylim=(-5, 5), fmt='pdf'):
    """
    Plot a single band structure where the color of the band indicates
    the elemental character of the eigenvalue.

    Args:
        ylim (tuple): minimum and maximum energies for the plot's
            y-axis.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """
    vasprun = Vasprun('vasprun.xml', parse_projected_eigen=True)
    plotter = BSPlotterProjected(
        vasprun.get_band_structure('KPOINTS', line_mode=True))

    figure = plotter.get_elt_projected_plots_color().gcf()
    axes = figure.gca()
    # Wrap the k-point labels in mathtext so they render consistently.
    axes.set_xticklabels(
        [r'$\mathrm{%s}$' % label for label in axes.get_xticklabels()])
    axes.set_ylim(ylim)
    figure.savefig('color_projected_bands.{}'.format(fmt))
    plt.close()
def plot_elt_projected_bands(ylim=(-5, 5), fmt='pdf'):
    """
    Plot separate band structures for each element where the size of the
    markers indicates the elemental character of the eigenvalue.

    Args:
        ylim (tuple): minimum and maximum energies for the plot's
            y-axis.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """
    vasprun = Vasprun('vasprun.xml', parse_projected_eigen=True)
    plotter = BSPlotterProjected(
        vasprun.get_band_structure('KPOINTS', line_mode=True))
    # The projected plotter returns a matplotlib plot we can save directly.
    element_plot = plotter.get_elt_projected_plots(ylim=ylim)
    element_plot.savefig('elt_projected_bands.{}'.format(fmt))
    plt.close()
def plot_orb_projected_bands(orbitals, fmt='pdf', ylim=(-5, 5)):
    """
    Plot a separate band structure for each orbital of each element in
    orbitals.

    Args:
        orbitals (dict): dictionary of the form
            {element: [orbitals]},
            e.g. {'Mo': ['s', 'p', 'd'], 'S': ['p']}
        ylim (tuple): minimum and maximum energies for the plot's
            y-axis.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """
    vasprun = Vasprun('vasprun.xml', parse_projected_eigen=True)
    plotter = BSPlotterProjected(
        vasprun.get_band_structure('KPOINTS', line_mode=True))
    # One dotted sub-plot per requested (element, orbital) pair.
    orbital_plot = plotter.get_projected_plots_dots(orbitals, ylim=ylim)
    orbital_plot.savefig('orb_projected_bands.{}'.format(fmt))
    plt.close()
def get_effective_mass():
    """
    This function is in a beta stage, and its results are not
    guaranteed to be useful.

    Finds effective masses from a band structure, using parabolic
    fitting to determine the band curvature at the CBM
    for electrons and at the VBM for holes. This curvature enters
    the equation m* = (hbar)**2 / (d^2E/dk^2).

    To consider anisotropy, the k-space directions to the left and right
    of the CBM/VBM in the band diagram are returned separately.

    *NOTE* Only works for semiconductors and linemode calculations (non-
        spin polarized).
        >30 k-points per string recommended to obtain
        reliable curvatures.

    *NOTE* The parabolic fit can be quite sensitive to the number of
        k-points fit to, so it might be worthwhile adjusting N_KPTS
        to obtain some sense of the error bar.

    TODO: Warn user if CBM/VBM is at the edge of the diagram, and which
        direction (either left or right) was not actually fit to.
        Until fixed, this (most likely) explains any negative masses
        returned.

    Returns:
        Dictionary of the form
            {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
             'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
            where 'left' and 'right' indicate the reciprocal
            directions to the left and right of the extremum in the
            band structure.
    """
    H_BAR = 6.582119514e-16  # eV*s
    M_0 = 9.10938356e-31  # kg
    N_KPTS = 6  # Number of k-points included in the parabola.

    spin_up = Spin(1)
    band_structure = Vasprun('vasprun.xml').get_band_structure()

    # Locations of CBM and VBM in band_structure.bands
    cbm = band_structure.get_cbm()
    vbm = band_structure.get_vbm()
    cbm_band_index = cbm['band_index'][spin_up][0]
    cbm_kpoint_index = cbm['kpoint_index'][0]
    vbm_band_index = vbm['band_index'][spin_up][0]
    vbm_kpoint_index = vbm['kpoint_index'][0]

    def _curvature(band_index, kpoint_index, offsets):
        """Parabolic-fit curvature d^2E/dk^2 around the extremum at
        kpoint_index, sampled at kpoint_index + n for each n in offsets.
        k is the Cartesian reciprocal-space distance from the extremum."""
        ref = band_structure.kpoints[kpoint_index]._ccoords
        k, E = [], []
        for n in offsets:
            coords = band_structure.kpoints[kpoint_index + n]._ccoords
            k.append(((coords[0] - ref[0])**2 +
                      (coords[1] - ref[1])**2 +
                      (coords[2] - ref[2])**2)**0.5)
            E.append(
                band_structure.bands[spin_up][band_index][kpoint_index + n])
        # 2nd order fit; the double derivative of a*k^2+b*k+c is 2a.
        fit = np.poly1d(np.polyfit(k, E, 2))
        return fit.deriv().deriv()[0]

    # The extremum itself (n=0) is included on the left side only,
    # matching the original sampling.
    left = range(-N_KPTS, 1)
    right = range(1, 1 + N_KPTS)

    # Unit conversion into multiples of the electron rest mass; holes get
    # a sign flip since their band curves downward.
    e_m_eff_l = 10 * ((H_BAR ** 2) / _curvature(
        cbm_band_index, cbm_kpoint_index, left)) / M_0
    e_m_eff_r = 10 * ((H_BAR ** 2) / _curvature(
        cbm_band_index, cbm_kpoint_index, right)) / M_0
    h_m_eff_l = -10 * ((H_BAR ** 2) / _curvature(
        vbm_band_index, vbm_kpoint_index, left)) / M_0
    h_m_eff_r = -10 * ((H_BAR ** 2) / _curvature(
        vbm_band_index, vbm_kpoint_index, right)) / M_0

    return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
            'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
def plot_density_of_states(fmt='pdf'):
    """
    Plots the density of states from the DOSCAR in the cwd. Plots
    spin up in red, down in green, and the sum in black. Efermi = 0.

    Requires vasprun.xml, DOSCAR, and INCAR (with NEDOS set) in the
    current directory.

    Args:
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """
    efermi = Vasprun('vasprun.xml').efermi
    ticks = [-10, -5, -3, -2, -1, 0, 1, 2, 3, 5]

    with open('DOSCAR') as f:
        dos_lines = f.readlines()
    nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1

    # BUG FIX: the original initialized these with np.array() (TypeError:
    # np.array requires an argument) and then called .append() on them
    # (ndarray has no append). Accumulate in lists and convert at the end.
    energies, up, down = [], [], []
    # DOS data starts on line 7 of DOSCAR; columns are E, up, down.
    for line in dos_lines[6:6 + nedos]:
        split_line = line.split()
        energies.append(float(split_line[0]) - efermi)
        up.append(float(split_line[1]))
        down.append(-float(split_line[2]))  # down-spin plotted as negative
    # Renamed from `sum` to avoid shadowing the builtin.
    total = np.array(up) + np.array(down)

    ax = plt.figure().gca()
    ax.set_xlim(-10, 5)
    ax.set_ylim(-1.5, 1.5)
    ax.set_xlabel('E (eV)')
    ax.set_ylabel('Density of States')
    ax.set_xticks(ticks)
    ax.plot(energies, up, color='red')
    ax.plot(energies, down, color='green')
    ax.plot(energies, total, color='black')
    plt.savefig('density_of_states.{}'.format(fmt))
    plt.close()
def get_fermi_velocities():
    """
    Calculates the fermi velocity of each band that crosses the fermi
    level, according to v_F = dE/(h_bar*dk).

    Returns:
        fermi_velocities (list). The absolute values of the
            adjusted slopes of each band, in Angstroms/s.
    """
    vr = Vasprun('vasprun.xml')
    # NOTE: the original also read vr.eigenvalues into an unused local;
    # that dead assignment has been removed.
    bs = vr.get_band_structure()
    bands = bs.bands
    kpoints = bs.kpoints
    efermi = bs.efermi
    h_bar = 6.582e-16  # eV*s

    # Collect the bands that actually cross the Fermi level.
    fermi_bands = []
    for spin in bands:
        for band in bands[spin]:
            if max(band) > efermi > min(band):
                fermi_bands.append(band)

    fermi_velocities = []
    for band in fermi_bands:
        for i in range(len(band) - 1):
            # A crossing occurs wherever consecutive eigenvalues straddle
            # E_F, in either direction.
            if (band[i] < efermi < band[i + 1]) or \
                    (band[i] > efermi > band[i + 1]):
                # In-plane k-space distance; only kx and ky are used
                # (presumably because the materials are 2D — confirm).
                dk = np.sqrt((kpoints[i + 1].cart_coords[0]
                              - kpoints[i].cart_coords[0])**2
                             + (kpoints[i + 1].cart_coords[1]
                                - kpoints[i].cart_coords[1])**2)
                v_f = abs((band[i + 1] - band[i]) / (h_bar * dk))
                fermi_velocities.append(v_f)

    return fermi_velocities  # Values are in Angst./s
def find_dirac_nodes(tol=0.1):
    """
    Look for band crossings near (within `tol` eV) the Fermi level.

    Args:
        tol (float): energy window (eV) around E_F; the search runs only
            if the band gap is below tol, and two bands count as crossing
            when they approach each other within tol near E_F.
            Defaults to 0.1, the value previously hard-coded.

    Returns:
        boolean. Whether or not a band crossing occurs at or near
            the fermi level.
    """
    vasprun = Vasprun('vasprun.xml')
    dirac = False
    # Only bother searching when the gap is (nearly) closed.
    if vasprun.get_band_structure().get_band_gap()['energy'] < tol:
        efermi = vasprun.efermi
        bsp = BSPlotter(vasprun.get_band_structure('KPOINTS', line_mode=True,
                                                   efermi=efermi))
        bands = []
        data = bsp.bs_plot_data(zero_to_efermi=True)
        for d in range(len(data['distances'])):
            for i in range(bsp._nb_bands):
                # BUG FIX: a trailing comma previously made `x` a 1-tuple,
                # so len(bands[i][0]) was always 1 and only the first
                # k-point of each branch was ever inspected below.
                x = data['distances'][d]
                y = [data['energy'][d][str(Spin.up)][i][j]
                     for j in range(len(data['distances'][d]))]
                bands.append([x, y])
        considered = set()  # pairs already compared (O(1) membership)
        for i in range(len(bands)):
            for j in range(len(bands)):
                if i != j and (j, i) not in considered:
                    considered.add((j, i))
                    for k in range(len(bands[i][0])):
                        # Band i near E_F and nearly degenerate with band j.
                        if -tol < bands[i][1][k] < tol and \
                                -tol < bands[i][1][k] - bands[j][1][k] < tol:
                            dirac = True
    return dirac
def plot_spin_texture(inner_index, outer_index, center=(0, 0), fmt='pdf'):
    """
    Create six plots- one for the spin texture in x, y, and z in
    each of two bands: an inner band and an outer band. For
    Rashba spin-splitting, these two bands should be the two that
    have split.

    Requires a PROCAR file in the current directory.

    Args:
        inner_index, outer_index (int): indices of the two spin-split
            bands.
        center (tuple): coordinates of the center of the splitting
            (where the bands cross). Defaults to Gamma.
        fmt: matplotlib format style. Check the matplotlib
            docs for options.
    """
    procar_lines = open("PROCAR").readlines()
    # PROCAR header line 2 holds the k-point, band, and ion counts at
    # fixed token positions.
    data = procar_lines[1].split()
    n_kpts = int(data[3])
    n_bands = int(data[7])
    n_ions = int(data[11])
    # These numbers, along with almost everything else in this
    # function, are magical. Don't touch them.
    # NOTE(review): band_step/k_step look like the line counts of one band
    # block (4 projection tables of n_ions+1 lines plus 4 bookkeeping
    # lines) and one k-point block — confirm against the PROCAR format of
    # the VASP version in use before changing.
    band_step = (n_ions + 1) * 4 + 4
    k_step = n_bands * band_step + 3
    kpoints = []
    spin_textures = {'inner': {'x': [], 'y': [], 'z': []},
                     'outer': {'x': [], 'y': [], 'z': []}}
    # Pre-fill one accumulator slot per k-point for each component.
    for n in range(n_kpts):
        for var in ['x', 'y', 'z']:
            spin_textures['inner'][var].append(0)
            spin_textures['outer'][var].append(0)
    # i walks the k-point blocks in the file; j indexes the k-point slots.
    i = 3
    j = 0
    while i < len(procar_lines):
        # Fixed-width fields: columns 18:29 and 29:40 hold kx and ky,
        # shifted so `center` sits at the origin.
        kpoints.append([float(procar_lines[i][18:29]) - center[0],
                        float(procar_lines[i][29:40]) - center[1]])
        # The last token of the chosen line is the 'tot' value of the
        # sigma_x / sigma_y / sigma_z projection table for that band.
        spin_textures['inner']['x'][j] += float(
            procar_lines[i+(4+(n_ions+1)*2)+inner_index*band_step].split()[-1]
        )
        spin_textures['inner']['y'][j] += float(
            procar_lines[i+(4+(n_ions+1)*3)+inner_index*band_step].split()[-1]
        )
        spin_textures['inner']['z'][j] += float(
            procar_lines[i+(4+(n_ions+1)*4)+inner_index*band_step].split()[-1]
        )
        spin_textures['outer']['x'][j] += float(
            procar_lines[i+(4+(n_ions+1)*2)+outer_index*band_step].split()[-1]
        )
        spin_textures['outer']['y'][j] += float(
            procar_lines[i+(4+(n_ions+1)*3)+outer_index*band_step].split()[-1]
        )
        spin_textures['outer']['z'][j] += float(
            procar_lines[i+(4+(n_ions+1)*4)+outer_index*band_step].split()[-1]
        )
        i += k_step
        j += 1
    # One polar plot per (band, spin-component) combination.
    for branch in spin_textures:
        for vector in spin_textures[branch]:
            print('plotting {}_{}.{}'.format(branch, vector, fmt))
            ax = plt.subplot(111, projection='polar')
            raw = [
                spin_textures[branch][vector][k] for k in range(len(kpoints))
            ]
            # Normalization range for the rainbow colormap below.
            minimum = min(raw)
            maximum = max(raw) - minimum
            r_max = max([np.sqrt(kpt[0]**2 + kpt[1]**2) for kpt in kpoints])
            for l in range(len(kpoints)):
                # Polar angle of the k-point, chosen by quadrant; the
                # kx == 0 cases avoid division by zero in arctan.
                if kpoints[l][0] == 0 and kpoints[l][1] > 0:
                    theta = np.pi / 2.0
                elif kpoints[l][0] == 0:
                    theta = 3.0 * np.pi / 2.0
                elif kpoints[l][0] < 0:
                    theta = np.pi + np.arctan(kpoints[l][1] / kpoints[l][0])
                else:
                    theta = np.arctan(kpoints[l][1] / kpoints[l][0])
                r = np.sqrt(kpoints[l][0]**2 + kpoints[l][1]**2)
                # Angular width ~ 1/r so each patch subtends a roughly
                # constant arc length.
                if r == 0:
                    w = 0
                else:
                    w = r_max*0.07/r
                ax.add_patch(
                    plt.Rectangle(
                        (theta, r), width=w, height=r_max*0.07,
                        color=plt.cm.rainbow(
                            (spin_textures[branch][vector][l]-minimum)/maximum
                        )
                    )
                )
            # Black dot marking the center (Gamma or `center`).
            ax.plot(0, 0, linewidth=0, marker='o', color='k', markersize=18)
            ax.set_rmax(r_max)
            plt.axis('off')
            plt.savefig('{}_{}.{}'.format(branch, vector, fmt))
            plt.close()
| ashtonmv/twod_materials | twod_materials/electronic_structure/analysis.py | Python | gpl-3.0 | 25,907 | [
"DIRAC",
"VASP",
"pymatgen"
] | 98ebf052be3aeadaad1496bab8b0a687660bfdbaabec671db70214ea3714267a |
###############################################################################
# TwoPowerTriaxialPotential.py: General class for triaxial potentials
# derived from densities with two power-laws
#
# amp/[4pia^3]
# rho(r)= ------------------------------------
# (m/a)^\alpha (1+m/a)^(\beta-\alpha)
#
# with
#
# m^2 = x^2 + y^2/b^2 + z^2/c^2
###############################################################################
import numpy
from scipy import special
from ..util import conversion
from .EllipsoidalPotential import EllipsoidalPotential
class TwoPowerTriaxialPotential(EllipsoidalPotential):
    """Class that implements triaxial potentials that are derived from
    two-power density models

    .. math::

        \\rho(x,y,z) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(m/a)^\\alpha\\,(1+m/a)^{\\beta-\\alpha}}

    with

    .. math::

        m^2 = x'^2 + \\frac{y'^2}{b^2}+\\frac{z'^2}{c^2}

    and :math:`(x',y',z')` is a rotated frame wrt :math:`(x,y,z)` specified by parameters ``zvec`` and ``pa`` which specify (a) ``zvec``: the location of the :math:`z'` axis in the :math:`(x,y,z)` frame and (b) ``pa``: the position angle of the :math:`x'` axis wrt the :math:`\\tilde{x}` axis, that is, the :math:`x` axis after rotating to ``zvec``.

    Note that this general class of potentials does *not* automatically revert to the special TriaxialNFWPotential, TriaxialHernquistPotential, or TriaxialJaffePotential when using their (alpha,beta) values (like TwoPowerSphericalPotential).
    """
    def __init__(self,amp=1.,a=5.,alpha=1.5,beta=3.5,b=1.,c=1.,
                 zvec=None,pa=None,glorder=50,
                 normalize=False,ro=None,vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           initialize a triaxial two-power-density potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass

           a - scale radius (can be Quantity)

           alpha - inner power (0 <= alpha < 3)

           beta - outer power ( beta > 2)

           b - y-to-x axis ratio of the density

           c - z-to-x axis ratio of the density

           zvec= (None) If set, a unit vector that corresponds to the z axis

           pa= (None) If set, the position angle of the x axis (rad or Quantity)

           glorder= (50) if set, compute the relevant force and potential integrals with Gaussian quadrature of this order

           normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           (none)

        HISTORY:

           2016-05-30 - Started - Bovy (UofT)

           2018-08-07 - Re-written using the general EllipsoidalPotential class - Bovy (UofT)

        """
        EllipsoidalPotential.__init__(self,amp=amp,b=b,c=c,
                                      zvec=zvec,pa=pa,glorder=glorder,
                                      ro=ro,vo=vo,amp_units='mass')
        # Parse a possible Quantity into internal (natural) units
        a= conversion.parse_length(a,ro=self._ro)
        self.a= a
        self._scale= self.a
        # Slopes outside these ranges are not supported by the
        # hypergeometric expressions used in _psi and _mass below
        # (note: raises IOError rather than ValueError for historical
        # reasons; left unchanged so existing callers' handlers still work)
        if beta <= 2. or alpha >= 3.:
            raise IOError('TwoPowerTriaxialPotential requires 0 <= alpha < 3 and beta > 2')
        self.alpha= alpha
        self.beta= beta
        # Frequently-used slope combinations, pre-computed once
        self.betaminusalpha= self.beta-self.alpha
        self.twominusalpha= 2.-self.alpha
        self.threeminusalpha= 3.-self.alpha
        if self.twominusalpha != 0.:
            # m -> infinity limit of the integral in _psi (only needed for
            # alpha != 2): Gamma(beta-2) Gamma(3-alpha) / Gamma(beta-alpha)
            self.psi_inf= special.gamma(self.beta-2.)\
                *special.gamma(3.-self.alpha)\
                /special.gamma(self.betaminusalpha)
        # Adjust amp: fold the 1/(4 pi a^3) density prefactor into the
        # amplitude so _mdens can use the dimensionless profile
        self._amp/= (4.*numpy.pi*self.a**3)
        if normalize or \
                (isinstance(normalize,(int,float)) \
                and not isinstance(normalize,bool)): #pragma: no cover
            self.normalize(normalize)
        return None
    def _psi(self,m):
        """\\psi(m) = -\\int_m^\\infty d m^2 \\rho(m^2); the building block
        of the ellipsoidal potential integral"""
        if self.twominusalpha == 0.:
            # Special case alpha = 2, where the general expression below
            # would divide by zero
            return -2.*self.a**2*(self.a/m)**self.betaminusalpha\
                /self.betaminusalpha\
                *special.hyp2f1(self.betaminusalpha,
                                self.betaminusalpha,
                                self.betaminusalpha+1,
                                -self.a/m)
        else:
            return -2.*self.a**2\
                *(self.psi_inf-(m/self.a)**self.twominusalpha\
                      /self.twominusalpha\
                      *special.hyp2f1(self.twominusalpha,
                                      self.betaminusalpha,
                                      self.threeminusalpha,
                                      -m/self.a))
    def _mdens(self,m):
        """Density as a function of m (amplitude prefactor already folded
        into self._amp in __init__)"""
        return (self.a/m)**self.alpha/(1.+m/self.a)**(self.betaminusalpha)
    def _mdens_deriv(self,m):
        """Derivative of the density as a function of m"""
        return -self._mdens(m)*(self.a*self.alpha+self.beta*m)/m/(self.a+m)
    def _mass(self,R,z=None,t=0.):
        """
        NAME:

           _mass

        PURPOSE:

           evaluate the mass within R (and z) for this potential; if z=None, integrate to ellipsoidal boundary

        INPUT:

           R - Galactocentric cylindrical radius

           z - vertical height

           t - time

        OUTPUT:

           the mass enclosed

        HISTORY:

           2021-03-09 - Written - Bovy (UofT)

        """
        if not z is None: raise AttributeError # Hack to fall back to general
        # Closed-form mass within the m = R ellipsoid in terms of 2F1
        return 4.*numpy.pi*self.a**self.alpha\
            *R**(3.-self.alpha)/(3.-self.alpha)*self._b*self._c\
            *special.hyp2f1(3.-self.alpha,self.betaminusalpha,
                            4.-self.alpha,-R/self.a)
class TriaxialHernquistPotential(EllipsoidalPotential):
    """Triaxial version of the Hernquist potential, with density

    .. math::

        \\rho(x,y,z) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(m/a)\\,(1+m/a)^{3}}

    where

    .. math::

        m^2 = x'^2 + \\frac{y'^2}{b^2}+\\frac{z'^2}{c^2}

    and :math:`(x',y',z')` is a frame rotated with respect to :math:`(x,y,z)` as specified by the parameters ``zvec`` (the location of the :math:`z'` axis in the :math:`(x,y,z)` frame) and ``pa`` (the position angle of the :math:`x'` axis wrt the :math:`\\tilde{x}` axis, that is, the :math:`x` axis after rotating to ``zvec``).
    """
    def __init__(self, amp=1., a=2., normalize=False, b=1., c=1., zvec=None,
                 pa=None, glorder=50, ro=None, vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           Initialize a triaxial Hernquist potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass

           a - scale radius (can be Quantity)

           b - y-to-x axis ratio of the density

           c - z-to-x axis ratio of the density

           zvec= (None) If set, a unit vector that corresponds to the z axis

           pa= (None) If set, the position angle of the x axis

           glorder= (50) if set, compute the relevant force and potential integrals with Gaussian quadrature of this order

           normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           (none)

        HISTORY:

           2010-07-09 - Written - Bovy (UofT)

           2018-08-07 - Re-written using the general EllipsoidalPotential class - Bovy (UofT)

        """
        EllipsoidalPotential.__init__(
            self, amp=amp, b=b, c=c, zvec=zvec, pa=pa, glorder=glorder,
            ro=ro, vo=vo, amp_units='mass')
        # Allow a Quantity scale radius
        self.a = conversion.parse_length(a, ro=self._ro)
        self._scale = self.a
        self.a4 = self.a**4
        # Fold the 1/(4 pi a^3) density prefactor into the amplitude
        self._amp /= 4.*numpy.pi*self.a**3
        # Normalize for True or any non-bool number (including 0.)
        if normalize or (isinstance(normalize, (int, float))
                         and not isinstance(normalize, bool)):
            self.normalize(normalize)
        # C implementation is only available for Gauss-Legendre integration
        self.hasC = self._glorder is not None
        self.hasC_dxdv = False
        self.hasC_dens = self.hasC  # works if mdens is defined, necessary for hasC
        return None
    def _psi(self, m):
        """psi(m) = -int_m^infty dm^2 rho(m^2); building block of the
        ellipsoidal potential integral"""
        shifted = m + self.a
        return -self.a4/shifted**2.
    def _mdens(self, m):
        """Dimensionless density profile as a function of m (the
        amplitude prefactor lives in self._amp)"""
        shifted = m + self.a
        return self.a4/m/shifted**3
    def _mdens_deriv(self, m):
        """Derivative of the density profile with respect to m"""
        shifted = self.a + m
        return -self.a4*(self.a+4.*m)/m**2/shifted**4
    def _mass(self, R, z=None, t=0.):
        """
        NAME:

           _mass

        PURPOSE:

           evaluate the mass within R (and z) for this potential; if z=None, integrate to ellipsoidal boundary

        INPUT:

           R - Galactocentric cylindrical radius

           z - vertical height

           t - time

        OUTPUT:

           the mass enclosed

        HISTORY:

           2021-03-16 - Written - Bovy (UofT)

        """
        # Only the ellipsoidal-boundary case has a closed form; anything
        # else falls back to the general implementation
        if z is not None: raise AttributeError # Hack to fall back to general
        return 4.*numpy.pi*self.a4/self.a/(1.+self.a/R)**2./2.*self._b*self._c
class TriaxialJaffePotential(EllipsoidalPotential):
    """Triaxial version of the Jaffe potential, with density

    .. math::

        \\rho(x,y,z) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(m/a)^2\\,(1+m/a)^{2}}

    where

    .. math::

        m^2 = x'^2 + \\frac{y'^2}{b^2}+\\frac{z'^2}{c^2}

    and :math:`(x',y',z')` is a frame rotated with respect to :math:`(x,y,z)` as specified by the parameters ``zvec`` (the location of the :math:`z'` axis in the :math:`(x,y,z)` frame) and ``pa`` (the position angle of the :math:`x'` axis wrt the :math:`\\tilde{x}` axis, that is, the :math:`x` axis after rotating to ``zvec``).
    """
    def __init__(self, amp=1., a=2., b=1., c=1., zvec=None, pa=None,
                 normalize=False, glorder=50, ro=None, vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           Initialize a Jaffe potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass

           a - scale radius (can be Quantity)

           b - y-to-x axis ratio of the density

           c - z-to-x axis ratio of the density

           zvec= (None) If set, a unit vector that corresponds to the z axis

           pa= (None) If set, the position angle of the x axis

           glorder= (50) if set, compute the relevant force and potential integrals with Gaussian quadrature of this order

           normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           (none)

        HISTORY:

           2010-07-09 - Written - Bovy (UofT)

           2018-08-07 - Re-written using the general EllipsoidalPotential class - Bovy (UofT)

        """
        EllipsoidalPotential.__init__(
            self, amp=amp, b=b, c=c, zvec=zvec, pa=pa, glorder=glorder,
            ro=ro, vo=vo, amp_units='mass')
        # Allow a Quantity scale radius
        self.a = conversion.parse_length(a, ro=self._ro)
        self._scale = self.a
        self.a2 = self.a**2
        # Fold the 1/(4 pi a^3) density prefactor into the amplitude
        self._amp /= 4.*numpy.pi*self.a2*self.a
        # Normalize for True or any non-bool number (including 0.)
        if normalize or (isinstance(normalize, (int, float))
                         and not isinstance(normalize, bool)): #pragma: no cover
            self.normalize(normalize)
        # C implementation is only available for Gauss-Legendre integration
        self.hasC = self._glorder is not None
        self.hasC_dxdv = False
        self.hasC_dens = self.hasC  # works if mdens is defined, necessary for hasC
        return None
    def _psi(self, m):
        """psi(m) = -int_m^infty dm^2 rho(m^2); building block of the
        ellipsoidal potential integral"""
        term = 1./(1.+m/self.a)+numpy.log(1./(1.+self.a/m))
        return 2.*self.a2*term
    def _mdens(self, m):
        """Dimensionless density profile as a function of m (the
        amplitude prefactor lives in self._amp)"""
        scaled = 1.+m/self.a
        return self.a2/m**2/scaled**2
    def _mdens_deriv(self, m):
        """Derivative of the density profile with respect to m"""
        shifted = self.a + m
        return -2.*self.a2**2*(self.a+2.*m)/m**3/shifted**3
    def _mass(self, R, z=None, t=0.):
        """
        NAME:

           _mass

        PURPOSE:

           evaluate the mass within R (and z) for this potential; if z=None, integrate to ellipsoidal boundary

        INPUT:

           R - Galactocentric cylindrical radius

           z - vertical height

           t - time

        OUTPUT:

           the mass enclosed

        HISTORY:

           2021-03-16 - Written - Bovy (UofT)

        """
        # Only the ellipsoidal-boundary case has a closed form; anything
        # else falls back to the general implementation
        if z is not None: raise AttributeError # Hack to fall back to general
        return 4.*numpy.pi*self.a*self.a2/(1.+self.a/R)*self._b*self._c
class TriaxialNFWPotential(EllipsoidalPotential):
    """Class that implements the triaxial NFW potential

    .. math::

        \\rho(x,y,z) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(m/a)\\,(1+m/a)^{2}}

    with

    .. math::

        m^2 = x'^2 + \\frac{y'^2}{b^2}+\\frac{z'^2}{c^2}

    and :math:`(x',y',z')` is a rotated frame wrt :math:`(x,y,z)` specified by parameters ``zvec`` and ``pa`` which specify (a) ``zvec``: the location of the :math:`z'` axis in the :math:`(x,y,z)` frame and (b) ``pa``: the position angle of the :math:`x'` axis wrt the :math:`\\tilde{x}` axis, that is, the :math:`x` axis after rotating to ``zvec``.
    """
    def __init__(self,amp=1.,a=2.,b=1.,c=1.,zvec=None,pa=None,
                 normalize=False,
                 conc=None,mvir=None,
                 glorder=50,vo=None,ro=None,
                 H=70.,Om=0.3,overdens=200.,wrtcrit=False):
        """
        NAME:

           __init__

        PURPOSE:

           Initialize a triaxial NFW potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass

           a - scale radius (can be Quantity)

           b - y-to-x axis ratio of the density

           c - z-to-x axis ratio of the density

           zvec= (None) If set, a unit vector that corresponds to the z axis

           pa= (None) If set, the position angle of the x axis

           glorder= (50) if set, compute the relevant force and potential integrals with Gaussian quadrature of this order

           normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.

           Alternatively, NFW potentials can be initialized using

              conc= concentration

              mvir= virial mass in 10^12 Msolar

           in which case you also need to supply the following keywords

              H= (default: 70) Hubble constant in km/s/Mpc

              Om= (default: 0.3) Omega matter

              overdens= (200) overdensity which defines the virial radius

              wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           (none)

        HISTORY:

           2016-05-30 - Written - Bovy (UofT)

           2018-08-06 - Re-written using the general EllipsoidalPotential class - Bovy (UofT)

        """
        EllipsoidalPotential.__init__(self,amp=amp,b=b,c=c,
                                      zvec=zvec,pa=pa,glorder=glorder,
                                      ro=ro,vo=vo,amp_units='mass')
        # Parse a possible Quantity into internal (natural) units
        a= conversion.parse_length(a,ro=self._ro)
        if conc is None:
            self.a= a
        else:
            # (conc, mvir) setup: borrow the scale radius and amplitude
            # from the equivalent spherical NFWPotential (overrides both
            # the a and amp arguments)
            from ..potential import NFWPotential
            dum= NFWPotential(mvir=mvir,conc=conc,ro=self._ro,vo=self._vo,
                              H=H,Om=Om,wrtcrit=wrtcrit,overdens=overdens)
            self.a= dum.a
            self._amp= dum._amp
        self._scale= self.a
        # C implementation is only available for Gauss-Legendre integration
        self.hasC= not self._glorder is None
        self.hasC_dxdv= False
        self.hasC_dens= self.hasC # works if mdens is defined, necessary for hasC
        # Adjust amp: fold the 1/(4 pi a^3) density prefactor into the
        # amplitude so _mdens can use the dimensionless profile
        self.a3= self.a**3
        self._amp/= (4.*numpy.pi*self.a3)
        if normalize or \
                (isinstance(normalize,(int,float)) \
                and not isinstance(normalize,bool)):
            self.normalize(normalize)
        return None
    def _psi(self,m):
        """\\psi(m) = -\\int_m^\\infty d m^2 \\rho(m^2); the building block
        of the ellipsoidal potential integral"""
        return -2.*self.a3/(self.a+m)
    def _mdens(self,m):
        """Density as a function of m (amplitude prefactor already folded
        into self._amp in __init__)"""
        return self.a/m/(1.+m/self.a)**2
    def _mdens_deriv(self,m):
        """Derivative of the density as a function of m"""
        return -self.a3*(self.a+3.*m)/m**2/(self.a+m)**3
    def _mass(self,R,z=None,t=0.):
        """
        NAME:

           _mass

        PURPOSE:

           evaluate the mass within R (and z) for this potential; if z=None, integrate to ellipsoidal boundary

        INPUT:

           R - Galactocentric cylindrical radius

           z - vertical height

           t - time

        OUTPUT:

           the mass enclosed

        HISTORY:

           2021-03-16 - Written - Bovy (UofT)

        """
        if not z is None: raise AttributeError # Hack to fall back to general
        # Standard NFW enclosed mass, scaled by the b*c axis ratios
        return 4.*numpy.pi*self.a3*self._b*self._c\
            *(numpy.log(1+R/self.a)-R/self.a/(1.+R/self.a))
| jobovy/galpy | galpy/potential/TwoPowerTriaxialPotential.py | Python | bsd-3-clause | 18,183 | [
"Gaussian"
] | 710f9c6ae6b56fbf0886c460a26ae11d53e58bff4d5d119a0f27d565a894fb2a |
from numbers import Real, Integral
import numpy as np
from . import _batoid
from .constants import globalCoordSys, vacuum
from .coordSys import CoordSys
from .coordTransform import CoordTransform
from .trace import applyForwardTransform, applyForwardTransformArrays
from .utils import lazy_property, fieldToDirCos
from .surface import Plane
def _reshape_arrays(arrays, shape, dtype=float):
for i in range(len(arrays)):
array = arrays[i]
if not hasattr(array, 'shape') or array.shape != shape:
arrays[i] = np.array(np.broadcast_to(array, shape))
arrays[i] = np.ascontiguousarray(arrays[i], dtype=dtype)
return arrays
class RayVector:
"""Create RayVector from 1d parameter arrays. Always makes a copy
of input arrays.
Parameters
----------
x, y, z : ndarray of float, shape (n,)
Positions of rays in meters.
vx, vy, vz : ndarray of float, shape (n,)
Velocities of rays in units of the speed of light in vacuum.
t : ndarray of float, shape (n,)
Reference times (divided by the speed of light in vacuum) in units
of meters.
wavelength : ndarray of float, shape (n,)
Vacuum wavelengths in meters.
flux : ndarray of float, shape (n,)
Fluxes in arbitrary units.
vignetted : ndarray of bool, shape (n,)
True where rays have been vignetted.
coordSys : CoordSys
Coordinate system in which this ray is expressed. Default: the
global coordinate system.
"""
def __init__(
self, x, y, z, vx, vy, vz, t=0.0, wavelength=0.0, flux=1.0,
vignetted=False, failed=False, coordSys=globalCoordSys
):
shape = np.broadcast(
x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
).shape
x, y, z, vx, vy, vz, t, wavelength, flux = _reshape_arrays(
[x, y, z, vx, vy, vz, t, wavelength, flux],
shape
)
vignetted, failed = _reshape_arrays(
[vignetted, failed],
shape,
bool
)
self._x = x
self._y = y
self._z = z
self._vx = vx
self._vy = vy
self._vz = vz
self._t = t
self._wavelength = wavelength
self._flux = flux
self._vignetted = vignetted
self._failed = failed
self.coordSys = coordSys
@staticmethod
def _directInit(
x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed, coordSys
):
ret = RayVector.__new__(RayVector)
ret._x = x
ret._y = y
ret._z = z
ret._vx = vx
ret._vy = vy
ret._vz = vz
ret._t = t
ret._wavelength = wavelength
ret._flux = flux
ret._vignetted = vignetted
ret._failed = failed
ret.coordSys = coordSys
return ret
def positionAtTime(self, t):
"""Calculate the positions of the rays at a given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape (n, 3)
Positions in meters.
"""
x = np.empty(len(self._x))
y = np.empty(len(self._x))
z = np.empty(len(self._x))
self._rv.positionAtTime(t, x.ctypes.data, y.ctypes.data, z.ctypes.data)
return np.array([x, y, z]).T
def propagate(self, t):
"""Propagate this RayVector to given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
RayVector
Reference to self, no copy is made.
"""
self._rv.propagateInPlace(t)
return self
def phase(self, r, t):
"""Calculate plane wave phases at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters at which to compute phase
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape(n,)
"""
out = np.empty_like(self._t)
self._rv.phase(r[0], r[1], r[2], t, out.ctypes.data)
return out
def amplitude(self, r, t):
"""Calculate (scalar) complex electric-field amplitudes at given
position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of complex, shape (n,)
"""
out = np.empty_like(self._t, dtype=np.complex128)
self._rv.amplitude(r[0], r[1], r[2], t, out.ctypes.data)
return out
def sumAmplitude(self, r, t, ignoreVignetted=True):
"""Calculate the sum of (scalar) complex electric-field amplitudes of
all rays at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
complex
"""
return self._rv.sumAmplitude(r[0], r[1], r[2], t, ignoreVignetted)
    @classmethod
    def asGrid(
        cls,
        optic=None, backDist=None, medium=None, stopSurface=None,
        wavelength=None,
        source=None, dirCos=None,
        theta_x=None, theta_y=None, projection='postel',
        nx=None, ny=None,
        dx=None, dy=None,
        lx=None, ly=None,
        flux=1,
        nrandom=None
    ):
        """Create RayVector on a parallelogram shaped region.
        This function will often be used to create a grid of rays on a square
        grid, but is flexible enough to also create grids on an arbitrary
        parallelogram, or even randomly distributed across an arbitrary
        parallelogram-shaped region.
        The algorithm starts by placing rays on the "stop" surface, and then
        backing them up such that they are in front of any surfaces of the
        optic they're intended to trace.
        The stop surface of most large telescopes is the plane perpendicular to
        the optic axis and flush with the rim of the primary mirror.  This
        plane is usually also the entrance pupil since there are no earlier
        refractive or reflective surfaces.  However, since this plane is a bit
        difficult to locate automatically, the default stop surface in batoid
        is the global x-y plane.
        If a telescope has a stopSurface attribute in its yaml file, then this
        is usually a good choice to use in this function.  Using a curved
        surface for the stop surface is allowed, but is usually a bad idea as
        this may lead to a non-uniformly illuminated pupil and is inconsistent
        with, say, an incoming uniform spherical wave or uniform plane wave.
        Parameters
        ----------
        optic : `batoid.Optic`, optional
            If present, then try to extract values for ``backDist``,
            ``medium``, ``stopSurface``, and ``lx`` from the Optic.  Note that
            values explicitly passed to `asGrid` as keyword arguments override
            those extracted from ``optic``.
        backDist : float, optional
            Map rays backwards from the stop surface to the plane that is
            perpendicular to the rays and ``backDist`` meters from the point
            (0, 0, z(0,0)) on the stop surface.  This should generally be set
            large enough that any obscurations or phantom surfaces occuring
            before the stop surface are now "in front" of the ray.  If this
            keyword is set to ``None`` and the ``optic`` keyword is set, then
            infer a value from ``optic.backDist``.  If both this keyword and
            ``optic`` are ``None``, then use a default of 40 meters, which
            should be sufficiently large for foreseeable telescopes.
        medium : `batoid.Medium`, optional
            Initial medium of each ray.  If this keyword is set to ``None`` and
            the ``optic`` keyword is set, then infer a value from
            ``optic.inMedium``.  If both this keyword and ``optic`` are
            ``None``, then use a default of vacuum.
        stopSurface : batoid.Interface, optional
            Surface defining the system stop.  If this keyword is set to
            ``None`` and the ``optic`` keyword is set, then infer a value from
            ``optic.stopSurface``.  If both this keyword and ``optic`` are
            ``None``, then use a default ``Interface(Plane())``, which is the
            global x-y plane.
        wavelength : float
            Vacuum wavelength of rays in meters.
        source : None or ndarray of float, shape (3,), optional
            Where rays originate.  If None, then rays originate an infinite
            distance away, in which case the ``dirCos`` kwarg must also be
            specified to set the direction of ray propagation.  If an ndarray,
            then the rays originate from this point in global coordinates and
            the ``dirCos`` kwarg is ignored.
        dirCos : ndarray of float, shape (3,), optional
            If source is None, then this indicates the initial direction of
            propagation of the rays.  If source is not None, then this is
            ignored.  Also see ``theta_x``, ``theta_y`` as an alternative to
            this keyword.
        theta_x, theta_y : float, optional
            Field angle in radians.  If source is None, then this indicates the
            initial direction of propagation of the rays.  If source is not
            None, then this is ignored.  Uses `utils.fieldToDirCos` to convert
            to direction cosines.  Also see ``dirCos`` as an alternative to
            this keyword.
        projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
            Projection used to convert field angle to direction cosines.
        nx, ny : int, optional
            Number of rays on each side of grid.
        dx, dy : float or (2,) array of float, optional
            Separation in meters between adjacent rays in grid.  If scalars,
            then the separations are exactly along the x and y directions.  If
            arrays, then these are interpretted as the primitive vectors for
            the first and second dimensions of the grid.  If only dx is
            explicitly specified, then dy will be inferred as a 90-degree
            rotation from dx with the same length as dx.
        lx, ly : float or (2,) array of float, optional
            Length of each side of ray grid.  If scalars, then these are
            measured along the x and y directions.  If arrays, then these also
            indicate the primitive vectors orientation of the grid.  If only
            lx is specified, then ly will be inferred as a 90-degree rotation
            from lx with the same length as lx.  If lx is ``None``, then first
            infer a value from ``nx`` and ``dx``, and if that doesn't work,
            infer a value from ``optic.pupilSize``.
        flux : float, optional
            Flux to assign each ray.  Default is 1.0.
        nrandom : None or int, optional
            If not None, then uniformly sample this many rays from
            parallelogram region instead of sampling on a regular grid.
        Returns
        -------
        RayVector
        """
        from .optic import Interface
        from .surface import Plane
        # Fill in unspecified keywords from `optic` where possible; explicit
        # keyword arguments always win.
        if optic is not None:
            if backDist is None:
                backDist = optic.backDist
            if medium is None:
                medium = optic.inMedium
            if stopSurface is None:
                try:
                    stopSurface = optic.stopSurface
                except AttributeError:
                    stopSurface = None
            if lx is None:
                # If nx and dx are both present, then let lx get inferred from
                # them.  Otherwise, infer from optic.
                if nx is None or dx is None:
                    lx = optic.pupilSize
        # Fall back to library-wide defaults for anything still unset.
        if backDist is None:
            backDist = 40.0
        if stopSurface is None:
            stopSurface = Interface(Plane())
        if medium is None:
            medium = vacuum
        if dirCos is None and source is None:
            dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
        if wavelength is None:
            raise ValueError("Missing wavelength keyword")
        # To determine the parallelogram, exactly 2 of nx, dx, lx must be set.
        if sum(a is not None for a in [nx, dx, lx]) != 2:
            raise ValueError("Exactly 2 of nx, dx, lx must be specified")
        if nx is not None and ny is None:
            ny = nx
        if dx is not None and dy is None:
            dy = dx
        if lx is not None and ly is None:
            if isinstance(lx, Real):
                ly = lx
            else:
                # 90-degree rotation of the lx primitive vector.
                ly = np.dot(np.array([[0, -1], [1, 0]]), lx)
        # We need lx, ly, nx, ny for below, so construct these from other
        # arguments if they're not already available.
        if nx is not None and dx is not None:
            if (nx%2) == 0:
                lx = dx*(nx-2)
            else:
                lx = dx*(nx-1)
            if (ny%2) == 0:
                ly = dy*(ny-2)
            else:
                ly = dy*(ny-1)
        elif lx is not None and dx is not None:
            # adjust dx in this case
            # always infer an even n (since even and odd are degenerate given
            # only lx, dx).
            slop = 0.1  # prevent 3.9999 -> 3, e.g.
            nx = int((lx/dx+slop)//2)*2+2
            ny = int((ly/dy+slop)//2)*2+2
            # These are the real dx, dy; which may be different from what was
            # passed in order to force an integer for nx/ny.  We don't actually
            # need them after this point though.
            # dx = lx/(nx-2)
            # dy = ly/(ny-2)
        # Normalize lx, ly to primitive (2,) vectors.
        if isinstance(lx, Real):
            lx = (lx, 0.0)
        if isinstance(ly, Real):
            ly = (0.0, ly)
        if nrandom is not None:
            # Uniform random fractional coordinates over the unit square.
            xx = np.random.uniform(-0.5, 0.5, size=nrandom)
            yy = np.random.uniform(-0.5, 0.5, size=nrandom)
        else:
            # fftshift(fftfreq(n, d)) yields a monotonic, symmetric set of
            # fractional grid coordinates; d is chosen so the grid spans the
            # requested side lengths for both even and odd n.
            if nx <= 2:
                x_d = 1.
            else:
                x_d = (nx-(2 if (nx%2) == 0 else 1))/nx
            if ny <= 2:
                y_d = 1.
            else:
                y_d = (ny-(2 if (ny%2) == 0 else 1))/ny
            xx = np.fft.fftshift(np.fft.fftfreq(nx, x_d))
            yy = np.fft.fftshift(np.fft.fftfreq(ny, y_d))
            xx, yy = np.meshgrid(xx, yy)
            xx = xx.ravel()
            yy = yy.ravel()
        # Map fractional coordinates through the primitive vectors lx, ly.
        stack = np.stack([xx, yy])
        x = np.dot(lx, stack)
        y = np.dot(ly, stack)
        del xx, yy, stack
        # Place the points on the stop surface and express them in global
        # coordinates.
        z = stopSurface.surface.sag(x, y)
        transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
        applyForwardTransformArrays(transform, x, y, z)
        w = np.empty_like(x)
        w.fill(wavelength)
        n = medium.getN(wavelength)
        return cls._finish(backDist, source, dirCos, n, x, y, z, w, flux)
@classmethod
def asPolar(
cls,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
outer=None, inner=None,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
nrad=None, naz=None,
flux=1,
nrandom=None
):
"""Create RayVector on an annular region using a hexapolar grid.
This function can be used to regularly sample the entrance pupil of a
telescope using polar symmetry (really, hexagonal symmetry). Rings of
different radii are used, with the number of samples on each ring
restricted to a multiple of 6 (with the exception of a potential
central "ring" of radius 0, which is only ever sampled once). This may
be more efficient than using a square grid since more of the rays
generated may avoid vignetting.
This function is also used to generate rays uniformly randomly sampled
from a given annular region.
The algorithm used here starts by placing rays on the "stop" surface,
and then backing them up such that they are in front of any surfaces of
the optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, ``stopSurface``, and ``outer`` from the Optic. Note
that values explicitly passed to `asPolar` as keyword arguments
override those extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the ray and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of each ray. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
outer : float
Outer radius of annulus in meters.
inner : float, optional
Inner radius of annulus in meters. Default is 0.0.
source : None or ndarray of float, shape (3,), optional
Where rays originate. If None, then rays originate an infinite
distance away, in which case the ``dirCos`` kwarg must also be
specified to set the direction of ray propagation. If an ndarray,
then the rays originate from this point in global coordinates and
the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then this indicates the initial direction of
propagation of the rays. If source is not None, then this is
ignored. Also see ``theta_x``, ``theta_y`` as an alternative to
this keyword.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
nrad : int
Number of radii on which create rays.
naz : int
Approximate number of azimuthal angles uniformly spaced along the
outermost ring. Each ring is constrained to have a multiple of 6
azimuths, so the realized value may be slightly different than
the input value here. Inner rings will have fewer azimuths in
proportion to their radius, but will still be constrained to a
multiple of 6. (If the innermost ring has radius 0, then exactly
1 ray, with azimuth undefined, will be used on that "ring".)
flux : float, optional
Flux to assign each ray. Default is 1.0.
nrandom : int, optional
If not None, then uniformly sample this many rays from annular
region instead of sampling on a hexapolar grid.
"""
from .optic import Interface
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
stopSurface = optic.stopSurface
if outer is None:
outer = optic.pupilSize/2
if inner is None:
if hasattr(optic, 'pupilObscuration'):
inner = optic.pupilSize*optic.pupilObscuration/2
else:
inner = 0.0
else:
if inner is None:
inner = 0.0
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
if nrandom is None:
nphis = []
rhos = np.linspace(outer, inner, nrad)
for rho in rhos:
nphi = int((naz*rho/outer)//6)*6
if nphi == 0:
nphi = 6
nphis.append(nphi)
if inner == 0.0:
nphis[-1] = 1
th = np.empty(np.sum(nphis))
rr = np.empty(np.sum(nphis))
idx = 0
for rho, nphi in zip(rhos, nphis):
rr[idx:idx+nphi] = rho
th[idx:idx+nphi] = np.linspace(0, 2*np.pi, nphi, endpoint=False)
idx += nphi
if inner == 0.0:
rr[-1] = 0.0
th[-1] = 0.0
else:
rr = np.sqrt(np.random.uniform(inner**2, outer**2, size=nrandom))
th = np.random.uniform(0, 2*np.pi, size=nrandom)
x = rr*np.cos(th)
y = rr*np.sin(th)
del rr, th
z = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, x, y, z, w, flux)
    @classmethod
    def asSpokes(
        cls,
        optic=None, backDist=None, medium=None, stopSurface=None,
        wavelength=None,
        outer=None, inner=0.0,
        source=None, dirCos=None,
        theta_x=None, theta_y=None, projection='postel',
        spokes=None, rings=None,
        spacing='uniform',
        flux=1
    ):
        """Create RayVector on an annular region using a spokes pattern.
        The function generates rays on a rings-and-spokes pattern, with a fixed
        number of radii for each azimuth and a fixed number of azimuths for
        each radius.  Its main use is for decomposing functions in pupil space
        into Zernike components using Gaussian Quadrature integration on
        annuli.  For more general purpose annular sampling, RayVector.asPolar()
        is often a better choice since it samples the pupil more uniformly.
        The algorithm used here starts by placing rays on the "stop" surface,
        and then backing them up such that they are in front of any surfaces of
        the optic they're intended to trace.
        The stop surface of most large telescopes is the plane perpendicular to
        the optic axis and flush with the rim of the primary mirror.  This
        plane is usually also the entrance pupil since there are no earlier
        refractive or reflective surfaces.  However, since this plane is a bit
        difficult to locate automatically, the default stop surface in batoid
        is the global x-y plane.
        If a telescope has a stopSurface attribute in its yaml file, then this
        is usually a good choice to use in this function.  Using a curved
        surface for the stop surface is allowed, but is usually a bad idea as
        this may lead to a non-uniformly illuminated pupil and is inconsistent
        with, say, an incoming uniform spherical wave or uniform plane wave.
        Parameters
        ----------
        optic : `batoid.Optic`, optional
            If present, then try to extract values for ``backDist``,
            ``medium``, ``stopSurface``, and ``outer`` from the Optic.  Note
            that values explicitly passed to `asSpokes` as keyword arguments
            override those extracted from ``optic``.
        backDist : float, optional
            Map rays backwards from the stop surface to the plane that is
            perpendicular to the ray and ``backDist`` meters from the point
            (0, 0, z(0,0)) on the stop surface.  This should generally be set
            large enough that any obscurations or phantom surfaces occuring
            before the stop surface are now "in front" of the ray.  If this
            keyword is set to ``None`` and the ``optic`` keyword is set, then
            infer a value from ``optic.backDist``.  If both this keyword and
            ``optic`` are ``None``, then use a default of 40 meters, which
            should be sufficiently large for foreseeable telescopes.
        medium : `batoid.Medium`, optional
            Initial medium of each ray.  If this keyword is set to ``None`` and
            the ``optic`` keyword is set, then infer a value from
            ``optic.inMedium``.  If both this keyword and ``optic`` are
            ``None``, then use a default of vacuum.
        stopSurface : batoid.Interface, optional
            Surface defining the system stop.  If this keyword is set to
            ``None`` and the ``optic`` keyword is set, then infer a value from
            ``optic.stopSurface``.  If both this keyword and ``optic`` are
            ``None``, then use a default ``Interface(Plane())``, which is the
            global x-y plane.
        wavelength : float
            Vacuum wavelength of rays in meters.
        outer : float
            Outer radius of annulus in meters.
        inner : float, optional
            Inner radius of annulus in meters.  Default is 0.0.
        source : None or ndarray of float, shape (3,), optional
            Where rays originate.  If None, then rays originate an infinite
            distance away, in which case the ``dirCos`` kwarg must also be
            specified to set the direction of ray propagation.  If an ndarray,
            then the rays originate from this point in global coordinates and
            the ``dirCos`` kwarg is ignored.
        dirCos : ndarray of float, shape (3,), optional
            If source is None, then this indicates the initial direction of
            propagation of the rays.  If source is not None, then this is
            ignored.  Also see ``theta_x``, ``theta_y`` as an alternative to
            this keyword.
        theta_x, theta_y : float, optional
            Field angle in radians.  If source is None, then this indicates the
            initial direction of propagation of the rays.  If source is not
            None, then this is ignored.  Uses `utils.fieldToDirCos` to convert
            to direction cosines.  Also see ``dirCos`` as an alternative to
            this keyword.
        projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
            Projection used to convert field angle to direction cosines.
        spokes : int or ndarray of float
            If int, then number of spokes to use.
            If ndarray, then the values of the spokes azimuthal angles in
            radians.
        rings : int or ndarray of float
            If int, then number of rings to use.
            If array, then the values of the ring radii to use in meters.
        spacing : {'uniform', 'GQ'}
            If uniform, assign ring radii uniformly between ``inner`` and
            ``outer``.
            If GQ, then assign ring radii as the Gaussian Quadrature points
            for integration on an annulus.  In this case, the ray fluxes will
            be set to the Gaussian Quadrature weights (and the ``flux`` kwarg
            will be ignored).
        flux : float, optional
            Flux to assign each ray.  Default is 1.0.
        Returns
        -------
        RayVector
        """
        from .optic import Interface
        from .surface import Plane
        # Fill in unspecified keywords from `optic` where possible; explicit
        # keyword arguments always win.
        if optic is not None:
            if backDist is None:
                backDist = optic.backDist
            if medium is None:
                medium = optic.inMedium
            if stopSurface is None:
                stopSurface = optic.stopSurface
            if outer is None:
                outer = optic.pupilSize/2
        if backDist is None:
            backDist = 40.0
        if stopSurface is None:
            stopSurface = Interface(Plane())
        if medium is None:
            medium = vacuum
        if dirCos is None and source is None:
            dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
        if wavelength is None:
            raise ValueError("Missing wavelength keyword")
        if isinstance(rings, Integral):
            if spacing == 'uniform':
                rings = np.linspace(inner, outer, rings)
            elif spacing == 'GQ':
                if spokes is None:
                    spokes = 2*rings+1
                # Legendre-Gauss abscissae/weights mapped from [-1, 1] onto
                # radii of equal-area annular zones between inner and outer.
                Li, w = np.polynomial.legendre.leggauss(rings)
                eps = inner/outer
                area = np.pi*(1-eps**2)
                rings = np.sqrt(eps**2 + (1+Li)*(1-eps**2)/2)*outer
                # Quadrature weights replace the user-supplied flux here.
                flux = w*area/(2*spokes)
        if isinstance(spokes, Integral):
            spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
        rings, spokes = np.meshgrid(rings, spokes)
        flux = np.broadcast_to(flux, rings.shape)
        rings = rings.ravel()
        spokes = spokes.ravel()
        flux = flux.ravel()
        x = rings*np.cos(spokes)
        y = rings*np.sin(spokes)
        del rings, spokes
        # Place the points on the stop surface and express them in global
        # coordinates.
        z = stopSurface.surface.sag(x, y)
        transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
        applyForwardTransformArrays(transform, x, y, z)
        w = np.empty_like(x)
        w.fill(wavelength)
        n = medium.getN(wavelength)
        return cls._finish(backDist, source, dirCos, n, x, y, z, w, flux)
@classmethod
def _finish(cls, backDist, source, dirCos, n, x, y, z, w, flux):
"""Map rays backwards to their source position."""
if isinstance(flux, Real):
flux = np.full(len(x), float(flux))
if source is None:
vv = np.array(dirCos, dtype=float)
vv /= n*np.sqrt(np.dot(vv, vv))
zhat = -n*vv
xhat = np.cross(np.array([1.0, 0.0, 0.0]), zhat)
xhat /= np.sqrt(np.dot(xhat, xhat))
yhat = np.cross(xhat, zhat)
origin = zhat*backDist
rot = np.stack([xhat, yhat, zhat]).T
_batoid.finishParallel(
origin, rot.ravel(), vv,
x.ctypes.data, y.ctypes.data, z.ctypes.data,
len(x)
)
vx = np.full_like(x, vv[0])
vy = np.full_like(y, vv[1])
vz = np.full_like(z, vv[2])
t = np.zeros(len(x), dtype=float)
vignetted = np.zeros(len(x), dtype=bool)
failed = np.zeros(len(x), dtype=bool)
return RayVector._directInit(
x, y, z, vx, vy, vz, t, w,
flux, vignetted, failed, globalCoordSys
)
else:
pass
# v = np.copy(r)
# v -= source
# v /= n*np.einsum('ab,ab->b', v, v)
# r[:] = source
# t = np.zeros(len(r), dtype=float)
# vignetted = np.zeros(len(r), dtype=bool)
# failed = np.zeros(len(r), dtype=bool)
# return RayVector._directInit(
# r, v, t, w, flux, vignetted, failed, globalCoordSys
# )
    @classmethod
    def fromStop(
        cls, x, y,
        optic=None, backDist=None, medium=None, stopSurface=None,
        wavelength=None,
        source=None, dirCos=None,
        theta_x=None, theta_y=None, projection='postel',
        flux=1
    ):
        """Create rays that intersects the "stop" surface at given points.
        The algorithm used here starts by placing the rays on the "stop"
        surface, and then backing them up such that they are in front of any
        surfaces of the optic they're intended to trace.
        The stop surface of most large telescopes is the plane perpendicular to
        the optic axis and flush with the rim of the primary mirror.  This
        plane is usually also the entrance pupil since there are no earlier
        refractive or reflective surfaces.  However, since this plane is a bit
        difficult to locate automatically, the default stop surface in batoid
        is the global x-y plane.
        If a telescope has a stopSurface attribute in its yaml file, then this
        is usually a good choice to use in this function.  Using a curved
        surface for the stop surface is allowed, but is usually a bad idea as
        this may lead to a non-uniformly illuminated pupil and is inconsistent
        with, say, an incoming uniform spherical wave or uniform plane wave.
        Parameters
        ----------
        x, y : ndarray
            X/Y coordinates on the stop surface where the rays would intersect
            if not refracted or reflected first.
        optic : `batoid.Optic`, optional
            If present, then try to extract values for ``backDist``,
            ``medium``, and ``stopSurface`` from the Optic.  Note that values
            explicitly passed here as keyword arguments override those
            extracted from ``optic``.
        backDist : float, optional
            Map rays backwards from the stop surface to the plane that is
            perpendicular to the rays and ``backDist`` meters from the point
            (0, 0, z(0,0)) on the stop surface.  This should generally be set
            large enough that any obscurations or phantom surfaces occuring
            before the stop surface are now "in front" of the ray.  If this
            keyword is set to ``None`` and the ``optic`` keyword is set, then
            infer a value from ``optic.backDist``.  If both this keyword and
            ``optic`` are ``None``, then use a default of 40 meters, which
            should be sufficiently large for foreseeable telescopes.
        medium : `batoid.Medium`, optional
            Initial medium of rays.  If this keyword is set to ``None`` and
            the ``optic`` keyword is set, then infer a value from
            ``optic.inMedium``.  If both this keyword and ``optic`` are
            ``None``, then use a default of vacuum.
        stopSurface : batoid.Interface, optional
            Surface defining the system stop.  If this keyword is set to
            ``None`` and the ``optic`` keyword is set, then infer a value from
            ``optic.stopSurface``.  If both this keyword and ``optic`` are
            ``None``, then use a default ``Interface(Plane())``, which is the
            global x-y plane.
        wavelength : float
            Vacuum wavelength of rays in meters.
        source : None or ndarray of float, shape (3,), optional
            Where the rays originate.  If None, then the rays originate an
            infinite distance away, in which case the ``dirCos`` kwarg must also
            be specified to set the direction of ray propagation.  If an
            ndarray, then the rays originates from this point in global
            coordinates and the ``dirCos`` kwarg is ignored.
        dirCos : ndarray of float, shape (3,), optional
            If source is None, then indicates the direction of ray propagation.
            If source is not None, then this is ignored.
        theta_x, theta_y : float, optional
            Field angle in radians.  If source is None, then this indicates the
            initial direction of propagation of the rays.  If source is not
            None, then this is ignored.  Uses `utils.fieldToDirCos` to convert
            to direction cosines.  Also see ``dirCos`` as an alternative to
            this keyword.
        projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
            Projection used to convert field angle to direction cosines.
        flux : float, optional
            Flux of rays.  Default is 1.0.
        Returns
        -------
        RayVector
        """
        from .optic import Interface
        from .surface import Plane
        # Fill in unspecified keywords from `optic` where possible; explicit
        # keyword arguments always win.
        if optic is not None:
            if backDist is None:
                backDist = optic.backDist
            if medium is None:
                medium = optic.inMedium
            if stopSurface is None:
                stopSurface = optic.stopSurface
        if backDist is None:
            backDist = 40.0
        if stopSurface is None:
            stopSurface = Interface(Plane())
        if medium is None:
            medium = vacuum
        if dirCos is None and source is None:
            dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
        if wavelength is None:
            raise ValueError("Missing wavelength keyword")
        # NOTE(review): with copy=False, float ndarray inputs are NOT copied,
        # and the coordinate transform below appears to operate in place --
        # callers' arrays may be mutated.  Confirm whether this is intended.
        x = np.atleast_1d(x).astype(float, copy=False)
        y = np.atleast_1d(y).astype(float, copy=False)
        z = stopSurface.surface.sag(x, y)
        transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
        applyForwardTransformArrays(transform, x, y, z)
        w = np.empty_like(x)
        w.fill(wavelength)
        n = medium.getN(wavelength)
        return cls._finish(backDist, source, dirCos, n, x, y, z, w, flux)
    @classmethod
    def fromFieldAngles(
        cls, theta_x, theta_y, projection='postel',
        optic=None, backDist=None, medium=None, stopSurface=None,
        wavelength=None,
        x=0, y=0,
        flux=1
    ):
        """Create RayVector with one stop surface point but many field angles.
        This method is similar to `fromStop` but broadcasts over ``theta_x``
        and ``theta_y`` instead of over ``x`` and ``y``.  There is less
        currently less effort paid to synchronizing the ``t`` values of the
        created rays, as they don't correspond to points on a physical incoming
        wavefront in this case.  The primary intended use case is to map chief
        rays (``x``=``y``=0) from incoming field angle to focal plane position.
        Parameters
        ----------
        theta_x, theta_y : ndarray
            Field angles in radians.
        projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
            Projection used to convert field angle to direction cosines.
        optic : `batoid.Optic`, optional
            If present, then try to extract values for ``backDist``,
            ``medium``, and ``stopSurface`` from the Optic.  Note that values
            explicitly passed here as keyword arguments override those
            extracted from ``optic``.
        backDist : float, optional
            Map rays backwards from the stop surface this far.  This should
            generally be set large enough that any obscurations or phantom
            surfaces occuring before the stop surface are now "in front" of the
            rays.  If this keyword is set to ``None`` and the ``optic`` keyword
            is set, then infer a value from ``optic.backDist``.  If both this
            keyword and ``optic`` are ``None``, then use a default of 40 meters,
            which should be sufficiently large for foreseeable telescopes.
        medium : `batoid.Medium`, optional
            Initial medium of rays.  If this keyword is set to ``None`` and
            the ``optic`` keyword is set, then infer a value from
            ``optic.inMedium``.  If both this keyword and ``optic`` are
            ``None``, then use a default of vacuum.
        stopSurface : batoid.Interface, optional
            Surface defining the system stop.  If this keyword is set to
            ``None`` and the ``optic`` keyword is set, then infer a value from
            ``optic.stopSurface``.  If both this keyword and ``optic`` are
            ``None``, then use a default ``Interface(Plane())``, which is the
            global x-y plane.
        wavelength : float
            Vacuum wavelength of rays in meters.
        x, y : float
            X/Y coordinates on the stop surface where the rays would intersect
            if not refracted or reflected first.
        flux : float, optional
            Flux of rays.  Default is 1.0.
        Returns
        -------
        RayVector
        """
        from .optic import Interface
        from .surface import Plane
        # Fill in unspecified keywords from `optic` where possible; explicit
        # keyword arguments always win.
        if optic is not None:
            if backDist is None:
                backDist = optic.backDist
            if medium is None:
                medium = optic.inMedium
            if stopSurface is None:
                stopSurface = optic.stopSurface
        if backDist is None:
            backDist = 40.0
        if stopSurface is None:
            stopSurface = Interface(Plane())
        if medium is None:
            medium = vacuum
        if wavelength is None:
            raise ValueError("Missing wavelength keyword")
        vx, vy, vz = fieldToDirCos(theta_x, theta_y, projection=projection)
        n = medium.getN(wavelength)
        # Ray speed in a medium of index n is 1/n (units of c).
        vx /= n
        vy /= n
        vz /= n
        # Single stop-surface point, broadcast to the field-angle shape.
        z = stopSurface.surface.sag(x, y)
        x = np.full_like(vx, x)
        y = np.full_like(vx, y)
        z = np.full_like(vx, z)
        t = np.zeros_like(vx)
        rv = RayVector(
            x, y, z,
            vx, vy, vz,
            t, wavelength, flux,
            coordSys=stopSurface.coordSys
        )
        # Back the rays up along their velocities; the time offset is
        # -backDist*n since speed is 1/n.
        rv.propagate(-backDist*n)
        return rv
    @property
    def r(self):
        """ndarray of float, shape (n, 3): Positions of rays in meters."""
        # Pull any device-side updates back into the host arrays first.
        self._rv.x.syncToHost()
        self._rv.y.syncToHost()
        self._rv.z.syncToHost()
        return np.array([self._x, self._y, self._z]).T
    @property
    def x(self):
        """The x components of ray positions in meters."""
        # Refresh the host copy before exposing it.
        self._rv.x.syncToHost()
        return self._x
    @property
    def y(self):
        """The y components of ray positions in meters."""
        # Refresh the host copy before exposing it.
        self._rv.y.syncToHost()
        return self._y
    @property
    def z(self):
        """The z components of ray positions in meters."""
        # Refresh the host copy before exposing it.
        self._rv.z.syncToHost()
        return self._z
    @property
    def v(self):
        """ndarray of float, shape (n, 3): Velocities of rays in units of the
        speed of light in vacuum.  Note that these may have magnitudes < 1 if
        the rays are inside a refractive medium.
        """
        # Sync all three velocity components before assembling the array.
        self._rv.vx.syncToHost()
        self._rv.vy.syncToHost()
        self._rv.vz.syncToHost()
        return np.array([self._vx, self._vy, self._vz]).T
    @property
    def vx(self):
        """The x components of ray velocities units of the vacuum speed of
        light.
        """
        # Refresh the host copy from the device before exposing it.
        self._rv.vx.syncToHost()
        return self._vx
    @property
    def vy(self):
        """The y components of ray velocities units of the vacuum speed of
        light.
        """
        # Refresh the host copy from the device before exposing it.
        self._rv.vy.syncToHost()
        return self._vy
    @property
    def vz(self):
        """The z components of ray velocities units of the vacuum speed of
        light.
        """
        # Refresh the host copy from the device before exposing it.
        self._rv.vz.syncToHost()
        return self._vz
    @property
    def t(self):
        """Reference times (divided by the speed of light in vacuum) in units
        of meters, also known as the optical path lengths.
        """
        # Refresh the host copy from the device before exposing it.
        self._rv.t.syncToHost()
        return self._t
    @property
    def wavelength(self):
        """Vacuum wavelengths in meters."""
        # wavelength is constant, so no need to synchronize: the host and
        # device copies can never diverge after construction.
        return self._wavelength
    @property
    def flux(self):
        """Fluxes in arbitrary units."""
        # Refresh the host copy from the device before exposing it.
        self._rv.flux.syncToHost()
        return self._flux
    @property
    def vignetted(self):
        """True for rays that have been vignetted."""
        # Refresh the host copy from the device before exposing it.
        self._rv.vignetted.syncToHost()
        return self._vignetted
    @property
    def failed(self):
        """True for rays that have failed.  This may occur, for example, if
        batoid failed to find the intersection of a ray with a surface.
        """
        # Refresh the host copy from the device before exposing it.
        self._rv.failed.syncToHost()
        return self._failed
    @property
    def k(self):
        r"""ndarray of float, shape (n, 3): Wavevectors of plane waves in units
        of radians per meter.  The magnitude of each wavevector is equal to
        :math:`2 \pi n / \lambda`, where :math:`n` is the refractive index and
        :math:`\lambda` is the wavelength.
        """
        v = self.v
        out = 2*np.pi*v
        # Dividing by |v|^2 both normalizes the direction and rescales the
        # magnitude: since velocities are stored with |v| = 1/n, the result
        # is 2*pi*n/lambda along v-hat, as documented above.
        out /= self.wavelength[:, None]
        out /= np.sum(v*v, axis=-1)[:, None]
        return out
    @property
    def kx(self):
        """The x component of each ray wavevector in radians per meter."""
        # Convenience slice of the full wavevector array.
        return self.k[:,0]
    @property
    def ky(self):
        """The y component of each ray wavevector in radians per meter."""
        # Convenience slice of the full wavevector array.
        return self.k[:,1]
    @property
    def kz(self):
        """The z component of each ray wavevector in radians per meter."""
        # Convenience slice of the full wavevector array.
        return self.k[:,2]
    @property
    def omega(self):
        r"""The temporal angular frequency of each plane wave divided by the
        vacuum speed of light in units of radians per meter.  Equals
        :math:`2 \pi / \lambda`.
        """
        return 2*np.pi/self.wavelength
    @lazy_property
    def _rv(self):
        # Lazily construct the C++ mirror of this RayVector.  The raw ctypes
        # pointers mean the C++ side aliases the numpy host buffers directly
        # (no copy); device transfers are then managed via syncToHost /
        # syncToDevice on the individual members.
        return _batoid.CPPRayVector(
            self._x.ctypes.data, self._y.ctypes.data, self._z.ctypes.data,
            self._vx.ctypes.data, self._vy.ctypes.data, self._vz.ctypes.data,
            self._t.ctypes.data,
            self._wavelength.ctypes.data, self._flux.ctypes.data,
            self._vignetted.ctypes.data, self._failed.ctypes.data,
            len(self._wavelength)
        )
    def _syncToHost(self):
        # Copy every attribute array from the device back to the host.
        if "_rv" not in self.__dict__:
            # Was never copied to device, so still synchronized.
            return
        self._rv.x.syncToHost()
        self._rv.y.syncToHost()
        self._rv.z.syncToHost()
        self._rv.vx.syncToHost()
        self._rv.vy.syncToHost()
        self._rv.vz.syncToHost()
        self._rv.t.syncToHost()
        self._rv.wavelength.syncToHost()
        self._rv.flux.syncToHost()
        self._rv.vignetted.syncToHost()
        self._rv.failed.syncToHost()
def _syncToDevice(self):
self._rv.x.syncToHost()
self._rv.y.syncToHost()
self._rv.z.syncToHost()
self._rv.vx.syncToHost()
self._rv.vy.syncToHost()
self._rv.vz.syncToHost()
self._rv.t.syncToDevice()
self._rv.wavelength.syncToDevice()
self._rv.flux.syncToDevice()
self._rv.vignetted.syncToDevice()
self._rv.failed.syncToDevice()
    def copy(self):
        """Return a deep copy of this RayVector (host-side arrays and
        coordinate system are duplicated; device buffers are re-created
        lazily on the copy)."""
        # copy on host side for now...
        self._syncToHost()
        ret = RayVector.__new__(RayVector)
        ret._x = np.copy(self._x)
        ret._y = np.copy(self._y)
        ret._z = np.copy(self._z)
        ret._vx = np.copy(self._vx)
        ret._vy = np.copy(self._vy)
        ret._vz = np.copy(self._vz)
        ret._t = np.copy(self._t)
        ret._wavelength = np.copy(self._wavelength)
        ret._flux = np.copy(self._flux)
        ret._vignetted = np.copy(self._vignetted)
        ret._failed = np.copy(self._failed)
        ret.coordSys = self.coordSys.copy()
        return ret
    def toCoordSys(self, coordSys):
        """Transform this RayVector into a new coordinate system.

        Parameters
        ----------
        coordSys: batoid.CoordSys
            Destination coordinate system.

        Returns
        -------
        RayVector
            Reference to self, no copy is made.
        """
        # In-place transform: positions/velocities are rewritten and
        # self.coordSys is updated by applyForwardTransform.
        transform = CoordTransform(self.coordSys, coordSys)
        applyForwardTransform(transform, self)
        return self
    def __len__(self):
        """Number of rays in this RayVector."""
        return self._t.size
    def __eq__(self, rhs):
        """Element-wise equality, delegated to the C++ layer.
        NOTE(review): assumes rhs is a RayVector; other types raise
        AttributeError rather than returning NotImplemented — confirm intent.
        """
        return self._rv == rhs._rv
    def __ne__(self, rhs):
        """Negation of __eq__, delegated to the C++ layer."""
        return self._rv != rhs._rv
    def __repr__(self):
        # Built from the public (host-synchronized) properties so the output
        # always reflects current values.
        out = f"RayVector({self.x!r}, {self.y!r}, {self.z!r}"
        out += f", {self.vx!r}, {self.vy!r}, {self.vz!r}"
        out += f", {self.t!r}, {self.wavelength!r}, {self.flux!r}"
        out += f", {self.vignetted!r}, {self.failed!r}, {self.coordSys!r})"
        return out
    def __getstate__(self):
        # Pickle via the public properties, which sync from the device first,
        # so the serialized state is always up to date.
        return (
            self.x, self.y, self.z,
            self.vx, self.vy, self.vz,
            self.t,
            self.wavelength, self.flux,
            self.vignetted, self.failed, self.coordSys
        )
    def __setstate__(self, args):
        # Restore directly into the private host arrays; the C++ mirror is
        # rebuilt lazily by the _rv property on first use.
        (self._x, self._y, self._z,
         self._vx, self._vy, self._vz, self._t,
         self._wavelength, self._flux, self._vignetted,
         self._failed, self.coordSys) = args
def __getitem__(self, idx):
if isinstance(idx, int):
if idx >= 0:
if idx >= self._rv.t.size:
msg = "index {} is out of bounds for axis 0 with size {}"
msg = msg.format(idx, self._rv.t.size)
raise IndexError(msg)
idx = slice(idx, idx+1)
else:
if idx < -self._rv.t.size:
msg = "index {} is out of bounds for axis 0 with size {}"
msg = msg.format(idx, self._rv.t.size)
raise IndexError(msg)
idx = slice(self._rv.t.size+idx, self._rv.t.size-idx+1)
self._syncToHost()
return RayVector._directInit(
np.copy(self._x[idx]),
np.copy(self._y[idx]),
np.copy(self._z[idx]),
np.copy(self._vx[idx]),
np.copy(self._vy[idx]),
np.copy(self._vz[idx]),
np.copy(self._t[idx]),
np.copy(self._wavelength[idx]),
np.copy(self._flux[idx]),
np.copy(self._vignetted[idx]),
np.copy(self._failed[idx]),
self.coordSys
)
def concatenateRayVectors(rvs):
    """Concatenate a sequence of RayVectors into one.

    All attribute arrays are stacked in order.  The result adopts the
    coordinate system of the first element; raises IndexError if `rvs` is
    empty.  NOTE(review): inputs are assumed to share a coordinate system —
    no transform is applied here; confirm against callers.
    """
    return RayVector(
        np.hstack([rv.x for rv in rvs]),
        np.hstack([rv.y for rv in rvs]),
        np.hstack([rv.z for rv in rvs]),
        np.hstack([rv.vx for rv in rvs]),
        np.hstack([rv.vy for rv in rvs]),
        np.hstack([rv.vz for rv in rvs]),
        np.hstack([rv.t for rv in rvs]),
        np.hstack([rv.wavelength for rv in rvs]),
        np.hstack([rv.flux for rv in rvs]),
        np.hstack([rv.vignetted for rv in rvs]),
        np.hstack([rv.failed for rv in rvs]),
        rvs[0].coordSys
    )
| jmeyers314/batoid | batoid/rayVector.py | Python | bsd-2-clause | 52,447 | [
"Gaussian"
] | 2c7719df4d8633674b8cd201bdf16cbe87fe54b17427453ca9fc3b1ee4e98e2e |
import unittest
from noaaclass import noaaclass
from datetime import datetime
import time
class TestGvarimg(unittest.TestCase):
def remove_all_in_server(self):
sub_data = self.noaa.subscribe.gvar_img.get()
ids = [d['id'] for d in sub_data if '[auto]' in d['name']]
if len(ids):
self.noaa.get('sub_delete?actionbox=%s' % '&actionbox='.join(ids))
def init_subscribe_data(self):
self.sub_data = [
{'id': '+',
'enabled': True,
'name': '[auto] sample1',
'north': -26.72,
'south': -43.59,
'west': -71.02,
'east': -48.52,
'coverage': ['SH'],
'schedule': ['R'],
'satellite': ['G13'],
'channel': [1],
'format': 'NetCDF',
},
{'id': '+',
'enabled': False,
'name': '[auto] sample2',
'north': -26.73,
'south': -43.52,
'west': -71.06,
'east': -48.51,
'coverage': ['SH'],
'schedule': ['R'],
'satellite': ['G13'],
'channel': [2],
'format': 'NetCDF',
},
{'id': '+',
'enabled': True,
'name': 'static',
'north': -26.73,
'south': -33.52,
'west': -61.06,
'east': -48.51,
'coverage': ['SH'],
'schedule': ['R'],
'satellite': ['G13'],
'channel': [1],
'format': 'NetCDF',
},
]
old_data = self.noaa.subscribe.gvar_img.get()
names = [d['name'] for d in self.sub_data]
self.sub_data.extend(filter(lambda x: x['name'] not in names,
old_data))
def init_request_data(self):
self.req_data = [
{'id': '+',
'north': -26.72,
'south': -43.59,
'west': -71.02,
'east': -48.52,
'coverage': ['SH'],
'schedule': ['R'],
'satellite': ['G13'],
'channel': [1],
'format': 'NetCDF',
'start': datetime(2014, 9, 16, 10, 0, 0),
'end': datetime(2014, 9, 16, 17, 59, 59)
},
{'id': '+',
'north': -26.73,
'south': -43.52,
'west': -71.06,
'east': -48.51,
'coverage': ['SH'],
'schedule': ['R'],
'satellite': ['G13'],
'channel': [2],
'format': 'NetCDF',
'start': datetime(2014, 9, 2, 10, 0, 0),
'end': datetime(2014, 9, 3, 17, 59, 59)
},
]
def setUp(self):
self.noaa = noaaclass.connect('noaaclass.t', 'noaaclassadmin')
self.init_subscribe_data()
self.init_request_data()
self.remove_all_in_server()
def tearDown(self):
self.remove_all_in_server()
def test_subscribe_get_empty(self):
gvar_img = self.noaa.subscribe.gvar_img
auto = lambda x: '[auto]' in x['name']
data = filter(auto, gvar_img.get())
self.assertEquals(data, [])
def test_subscribe_get(self):
gvar_img = self.noaa.subscribe.gvar_img
gvar_img.set(self.sub_data)
data = gvar_img.get(append_files=True)
for subscription in data:
for key in ['id', 'enabled', 'name', 'coverage', 'schedule',
'south', 'north', 'west', 'east', 'satellite',
'format', 'orders']:
self.assertIn(key, subscription.keys())
for order in subscription['orders']:
for key in ['id', 'last_activity', 'status', 'size', 'files',
'datetime']:
self.assertIn(key, order.keys())
def test_subscribe_set_new_elements(self):
gvar_img = self.noaa.subscribe.gvar_img
copy = gvar_img.set(self.sub_data)
self.assertGreaterEqual(len(copy), len(self.sub_data))
[self.assertIn(k, copy[i].keys())
for i in range(len(self.sub_data)) for k in self.sub_data[i].keys()]
[self.assertEquals(copy[i][k], v)
for i in range(len(self.sub_data))
for k, v in self.sub_data[i].items()
if k is not 'id']
def test_subscribe_set_edit_elements(self):
gvar_img = self.noaa.subscribe.gvar_img
copy = gvar_img.set(self.sub_data)
self.assertGreaterEqual(len(copy), 2)
copy[0]['name'] = '[auto] name changed'
copy[1]['channel'] = [4, 5]
gvar_img.set(copy)
edited = gvar_img.get()
self.assertEquals(edited[0]['name'], copy[0]['name'])
self.assertEquals(edited[1]['channel'], copy[1]['channel'])
def test_subscribe_set_remove_element(self):
gvar_img = self.noaa.subscribe.gvar_img
copy = gvar_img.set(self.sub_data, async=True)
self.assertEquals(gvar_img.get(), copy)
criteria = lambda x: 'sample1' not in x['name']
copy = filter(criteria, copy)
gvar_img.set(copy)
self.assertEquals(gvar_img.get(), copy)
def test_request_get(self):
gvar_img = self.noaa.request.gvar_img
for order in gvar_img.get():
for key in ['id', 'status', 'datetime', 'format', 'files']:
self.assertIn(key, order.keys())
def assertEqualsRequests(self, obtained, original):
avoid = ['coverage', 'end', 'start',
'satellite', 'schedule', 'id', 'north',
'south', 'east', 'west']
asymetric = lambda x: x not in avoid
if not obtained['files']['http']:
avoid.extend(['format', 'channel'])
for k in filter(asymetric, original.keys()):
self.assertIn(k, obtained.keys())
if isinstance(original[k], float):
self.assertEqual(int(obtained[k]), int(original[k]))
elif isinstance(original[k], datetime):
self.assertEqual(obtained[k].toordinal(),
original[k].toordinal())
else:
self.assertEquals(obtained[k], original[k])
def test_request_set_new(self):
time.sleep(40)
gvar_img = self.noaa.request.gvar_img
data = gvar_img.get(async=True)
data.extend(self.req_data)
copy = gvar_img.set(data, async=True)
self.assertEquals(len(copy), len(data))
[self.assertEqualsRequests(copy[i], data[i])
for i in range(len(data))]
time.sleep(40)
def test_request_set_without_auto_get(self):
time.sleep(40)
gvar_img = self.noaa.request.gvar_img
data = gvar_img.get(async=True)
data.extend(self.req_data)
large_start = datetime.now()
copy = gvar_img.set(data, async=True, auto_get=True)
large_end = datetime.now()
start = datetime.now()
copy = gvar_img.set(data, async=True, auto_get=False)
end = datetime.now()
diff = lambda end, start: (end - start).total_seconds()
self.assertGreaterEqual(diff(large_end, large_start), diff(end, start))
time.sleep(40)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gersolar/noaaclass | tests/gvarimg_test.py | Python | mit | 7,304 | [
"NetCDF"
] | 6c6eae5c74bce8e6da02507f85b417fa31ed786de8500607690e9012381ee033 |
#
# Get coordination count from LAMMPS custom dump command
# (ID,x,y,z,coordination) for each timestep
#
# Gonzalo Aguirre <graguirre@gmail.com>
#
import sys, getopt
def usage():
    # Print command-line help to stderr and abort with status 1.
    # (Python 2 print-to-stream syntax; this module is Python 2 only.)
    print >> sys.stderr, "Options:"
    print >> sys.stderr, " -h Show help"
    print >> sys.stderr, " -i <inputfile> Input file"
    print >> sys.stderr, "Syntax: python2 parser.py -i data.txt"
    sys.exit(1)
def main(argv):
    # Scan a LAMMPS custom dump (ID,x,y,z,coordination) and, for each
    # timestep, print the timestep followed by a histogram of coordination
    # counts 0..mc-1.
    # variables
    tf = 0  # timestep flag
    nf = 0  # number flag
    af = 0  # atoms flag
    t = 0   # time
    n = 0   # number of atoms
    mc = 5  # max coordination number
    d = {}  # init dictionary: timestep -> list of mc counters
    inputfile = ''
    try:
        opts, args = getopt.getopt(argv,"hi:")
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt == '-i':
            inputfile = arg
    if inputfile == '':
        usage()
    try:
        fid = open(inputfile,'r')
    except IOError:
        print >> sys.stderr, "ERROR: File "+inputfile+" not found."
        sys.exit(2)
    # State machine over dump sections: each "ITEM:" header arms a flag so
    # the *next* line(s) are interpreted as that section's payload.
    for i in fid:
        # don't alter the if statement order
        if tf:
            t = int(i) # get timestep
            tf = 0 # unset flag
            d[t]=[0 for j in range(mc)]
        if i.find('ITEM: TIMESTEP') != -1:
            tf = 1 # set flag
        if nf:
            n = int(i) # get number
            nf = 0 # unset flag
        if i.find('ITEM: NUMBER') != -1:
            nf = 1 # set flag
        if n == 0:
            af = 0 # unset flag once all atoms of a frame are consumed
        if af:
            l = i.split()
            # NOTE(review): raises IndexError if a coordination value >= mc
            # (5) appears in the dump — confirm the expected value range.
            d[t][int(l[4])] += 1
            n -= 1
        if i.find('ITEM: ATOMS') != -1:
            af = 1
    for i in d:
        print i,' '.join(str(j) for j in d[i])
# Script entry point: pass CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| graguirre/Coord-LAMMPS | parser.py | Python | gpl-2.0 | 1,504 | [
"LAMMPS"
] | e260d0f8e934f501f04df61b3eaafd0eb405b18cc65a9e4dc5df29d18f0b74bd |
'''Convert to and from Roman numerals
This program is part of 'Dive Into Python 3', a free Python book for
experienced programmers. Visit http://diveintopython3.org/ for the
latest version.
'''
# Raised when the integer is outside the representable range 1..3999.
class OutOfRangeError(ValueError): pass
# Raised when a non-integer is passed to to_roman().
class NotIntegerError(ValueError): pass
# (numeral, value) pairs in strictly descending value order; the subtractive
# forms (CM, CD, XC, ...) must precede their base numerals for the greedy
# conversion loops below to work.
roman_numeral_map = (('M', 1000),
                     ('CM', 900),
                     ('D', 500),
                     ('CD', 400),
                     ('C', 100),
                     ('XC', 90),
                     ('L', 50),
                     ('XL', 40),
                     ('X', 10),
                     ('IX', 9),
                     ('V', 5),
                     ('IV', 4),
                     ('I', 1))
def to_roman(n, numeral_map=(('M', 1000), ('CM', 900), ('D', 500),
                             ('CD', 400), ('C', 100), ('XC', 90),
                             ('L', 50), ('XL', 40), ('X', 10),
                             ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))):
    '''convert integer to Roman numeral

    n must be an int in 1..3999; raises OutOfRangeError / NotIntegerError
    otherwise.  numeral_map generalizes the hard-coded table: any sequence
    of (numeral, value) pairs in descending value order may be supplied
    (defaults to the standard roman_numeral_map).
    '''
    if not (0 < n < 4000):
        raise OutOfRangeError('number out of range (must be 1..3999)')
    if not isinstance(n, int):
        raise NotIntegerError('non-integers can not be converted')

    # Greedy: repeatedly emit the largest numeral not exceeding n.
    result = ''
    for numeral, integer in numeral_map:
        while n >= integer:
            result += numeral
            n -= integer
    return result
def from_roman(s, numeral_map=(('M', 1000), ('CM', 900), ('D', 500),
                               ('CD', 400), ('C', 100), ('XC', 90),
                               ('L', 50), ('XL', 40), ('X', 10),
                               ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))):
    '''convert Roman numeral to integer

    No validation is performed (matches the original behavior): an empty
    string returns 0 and malformed input is summed as far as it parses.
    numeral_map generalizes the hard-coded table, mirroring to_roman().
    '''
    result = 0
    index = 0
    # Consume the string left to right, matching the largest numerals first.
    for numeral, integer in numeral_map:
        while s[index : index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| ctasims/Dive-Into-Python-3 | examples/roman5.py | Python | mit | 2,748 | [
"VisIt"
] | b016c90abf6dd5bf130210e47fa06cf3449dfcbc2f28a2ffd3897df891e72a4a |
import numpy as np
from ase.data import atomic_numbers as ref_atomic_numbers
from ase.lattice.spacegroup import Spacegroup
from ase.cluster.base import ClusterBase
from ase.cluster.cluster import Cluster
class ClusterFactory(ClusterBase):
    """Build a Cluster by carving a bulk lattice along Miller-indexed planes.

    Subclasses configure the crystal structure (lattice basis, atomic basis,
    spacegroup); calling the factory cuts the requested number of layers
    along each surface.  (Python 2 module: uses print statements.)
    """

    # Cartesian directions used when fitting the final cell around the atoms.
    directions = [[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]]

    # Scaled positions of the atoms within one unit cell.
    atomic_basis = np.array([[0., 0., 0.]])

    # Maps each atomic-basis site to an element index; None => single element.
    element_basis = None

    def __call__(self, symbols, surfaces, layers, latticeconstant=None,
                 center=None, vacuum=0.0, debug=0):
        """Create a Cluster bounded by `surfaces` with the given `layers`."""
        self.debug = debug

        # Interpret symbol
        self.set_atomic_numbers(symbols)

        # Interpret lattice constant
        if latticeconstant is None:
            if self.element_basis is None:
                self.lattice_constant = self.get_lattice_constant()
            else:
                raise ValueError("A lattice constant must be specified for a compound")
        else:
            self.lattice_constant = latticeconstant

        self.set_basis()

        if self.debug:
            print "Lattice constant(s):", self.lattice_constant
            print "Lattice basis:\n", self.lattice_basis
            print "Resiprocal basis:\n", self.resiproc_basis
            print "Atomic basis:\n", self.atomic_basis

        self.set_surfaces_layers(surfaces, layers)
        self.set_lattice_size(center)

        if self.debug:
            print "Center position:", self.center.round(2)
            print "Base lattice size:", self.size

        cluster = self.make_cluster(vacuum)
        # Attach the cutting metadata so the cluster can be re-derived later.
        cluster.symmetry = self.xtal_name
        cluster.surfaces = self.surfaces.copy()
        cluster.lattice_basis = self.lattice_basis.copy()
        cluster.atomic_basis = self.atomic_basis.copy()
        cluster.resiproc_basis = self.resiproc_basis.copy()
        return cluster

    def make_cluster(self, vacuum):
        """Replicate the unit cell over self.size, cut along all surfaces,
        then shrink-wrap the cell (plus vacuum) around the atoms."""
        # Make the base crystal by repeating the unit cell
        size = np.array(self.size)
        translations = np.zeros((size.prod(), 3))
        for h in range(size[0]):
            for k in range(size[1]):
                for l in range(size[2]):
                    i = h * (size[1] * size[2]) + k * size[2] + l
                    translations[i] = np.dot([h, k, l], self.lattice_basis)

        atomic_basis = np.dot(self.atomic_basis, self.lattice_basis)
        positions = np.zeros((len(translations) * len(atomic_basis), 3))
        numbers = np.zeros(len(positions))
        n = len(atomic_basis)
        for i, trans in enumerate(translations):
            positions[n*i:n*(i+1)] = atomic_basis + trans
            numbers[n*i:n*(i+1)] = self.atomic_numbers

        # Remove all atoms that is outside the defined surfaces
        for s, l in zip(self.surfaces, self.layers):
            n = self.miller_to_direction(s)
            # +0.1 layers of slack keeps the boundary plane's atoms.
            rmax = self.get_layer_distance(s, l + 0.1)

            r = np.dot(positions - self.center, n)
            mask = np.less(r, rmax)

            if self.debug > 1:
                print "Cutting %s at %i layers ~ %.3f A" % (s, l, rmax)

            positions = positions[mask]
            numbers = numbers[mask]

        # Fit the cell, so it only just consist the atoms
        min = np.zeros(3)
        max = np.zeros(3)
        for i in range(3):
            v = self.directions[i]
            r = np.dot(positions, v)
            min[i] = r.min()
            max[i] = r.max()

        cell = max - min + vacuum
        positions = positions - min + vacuum / 2.0
        self.center = self.center - min + vacuum / 2.0

        return Cluster(symbols=numbers, positions=positions, cell=cell)

    def set_atomic_numbers(self, symbols):
        "Extract atomic number from element"
        # The types that can be elements: integers and strings
        atomic_numbers = []

        if self.element_basis is None:
            if isinstance(symbols, str):
                atomic_numbers.append(ref_atomic_numbers[symbols])
            elif isinstance(symbols, int):
                atomic_numbers.append(symbols)
            else:
                raise TypeError("The symbol argument must be a " +
                                "string or an atomic number.")
            element_basis = [0] * len(self.atomic_basis)
        else:
            if isinstance(symbols, (list, tuple)):
                nsymbols = len(symbols)
            else:
                nsymbols = 0

            nelement_basis = max(self.element_basis) + 1

            if nsymbols != nelement_basis:
                raise TypeError("The symbol argument must be a sequence " +
                                "of length %d" % (nelement_basis,) +
                                " (one for each kind of lattice position")

            for s in symbols:
                if isinstance(s, str):
                    atomic_numbers.append(ref_atomic_numbers[s])
                elif isinstance(s, int):
                    atomic_numbers.append(s)
                else:
                    raise TypeError("The symbol argument must be a " +
                                    "string or an atomic number.")
            element_basis = self.element_basis

        self.atomic_numbers = [atomic_numbers[n] for n in element_basis]
        assert len(self.atomic_numbers) == len(self.atomic_basis)

    def set_lattice_size(self, center):
        """Compute how many unit cells (self.size) are needed to span all
        surfaces, and place self.center at `center` (scaled coordinates)."""
        if center is None:
            offset = np.zeros(3)
        else:
            offset = np.array(center)
            if (offset > 1.0).any() or (offset < 0.0).any():
                raise ValueError("Center offset must lie within the lattice unit \
cell.")

        max = np.ones(3)
        min = -np.ones(3)
        v = np.linalg.inv(self.lattice_basis.T)
        for s, l in zip(self.surfaces, self.layers):
            n = self.miller_to_direction(s) * self.get_layer_distance(s, l)
            # Express the surface extent in lattice coordinates, rounded
            # outward to whole cells.
            k = np.round(np.dot(v, n), 2)
            for i in range(3):
                if k[i] > 0.0:
                    k[i] = np.ceil(k[i])
                elif k[i] < 0.0:
                    k[i] = np.floor(k[i])

            if self.debug > 1:
                print "Spaning %i layers in %s in lattice basis ~ %s" % (l, s, k)

            max[k > max] = k[k > max]
            min[k < min] = k[k < min]

        self.center = np.dot(offset - min, self.lattice_basis)
        self.size = (max - min + np.ones(3)).astype(int)

    def set_surfaces_layers(self, surfaces, layers):
        """Expand (surfaces, layers) with all spacegroup-equivalent
        reflections, storing the result in self.surfaces / self.layers."""
        if len(surfaces) != len(layers):
            raise ValueError("Improper size of surface and layer arrays: %i != %i"
                             % (len(surfaces), len(layers)))

        sg = Spacegroup(self.spacegroup)
        surfaces = np.array(surfaces)
        layers = np.array(layers)

        for i, s in enumerate(surfaces):
            s = reduce_miller(s)
            surfaces[i] = s

        surfaces_full = surfaces.copy()
        layers_full = layers.copy()

        for s, l in zip(surfaces, layers):
            equivalent_surfaces = sg.equivalent_reflections(s.reshape(-1, 3))

            for es in equivalent_surfaces:
                # If the equivalent surface (es) is not in the surface list,
                # then append it.
                if not np.equal(es, surfaces_full).all(axis=1).any():
                    surfaces_full = np.append(surfaces_full, es.reshape(1, 3), axis=0)
                    layers_full = np.append(layers_full, l)

        self.surfaces = surfaces_full.copy()
        self.layers = layers_full.copy()

    def get_resiproc_basis(self, basis):
        """Returns the resiprocal basis to a given lattice (crystal) basis"""
        k = 1 / np.dot(basis[0], cross(basis[1], basis[2]))

        # The same as the inversed basis matrix transposed
        return k * np.array([cross(basis[1], basis[2]),
                             cross(basis[2], basis[0]),
                             cross(basis[0], basis[1])])
# Helping functions
def cross(a, b):
    """Return the cross product a x b as a numpy array of length 3."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return np.array([ay * bz - by * az,
                     az * bx - bz * ax,
                     ax * by - bx * ay])
def GCD(a,b):
    """Greatest Common Divisor of a and b via Euclid's remainder recurrence.

    For a zero argument the other argument is returned unchanged, so
    GCD(0, x) == x and GCD(0, 0) == 0.
    """
    while a:
        b, a = a, b % a
    return b
def reduce_miller(hkl):
    """Reduce Miller index to the lowest equivalent integers.

    Divides out the greatest common divisor and fixes the overall sign so
    that the result points the same way as the input.  Uses integer floor
    division (``//``): the original ``/`` relied on Python 2 semantics and
    would silently produce float arrays under Python 3.
    """
    hkl = np.array(hkl)
    old = hkl.copy()

    d = GCD(GCD(hkl[0], hkl[1]), hkl[2])
    while d != 1:
        hkl = hkl // d
        d = GCD(GCD(hkl[0], hkl[1]), hkl[2])

    # Keep the orientation of the original vector.
    if np.dot(old, hkl) > 0:
        return hkl
    else:
        return -hkl
| grhawk/ASE | tools/ase/cluster/factory.py | Python | gpl-2.0 | 8,613 | [
"ASE",
"CRYSTAL"
] | 8f3f8f94fb584ca304c9502d3cf9e577c84bfb71c39cd8d0a524290086615905 |
# -*- coding: utf8 -*-
"""
"""
__author__ = "Jérôme Samson"
__copyright__ = "Copyright 2014, Mikros Image"
import os
import sys
import csv
import time
import datetime
from optparse import OptionParser
import numpy as np
import pygal
from pygal.style import *
try:
import simplejson as json
except ImportError:
import json
from octopus.dispatcher import settings
from octopus.core import singletonconfig
from pulitools.common import roundTime
from pulitools.common import lowerQuartile, higherQuartile
###########################################################################################################################
# Data example:
# {
# "prod":{
# "ddd" : { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2, "allocatedRN":5, "readyCommandCount":15},
# "dior_tea" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1, "allocatedRN":1, "readyCommandCount":15},
# },
# "user":{
# "brr" : { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2 , "allocatedRN":5, "readyCommandCount":15},
# "bho" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1 , "allocatedRN":1, "readyCommandCount":15},
# "lap" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1 , "allocatedRN":1, "readyCommandCount":15},
# },
# "step":{
# ...
# },
# "type":{
# ...
# },
# "total": { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2 , "allocatedRN":5, "readyCommandCount":150}
# "requestDate": "Wed Apr 2 12:16:01 2014"
# }
def process_args():
    '''
    Manages arguments parsing definition and help information
    '''
    # Returns (options, args): parsed flags plus the two positional fields
    # (group field, graph value) validated by the caller.
    usage = "usage: %prog [general options] [restriction list] [output option]"
    desc="""Displays information.
"""

    parser = OptionParser(usage=usage, description=desc, version="%prog 0.1" )

    parser.add_option( "-f", action="store", dest="sourceFile", default=os.path.join(settings.LOGDIR, "usage_stats.log"), help="Source file" )
    parser.add_option( "-o", action="store", dest="outputFile", default="./queue_avg.svg", help="Target output file." )
    parser.add_option( "-v", action="store_true", dest="verbose", help="Verbose output" )
    # Time window is expressed in hours before "now" (start must be further
    # in the past than end).
    parser.add_option( "-s", action="store", dest="rangeIn", type="int", help="Start range is N hours in past", default=3 )
    parser.add_option( "-e", action="store", dest="rangeOut", type="int", help="End range is N hours in past (mus be lower than '-s option'", default=0 )
    parser.add_option( "-t", "--title", action="store", dest="title", help="Indicates a title", default="Queue usage over time")
    parser.add_option( "-r", "--res", action="store", dest="resolution", type="int", help="Indicates ", default=10 )
    parser.add_option( "--stack", action="store_true", dest="stacked", default=False)
    parser.add_option( "--line", action="store_true", dest="line", default=True)
    parser.add_option( "--log", action="store_true", dest="logarithmic", help="Display graph with a logarithmic scale", default=False )
    parser.add_option( "--scale", action="store", dest="scaleEvery", type="int", help="Indicates the number of scale values to display", default=8 )

    options, args = parser.parse_args()

    return options, args
if __name__ == "__main__":
options, args = process_args()
VERBOSE = options.verbose
if VERBOSE:
print "Command options: %s" % options
print "Command arguments: %s" % args
if len(args) is not 2:
print "Error: 2 fields must be specified."
sys.exit(1)
else:
groupField = args[0]
graphValue = args[1]
if options.rangeIn < options.rangeOut:
print "Invalid start/end range"
sys.exit()
startDate = time.time() - 3600 * options.rangeIn
endDate = time.time() - 3600 * options.rangeOut
if VERBOSE:
print "Loading stats: %r " % options.sourceFile
print " - from: %r " % datetime.date.fromtimestamp(startDate)
print " - to: %r " % datetime.date.fromtimestamp(endDate)
print "Start."
strScale=[]
scale=[]
data2Dim = {}
log = []
#
# Load json log and filter by date
#
with open(options.sourceFile, "r" ) as f:
for line in f:
data = json.loads(line)
if (startDate < data['requestDate'] and data['requestDate'] <= endDate):
log.append( json.loads(line) )
for i, data in enumerate(log):
eventDate = datetime.datetime.fromtimestamp( data['requestDate'] )
for key, val in data[ groupField ].items():
if key not in data2Dim:
data2Dim[key] = np.array( [0]*len(log) )
data2Dim[key][i] = val[ graphValue ]
scale.append( eventDate )
stepSize = len(scale) / options.resolution
newshape = (options.resolution, stepSize)
useableSize = len(scale) - ( len(scale) % options.resolution )
avgData = {}
if VERBOSE:
print "stepSize=%d" % stepSize
print "useableSize=%d" % useableSize
for dataset in data2Dim.keys():
# print "%s = %d - %r" % (dataset, len(data2Dim[dataset]), data2Dim[dataset])
avgData[dataset] = np.mean( np.reshape(data2Dim[dataset][-useableSize:], newshape), axis=1)
# working = np.array(nb_working[-useableSize:])
# unknown = np.array(nb_unknown[-useableSize:])
# paused = np.array(nb_paused[-useableSize:])
# # print ("working %d = %r" % (len(working), working) )
# # print ("reshape %d = %r" % (len(newshape), newshape) )
# avg_working= np.mean( np.reshape(working, newshape), axis=1)
# avg_paused= np.mean( np.reshape(paused, newshape), axis=1)
# avg_unknown= np.mean( np.reshape(unknown, newshape), axis=1)
# # med= np.median(data, axis=1)
# # amin= np.min(data, axis=1)
# # amax= np.max(data, axis=1)
# # q1= lowerQuartile(data)
# # q2= higherQuartile(data)
# # std= np.std(data, axis=1)
strScale = [''] * options.resolution
tmpscale = np.reshape(scale[-useableSize:], newshape)
# # print ("tmp scale %d = %r" % (len(tmpscale), tmpscale) )
# # print ("str scale %d = %r" % (len(strScale), strScale) )
for i,date in enumerate(tmpscale[::len(tmpscale)/options.scaleEvery]):
newIndex = i*len(tmpscale)/options.scaleEvery
if newIndex < len(strScale):
strScale[newIndex] = date[0].strftime('%H:%M')
strScale[0] = scale[0].strftime('%Y-%m-%d %H:%M')
strScale[-1] = scale[-1].strftime('%Y-%m-%d %H:%M')
if VERBOSE:
print ("newshape %d = %r" % (len(newshape), newshape) )
print ("data2Dim %d = %r" % (len(data2Dim), data2Dim) )
print ("scale %d = %r" % (len(strScale), strScale) )
if VERBOSE:
print "Num events: %d" % len(scale)
print "Creating graph."
if options.stacked:
avg_usage = pygal.StackedLine( x_label_rotation=30,
include_x_axis=True,
logarithmic=options.logarithmic,
show_dots=False,
width=800,
height=300,
fill=True,
interpolate='hermite',
interpolation_parameters={'type': 'cardinal', 'c': 1.0},
interpolation_precision=3,
style=RedBlueStyle
)
else:
avg_usage = pygal.Line( x_label_rotation=30,
include_x_axis=True,
logarithmic=options.logarithmic,
show_dots=True,
width=800,
height=300,
interpolate='hermite',
interpolation_parameters={'type': 'cardinal', 'c': 1.0},
interpolation_precision=3,
style=RedBlueStyle
)
avg_usage.title = options.title
avg_usage.x_labels = strScale
for key,val in avgData.items():
avg_usage.add(key, val )
avg_usage.render_to_file( options.outputFile )
if VERBOSE:
print "Done."
| mikrosimage/OpenRenderManagement | src/pulitools/stats/display_2dim_usage.py | Python | bsd-3-clause | 8,423 | [
"Octopus"
] | 783b84fb4263dc4c09a609ab74fb919875ad546a2d145c09c5f65352e7a223df |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# createvgrid - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
# Enable rich HTML tracebacks in the browser for CGI failures.
cgitb.enable()
from shared.functionality.createvgrid import main
from shared.cgiscriptstub import run_cgi_script
# Delegate request parsing/dispatch to the shared CGI wrapper.
run_cgi_script(main)
| heromod/migrid | mig/cgi-bin/createvgrid.py | Python | gpl-2.0 | 1,110 | [
"Brian"
] | d7ea7f9c02abce765941bf061f1147a17d37120287bed825e991803346c59fa6 |
"""semi-views for the `group_messaging` application
These are not really views - rather context generator
functions, to be used separately, when needed.
For example, some other application can call these
in order to render messages within the page.
Notice that :mod:`urls` module decorates all these functions
and turns them into complete views
"""
import copy
import datetime
from django.template.loader import get_template
from django.contrib.auth.models import User
from django.db import models
from django.forms import IntegerField
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseForbidden
from django.utils import simplejson
from group_messaging.models import Message
from group_messaging.models import MessageMemo
from group_messaging.models import SenderList
from group_messaging.models import LastVisitTime
from group_messaging.models import get_personal_group_by_user_id
from group_messaging.models import get_personal_groups_for_users
class InboxView(object):
    """custom class-based view
    to be used for pjax use and for generation
    of content in the traditional way, where
    only the :method:`get_context` would be used.
    """
    template_name = None  # used only for the "GET" method
    http_method_names = ('GET', 'POST')  # methods accepted by dispatch()

    def render_to_response(self, context, template_name=None):
        """like a django's shortcut, except will use
        template_name from self, if `template_name` is not given.
        Also, response is packaged as json with an html fragment
        for the pjax consumption
        """
        if template_name is None:
            template_name = self.template_name
        template = get_template(template_name)
        html = template.render(context)
        json = simplejson.dumps({'html': html, 'success': True})
        return HttpResponse(json, content_type='application/json')

    def get(self, request, *args, **kwargs):
        """view function for the "GET" method"""
        context = self.get_context(request, *args, **kwargs)
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """view function for the "POST" method - a no-op here,
        subclasses that accept POST must override it"""
        pass

    def dispatch(self, request, *args, **kwargs):
        """checks that the current request method is allowed
        and calls the corresponding view function"""
        if request.method not in self.http_method_names:
            #bugfix: HttpResponseNotAllowed requires the sequence of
            #permitted methods (used to build the "Allow" header);
            #calling it without arguments raises a TypeError
            return HttpResponseNotAllowed(self.http_method_names)
        view_func = getattr(self, request.method.lower())
        return view_func(request, *args, **kwargs)

    def get_context(self, request, *args, **kwargs):
        """Returns the context dictionary for the "get"
        method only"""
        return {}

    def as_view(self):
        """returns the view function - for the urls.py"""
        def view_function(request, *args, **kwargs):
            """the actual view function: serves only authenticated
            users making ajax requests, otherwise responds 403"""
            if request.user.is_authenticated() and request.is_ajax():
                view_method = getattr(self, request.method.lower())
                return view_method(request, *args, **kwargs)
            else:
                return HttpResponseForbidden()
        return view_function
class NewThread(InboxView):
    """view for creation of new thread"""
    #bugfix: the base class dispatch() reads ``http_method_names``;
    #the previously declared ``http_method_list`` was never consulted,
    #so the POST-only restriction did not take effect
    http_method_names = ('POST',)

    def post(self, request):
        """creates a new thread on behalf of the user
        response is blank, because on the client side we just
        need to go back to the thread listing view whose
        content should be cached in the client'
        """
        usernames = request.POST['to_usernames']
        #python 2: map() returns a list, so copy.copy() below yields an
        #independent list of the requested usernames
        usernames = map(lambda v: v.strip(), usernames.split(','))
        users = User.objects.filter(username__in=usernames)
        #collect requested usernames that match no existing user
        missing = copy.copy(usernames)
        for user in users:
            if user.username in missing:
                missing.remove(user.username)
        result = dict()
        if missing:
            result['success'] = False
            result['missing_users'] = missing
        if request.user.username in usernames:
            #messaging oneself is not allowed
            result['success'] = False
            result['self_message'] = True
        if result.get('success', True):
            #all recipients resolved - create the thread
            recipients = get_personal_groups_for_users(users)
            message = Message.objects.create_thread(
                sender=request.user,
                recipients=recipients,
                text=request.POST['text']
            )
            result['success'] = True
            result['message_id'] = message.id
        return HttpResponse(
            simplejson.dumps(result), content_type='application/json'
        )
class PostReply(InboxView):
    """view to create a new response"""
    #bugfix: the base class dispatch() reads ``http_method_names``;
    #the previously declared ``http_method_list`` was never consulted
    http_method_names = ('POST',)

    def post(self, request):
        """creates a response to the message given by ``parent_id``
        and returns the rendered message snippet for insertion
        into the page"""
        parent_id = IntegerField().clean(request.POST['parent_id'])
        parent = Message.objects.get(id=parent_id)
        message = Message.objects.create_response(
            sender=request.user,
            text=request.POST['text'],
            parent=parent
        )
        #refresh the last visit time so the user's own reply does not
        #make the thread show up as unread
        #NOTE(review): assumes a LastVisitTime record already exists for
        #this user and thread - .get() raises DoesNotExist otherwise;
        #confirm whether get_or_create would be more appropriate
        last_visit = LastVisitTime.objects.get(
            message=message.root,
            user=request.user
        )
        last_visit.at = datetime.datetime.now()
        last_visit.save()
        return self.render_to_response(
            {'post': message, 'user': request.user},
            template_name='group_messaging/stored_message.html'
        )
class ThreadsList(InboxView):
    """shows list of threads for a given user"""
    template_name = 'group_messaging/threads_list.html'
    #bugfix: the base class dispatch() reads ``http_method_names``;
    #the previously declared ``http_method_list`` was never consulted
    http_method_names = ('GET',)

    def get_context(self, request):
        """returns thread list data"""
        #get threads and the last visit time
        #sender_id selects the mailbox: -2 -> deleted threads,
        #-1 -> all incoming, own id -> sent threads,
        #any other id -> threads from that particular sender
        sender_id = IntegerField().clean(request.REQUEST.get('sender_id', '-1'))
        if sender_id == -2:
            threads = Message.objects.get_threads(
                recipient=request.user,
                deleted=True
            )
        elif sender_id == -1:
            threads = Message.objects.get_threads(recipient=request.user)
        elif sender_id == request.user.id:
            threads = Message.objects.get_sent_threads(sender=request.user)
        else:
            sender = User.objects.get(id=sender_id)
            threads = Message.objects.get_threads(
                recipient=request.user,
                sender=sender
            )
        threads = threads.order_by('-last_active_at')
        #for each thread we need to know if there is something
        #unread for the user - to mark "new" threads as bold
        threads_data = dict()
        for thread in threads:
            thread_data = dict()
            #determine status: start 'new', demoted to 'seen' below when
            #the user's last visit is not older than the last activity
            thread_data['status'] = 'new'
            #determine the senders info, hiding the requesting user's
            #own name from the displayed list
            senders_names = thread.senders_info.split(',')
            if request.user.username in senders_names:
                senders_names.remove(request.user.username)
            thread_data['senders_info'] = ', '.join(senders_names)
            thread_data['thread'] = thread
            threads_data[thread.id] = thread_data
        #fetch all response counts with a single annotated query
        ids = [thread.id for thread in threads]
        counts = Message.objects.filter(
            id__in=ids
        ).annotate(
            responses_count=models.Count('descendants')
        ).values('id', 'responses_count')
        for count in counts:
            thread_id = count['id']
            responses_count = count['responses_count']
            threads_data[thread_id]['responses_count'] = responses_count
        last_visit_times = LastVisitTime.objects.filter(
            user=request.user,
            message__in=threads
        )
        for last_visit in last_visit_times:
            thread_data = threads_data[last_visit.message_id]
            if thread_data['thread'].last_active_at <= last_visit.at:
                thread_data['status'] = 'seen'
        return {
            'threads': threads,
            'threads_count': threads.count(),
            'threads_data': threads_data,
            'sender_id': sender_id
        }
class DeleteOrRestoreThread(ThreadsList):
    """subclassing :class:`ThreadsList`, because deletion
    or restoring of thread needs subsequent refreshing
    of the threads list"""
    #bugfix: the base class dispatch() reads ``http_method_names``;
    #the previously declared ``http_method_list`` was never consulted
    http_method_names = ('POST',)

    def post(self, request, thread_id=None):
        """process the post request:
        * delete or restore thread
        * recalculate the threads list and return it for display
          by reusing the threads list "get" function
        """
        #part of the threads list context
        sender_id = IntegerField().clean(request.POST['sender_id'])
        #a little cryptic, but works - sender_id==-2 means deleted post,
        #i.e. the request came from the "deleted" mailbox view
        if sender_id == -2:
            action = 'restore'
        else:
            action = 'delete'
        thread = Message.objects.get(id=thread_id)
        memo, created = MessageMemo.objects.get_or_create(
            user=request.user,
            message=thread
        )
        #ARCHIVED hides the thread from the regular views,
        #SEEN brings it back
        if action == 'delete':
            memo.status = MessageMemo.ARCHIVED
        else:
            memo.status = MessageMemo.SEEN
        memo.save()
        #re-render the refreshed threads list for the client
        context = self.get_context(request)
        return self.render_to_response(context)
class SendersList(InboxView):
    """shows list of senders for a user"""
    template_name = 'group_messaging/senders_list.html'
    http_method_names = ('GET',)

    def get_context(self, request):
        """get data about senders for the user"""
        #only id and username are needed by the template
        sender_data = SenderList.objects.get_senders_for_user(
            request.user
        ).values('id', 'username')
        return {
            'senders': sender_data,
            'request_user_id': request.user.id
        }
class ThreadDetails(InboxView):
    """shows entire thread in the unfolded form"""
    template_name = 'group_messaging/thread_details.html'
    http_method_names = ('GET',)

    def get_context(self, request, thread_id=None):
        """shows individual thread"""
        #todo: assert that current thread is the root
        root_message = Message.objects.get(id=thread_id)
        thread_responses = Message.objects.filter(root__id=thread_id)
        last_visit, created = LastVisitTime.objects.get_or_create(
            message=root_message,
            user=request.user
        )
        if not created:
            #existing record: bump the visit timestamp to "now"
            last_visit.at = datetime.datetime.now()
            last_visit.save()
        return {
            'root_message': root_message,
            'responses': thread_responses,
            'request': request
        }
| coffenbacher/askbot-devel | askbot/deps/group_messaging/views.py | Python | gpl-3.0 | 11,261 | [
"VisIt"
] | ef20c61453e3e244e617856ff3cfd88a4c49e9b544ec2809c8987c7c0bf57341 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.