| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
# Copyright 2016 the GPflow authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_array_less
import gpflow
from gpflow.base import AnyNDArray
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
@dataclass(frozen=True)
class Datum:
rng: np.random.RandomState = np.random.RandomState(0)
X: AnyNDArray = rng.randn(100, 2)
Y: AnyNDArray = rng.randn(100, 1)
Z: AnyNDArray = rng.randn(10, 2)
Xs: AnyNDArray = rng.randn(10, 2)
lik = gpflow.likelihoods.Gaussian()
kernel = gpflow.kernels.Matern32()
default_datum = Datum()
_gp_models: List[gpflow.models.GPModel] = [
gpflow.models.VGP((default_datum.X, default_datum.Y), default_datum.kernel, default_datum.lik),
gpflow.models.GPMC((default_datum.X, default_datum.Y), default_datum.kernel, default_datum.lik),
gpflow.models.SGPMC(
(default_datum.X, default_datum.Y),
default_datum.kernel,
default_datum.lik,
inducing_variable=default_datum.Z,
),
gpflow.models.SGPR(
(default_datum.X, default_datum.Y),
default_datum.kernel,
inducing_variable=default_datum.Z,
),
gpflow.models.GPR((default_datum.X, default_datum.Y), default_datum.kernel),
gpflow.models.GPRFITC(
(default_datum.X, default_datum.Y),
default_datum.kernel,
inducing_variable=default_datum.Z,
),
]
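# SVGP is constructed without the (X, Y) training data, unlike the models above.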
_state_less_gp_models: List[gpflow.models.GPModel] = [
gpflow.models.SVGP(default_datum.kernel, default_datum.lik, inducing_variable=default_datum.Z)
]
@pytest.mark.parametrize("model", _state_less_gp_models + _gp_models)
def test_methods_predict_f(model: gpflow.models.GPModel) -> None:
mf, vf = model.predict_f(default_datum.Xs)
assert_array_equal(mf.shape, vf.shape)
assert_array_equal(mf.shape, (10, 1))
assert_array_less(np.full_like(vf, -1e-6), vf)
@pytest.mark.parametrize("model", _state_less_gp_models + _gp_models)
def test_methods_predict_y(model: gpflow.models.GPModel) -> None:
mf, vf = model.predict_y(default_datum.Xs)
assert_array_equal(mf.shape, vf.shape)
assert_array_equal(mf.shape, (10, 1))
assert_array_less(np.full_like(vf, -1e-6), vf)
@pytest.mark.parametrize("model", _state_less_gp_models + _gp_models)
def test_methods_predict_log_density(model: gpflow.models.GPModel) -> None:
rng = Datum().rng
Ys = rng.randn(10, 1)
d = model.predict_log_density((default_datum.Xs, Ys))
assert_array_equal(d.shape, (10,))
| GPflow/GPflow | tests/gpflow/models/test_methods.py | Python | apache-2.0 | 3,160 | ["Gaussian"] | 733ee74c764de2021b51ff229f0293ab96dd043b11b4d8df8c6d07d4237b4af8 |
"""
@name: PyHouse/src/Modules/_test/test_Scheduling.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2018 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 31, 2015
@Summary:
"""
__updated__ = '2019-07-29'
from twisted.trial import unittest, reporter, runner
from Modules.Housing.Schedules import _test as I_test
class Z_Suite(unittest.TestCase):
def setUp(self):
self.m_test = runner.TestLoader()
def test_Scheduling(self):
l_package = runner.TestLoader().loadPackage(I_test)
l_ret = reporter.Reporter()
l_package.run(l_ret)
l_ret.done()
#
print('\n====================\n*** test_Scheduling ***\n{}\n'.format(l_ret))
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/House/_test/test_Scheduling.py | Python | mit | 769 | ["Brian"] | cf0ee9ab8ba251414d14263eef0227af950c9ba84774f9e253423f5bcc3b75b9 |
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Uoti Urpala and Matt Chisholm
from __future__ import division
from BitTorrent.platform import install_translation
install_translation()
import sys
import itertools
import math
import os
import threading
import datetime
import random
import atexit
assert sys.version_info >= (2, 3), _("Install Python %s or greater") % '2.3'
from BitTorrent import BTFailure, INFO, WARNING, ERROR, CRITICAL, status_dict, app_name
from BitTorrent import configfile
from BitTorrent.defaultargs import get_defaults
from BitTorrent.IPC import ipc_interface
from BitTorrent.prefs import Preferences
from BitTorrent.platform import doc_root, btspawn, path_wrap, os_version, is_frozen_exe, get_startup_dir, create_shortcut, remove_shortcut
from BitTorrent import zurllib
defaults = get_defaults('bittorrent')
defaults.extend((('donated' , '', ''), # the version that the user last donated for
('notified', '', ''), # the version that the user was last notified of
))
ui_options = [
'max_upload_rate' ,
'minport' ,
'maxport' ,
'next_torrent_time' ,
'next_torrent_ratio' ,
'last_torrent_ratio' ,
'seed_forever' ,
'seed_last_forever' ,
'ask_for_save' ,
'save_in' ,
'open_from' ,
'ip' ,
'start_torrent_behavior',
'upnp' ,
]
if os.name == 'nt':
ui_options.extend( [
'launch_on_startup' ,
'minimize_to_tray' ,
])
advanced_ui_options_index = len(ui_options)
ui_options.extend([
'min_uploads' ,
'max_uploads' ,
'max_initiate' ,
'max_incomplete' ,
'max_allow_in' ,
'max_files_open' ,
'forwarded_port' ,
'display_interval',
'donated' ,
'notified' ,
])
if is_frozen_exe:
ui_options.append('progressbar_hack')
defproghack = 0
if os_version == 'XP':
# turn on progress bar hack by default for Win XP
defproghack = 1
defaults.extend((('progressbar_hack' , defproghack, ''),))
NAG_FREQUENCY = 3
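# PortValidator keeps 'maxport' at 'minport' + PORT_RANGE whenever the start of the port range is edited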
PORT_RANGE = 5
defconfig = dict([(name, value) for (name, value, doc) in defaults])
del name, value, doc
def btgui_exit(ipc):
ipc.stop()
class global_logger(object):
def __init__(self, logger = None):
self.logger = logger
def __call__(self, severity, msg):
if self.logger:
self.logger(severity, msg)
else:
sys.stderr.write("%s: %s\n" % (status_dict[severity], msg))
# if it's application global, why do we pass a reference to it everywhere?
global_log_func = global_logger()
if __name__ == '__main__':
zurllib.add_unsafe_thread()
try:
config, args = configfile.parse_configuration_and_args(defaults,
'bittorrent', sys.argv[1:], 0, None)
except BTFailure, e:
print str(e)
sys.exit(1)
config = Preferences().initWithDict(config)
advanced_ui = config['advanced']
newtorrents = args
for opt in ('responsefile', 'url'):
if config[opt]:
print '"--%s"' % opt, _("deprecated, do not use")
newtorrents.append(config[opt])
ipc = ipc_interface(config, global_log_func)
# this could be on the ipc object
ipc_master = True
try:
ipc.create()
except BTFailure:
ipc_master = False
try:
ipc.send_command('no-op')
except BTFailure:
global_log_func(ERROR, (_("Failed to communicate with another %s process "
"but one seems to be running.") +
_(" Closing all %s windows may fix the problem."))
% (app_name, app_name))
sys.exit(1)
# make sure we clean up the ipc when we close
atexit.register(btgui_exit, ipc)
# it's not obvious, but 'newtorrents' is carried on to the gui
# __main__ if we're the IPC master
if not ipc_master:
if newtorrents:
# Not sure if anything really useful could be done if
# these send_command calls fail
for name in newtorrents:
ipc.send_command('start_torrent', name, config['save_as'])
sys.exit(0)
try:
ipc.send_command('show_error', _("%s already running")%app_name)
except BTFailure:
global_log_func(ERROR, (_("Failed to communicate with another %s process.") +
_(" Closing all %s windows may fix the problem."))
% (app_name, app_name))
sys.exit(1)
import gtk
import pango
import gobject
import webbrowser
assert gtk.pygtk_version >= (2, 6), _("PyGTK %s or newer required") % '2.6'
from BitTorrent import HELP_URL, DONATE_URL, SEARCH_URL, version, branch
from BitTorrent import TorrentQueue
from BitTorrent import LaunchPath
from BitTorrent import Desktop
from BitTorrent import ClientIdentifier
from BitTorrent import NewVersion
from BitTorrent.parseargs import makeHelp
from BitTorrent.TorrentQueue import RUNNING, RUN_QUEUED, QUEUED, KNOWN, ASKING_LOCATION
from BitTorrent.TrayIcon import TrayIcon
from BitTorrent.StatusLight import GtkStatusLight as StatusLight
from BitTorrent.GUI import *
main_torrent_dnd_tip = _("drag to reorder")
torrent_menu_tip = _("right-click for menu")
torrent_tip_format = '%s:\n %s\n %s'
rate_label = ': %s'
speed_classes = {
( 4, 5):_("dialup" ),
( 6, 14):_("DSL/cable 128k up"),
( 15, 29):_("DSL/cable 256k up"),
( 30, 91):_("DSL 768k up" ),
( 92, 137):_("T1" ),
( 138, 182):_("T1/E1" ),
( 183, 249):_("E1" ),
( 250, 5446):_("T3" ),
(5447,18871):_("OC3" ),
}
def find_dir(path):
if os.path.isdir(path):
return path
directory, garbage = os.path.split(path)
while directory:
if os.access(directory, os.F_OK) and os.access(directory, os.W_OK):
return directory
directory, garbage = os.path.split(directory)
if garbage == '':
break
return None
def smart_dir(path):
path = find_dir(path)
if path is None:
path = Desktop.desktop
return path
class MenuItem(gtk.MenuItem):
def __init__(self, label, accel_group=None, func=None):
gtk.MenuItem.__init__(self, label)
if func is not None:
self.connect("activate", func)
else:
self.set_sensitive(False)
if accel_group is not None:
label = label.decode('utf-8')
accel_index = label.find('_')
if -1 < accel_index < len(label) - 1:
accel_char = long(ord(label[accel_index+1]))
accel_key = gtk.gdk.unicode_to_keyval(accel_char)
if accel_key != accel_char | 0x01000000:
self.add_accelerator("activate", accel_group, accel_key,
gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
self.show()
def build_menu(menu_items, accel_group=None):
menu = gtk.Menu()
for label,func in menu_items:
if label == '----':
s = gtk.SeparatorMenuItem()
s.show()
menu.add(s)
else:
item = MenuItem(label, accel_group=accel_group, func=func)
item.show()
menu.add(item)
return menu
class Validator(gtk.Entry):
valid_chars = '1234567890'
minimum = None
maximum = None
cast = int
def __init__(self, option_name, config, setfunc):
gtk.Entry.__init__(self)
self.option_name = option_name
self.config = config
self.setfunc = setfunc
self.set_text(str(config[option_name]))
self.set_size_request(self.width,-1)
self.connect('insert-text', self.text_inserted)
self.connect('focus-out-event', self.focus_out)
def get_value(self):
value = None
try:
value = self.cast(self.get_text())
except ValueError:
pass
return value
def set_value(self, value):
self.set_text(str(value))
self.setfunc(self.option_name, value)
def focus_out(self, entry, widget):
value = self.get_value()
if value is None:
return
if (self.minimum is not None) and (value < self.minimum):
value = self.minimum
if (self.maximum is not None) and (value > self.maximum):
value = self.maximum
self.set_value(value)
def text_inserted(self, entry, input, position, user_data):
for i in input:
if (self.valid_chars is not None) and (i not in self.valid_chars):
self.emit_stop_by_name('insert-text')
return True
return False
class IPValidator(Validator):
valid_chars = '1234567890.'
width = 128
cast = str
class PortValidator(Validator):
width = 64
minimum = 1024
maximum = 65535
def add_end(self, end_name):
self.end_option_name = end_name
def set_value(self, value):
self.set_text(str(value))
self.setfunc(self.option_name, value)
self.setfunc(self.end_option_name, value+PORT_RANGE)
class PercentValidator(Validator):
width = 48
minimum = 0
class MinutesValidator(Validator):
width = 48
minimum = 1
class EnterUrlDialog(MessageDialog):
flags = gtk.DIALOG_DESTROY_WITH_PARENT
def __init__(self, parent):
self.entry = gtk.Entry()
self.entry.show()
self.main = parent
MessageDialog.__init__(self, parent.mainwindow,
_("Enter torrent URL"),
_("Enter the URL of a torrent file to open:"),
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=lambda *args: parent.open_url(self.entry.get_text()),
default=gtk.RESPONSE_OK
)
hbox = gtk.HBox()
hbox.pack_start(self.entry, padding=SPACING)
hbox.show()
self.entry.set_activates_default(True)
self.entry.set_flags(gtk.CAN_FOCUS)
self.vbox.pack_start(hbox)
self.entry.grab_focus()
def close(self, *args):
self.destroy()
def destroy(self):
MessageDialog.destroy(self)
self.main.window_closed('enterurl')
class RateSliderBox(gtk.VBox):
base = 10
multiplier = 4
max_exponent = 3.3
def __init__(self, config, torrentqueue):
gtk.VBox.__init__(self, homogeneous=False)
self.config = config
self.torrentqueue = torrentqueue
if self.config['max_upload_rate'] < self.slider_to_rate(0):
self.config['max_upload_rate'] = self.slider_to_rate(0)
self.speed_classes = {
( 4, 5):_("dialup" ),
( 6, 14):_("DSL/cable 128k up"),
( 15, 29):_("DSL/cable 256k up"),
( 30, 91):_("DSL 768k up" ),
( 92, 137):_("T1" ),
( 138, 182):_("T1/E1" ),
( 183, 249):_("E1" ),
( 250, 5446):_("T3" ),
(5447,18871):_("OC3" ),
}
biggest_size = 0
for v in self.speed_classes.values():
width = gtk.Label(v).size_request()[0]
if width > biggest_size:
biggest_size = width
self.rate_slider_label_box = gtk.HBox(spacing=SPACING,
homogeneous=True)
self.rate_slider_label = gtk.Label(_("Maximum upload rate:"))
self.rate_slider_label.set_ellipsize(pango.ELLIPSIZE_START)
self.rate_slider_label.set_alignment(1, 0.5)
self.rate_slider_label_box.pack_start(self.rate_slider_label,
expand=True, fill=True)
self.rate_slider_value = gtk.Label(
self.value_to_label(self.config['max_upload_rate']))
self.rate_slider_value.set_alignment(0, 0.5)
self.rate_slider_value.set_size_request(biggest_size, -1)
self.rate_slider_label_box.pack_start(self.rate_slider_value,
expand=True, fill=True)
self.rate_slider_adj = gtk.Adjustment(
self.rate_to_slider(self.config['max_upload_rate']), 0,
self.max_exponent, 0.01, 0.1)
self.rate_slider = gtk.HScale(self.rate_slider_adj)
self.rate_slider.set_draw_value(False)
self.rate_slider_adj.connect('value_changed', self.set_max_upload_rate)
self.pack_start(self.rate_slider , expand=False, fill=False)
self.pack_start(self.rate_slider_label_box , expand=False, fill=False)
if False: # this shows the legend for the slider
self.rate_slider_legend = gtk.HBox(homogeneous=True)
for i in range(int(self.max_exponent+1)):
label = gtk.Label(str(self.slider_to_rate(i)))
alabel = halign(label, i/self.max_exponent)
self.rate_slider_legend.pack_start(alabel,
expand=True, fill=True)
self.pack_start(self.rate_slider_legend, expand=False, fill=False)
def start(self):
self.set_max_upload_rate(self.rate_slider_adj)
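# the slider scale is logarithmic: position p corresponds to an upload rate of multiplier * base**p kB/s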
def rate_to_slider(self, value):
return math.log(value/self.multiplier, self.base)
def slider_to_rate(self, value):
return int(round(self.base**value * self.multiplier))
def value_to_label(self, value):
conn_type = ''
for key, conn in self.speed_classes.items():
min_v, max_v = key
if min_v <= value <= max_v:
conn_type = ' (%s)'%conn
break
label = str(Rate(value*1024)) + conn_type
return label
def set_max_upload_rate(self, adj):
option = 'max_upload_rate'
value = self.slider_to_rate(adj.get_value())
self.config[option] = value
self.torrentqueue.set_config(option, value)
self.rate_slider_value.set_text(self.value_to_label(int(value)))
class StopStartButton(gtk.Button):
stop_tip = _("Temporarily stop all running torrents")
start_tip = _("Resume downloading")
def __init__(self, main):
gtk.Button.__init__(self)
self.main = main
self.connect('clicked', self.toggle)
self.stop_image = gtk.Image()
self.stop_image.set_from_stock('bt-pause', gtk.ICON_SIZE_BUTTON)
self.stop_image.show()
self.start_image = gtk.Image()
self.start_image.set_from_stock('bt-play', gtk.ICON_SIZE_BUTTON)
self.start_image.show()
def toggle(self, widget):
self.set_paused(not self.main.config['pause'])
def set_paused(self, paused):
image = self.get_child()
if paused:
if image == self.stop_image:
self.remove(self.stop_image)
if image != self.start_image:
self.add(self.start_image)
self.main.tooltips.set_tip(self, self.start_tip)
self.main.stop_queue()
else:
if image == self.start_image:
self.remove(self.start_image)
if image != self.stop_image:
self.add(self.stop_image)
self.main.tooltips.set_tip(self, self.stop_tip )
self.main.restart_queue()
class VersionWindow(Window):
def __init__(self, main, newversion, download_url):
Window.__init__(self)
self.set_title(_("New %s version available")%app_name)
self.set_border_width(SPACING)
self.set_resizable(False)
self.main = main
self.newversion = newversion
self.download_url = download_url
self.connect('destroy', lambda w: self.main.window_closed('version'))
self.vbox = gtk.VBox(spacing=SPACING)
self.hbox = gtk.HBox(spacing=SPACING)
self.image = gtk.Image()
self.image.set_from_stock(gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_DIALOG)
self.hbox.pack_start(self.image)
self.label = gtk.Label()
self.label.set_markup(
(_("A newer version of %s is available.\n") % app_name) +
(_("You are using %s, and the new version is %s.\n") % (version, newversion)) +
(_("You can always get the latest version from \n%s") % self.download_url)
)
self.label.set_selectable(True)
self.hbox.pack_start(self.label)
self.vbox.pack_start(self.hbox)
self.bbox = gtk.HBox(spacing=SPACING)
self.closebutton = gtk.Button(_("Download _later"))
self.closebutton.connect('clicked', self.close)
self.newversionbutton = gtk.Button(_("Download _now"))
self.newversionbutton.connect('clicked', self.get_newversion)
self.bbox.pack_end(self.newversionbutton, expand=False, fill=False)
self.bbox.pack_end(self.closebutton , expand=False, fill=False)
self.checkbox = gtk.CheckButton(_("_Remind me later"))
self.checkbox.set_active(True)
self.checkbox.connect('toggled', self.remind_toggle)
self.bbox.pack_start(self.checkbox, expand=False, fill=False)
self.vbox.pack_start(self.bbox)
self.add(self.vbox)
self.show_all()
def remind_toggle(self, widget):
v = self.checkbox.get_active()
notified = ''
if v:
notified = ''
else:
notified = self.newversion
self.main.set_config('notified', str(notified))
def close(self, widget):
self.destroy()
def get_newversion(self, widget):
if self.main.updater.can_install():
if self.main.updater.torrentfile is None:
self.main.visit_url(self.download_url)
else:
self.main.start_auto_update()
else:
self.main.visit_url(self.download_url)
self.destroy()
class AboutWindow(object):
def __init__(self, main, donatefunc):
self.win = Window()
self.win.set_title(_("About %s")%app_name)
self.win.set_size_request(300,400)
self.win.set_border_width(SPACING)
self.win.set_resizable(False)
self.win.connect('destroy', lambda w: main.window_closed('about'))
self.scroll = gtk.ScrolledWindow()
self.scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.scroll.set_shadow_type(gtk.SHADOW_IN)
self.outervbox = gtk.VBox()
self.outervbox.pack_start(get_logo(96), expand=False, fill=False)
version_str = version
if int(version_str[2]) % 2:
version_str = version_str + ' ' + _("Beta")
self.outervbox.pack_start(gtk.Label(_("Version %s")%version_str), expand=False, fill=False)
if branch is not None:
blabel = gtk.Label('cdv client dir: %s' % branch)
self.outervbox.pack_start(blabel, expand=False, fill=False)
self.vbox = gtk.VBox()
self.vbox.set_size_request(250, -1)
for i, fn in enumerate(('credits', 'credits-l10n')):
if i != 0:
self.vbox.pack_start(gtk.HSeparator(), padding=SPACING,
expand=False, fill=False)
filename = os.path.join(doc_root, fn+'.txt')
l = ''
if not os.access(filename, os.F_OK|os.R_OK):
l = _("Couldn't open %s") % filename
else:
credits_f = file(filename)
l = credits_f.read()
credits_f.close()
if os.name == 'nt':
# gtk ignores blank lines on win98
l = l.replace('\n\n', '\n\t\n')
label = gtk.Label(l.strip())
label.set_line_wrap(True)
label.set_selectable(True)
label.set_justify(gtk.JUSTIFY_CENTER)
label.set_size_request(250,-1)
self.vbox.pack_start(label, expand=False, fill=False)
self.scroll.add_with_viewport(self.vbox)
self.outervbox.pack_start(self.scroll, padding=SPACING)
self.donatebutton = gtk.Button(_("Donate"))
self.donatebutton.connect('clicked', donatefunc)
self.donatebuttonbox = gtk.HButtonBox()
self.donatebuttonbox.pack_start(self.donatebutton,
expand=False, fill=False)
self.outervbox.pack_end(self.donatebuttonbox, expand=False, fill=False)
self.win.add(self.outervbox)
self.win.show_all()
def close(self, widget):
self.win.destroy()
class LogWindow(object):
def __init__(self, main, logbuffer, config):
self.config = config
self.main = main
self.win = Window()
self.win.set_title(_("%s Activity Log")%app_name)
self.win.set_default_size(600, 200)
self.win.set_border_width(SPACING)
self.buffer = logbuffer
self.text = gtk.TextView(self.buffer)
self.text.set_editable(False)
self.text.set_cursor_visible(False)
self.text.set_wrap_mode(gtk.WRAP_WORD)
self.scroll = gtk.ScrolledWindow()
self.scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.scroll.set_shadow_type(gtk.SHADOW_IN)
self.scroll.add(self.text)
self.vbox = gtk.VBox(spacing=SPACING)
self.vbox.pack_start(self.scroll)
self.buttonbox = gtk.HButtonBox()
self.buttonbox.set_spacing(SPACING)
self.closebutton = gtk.Button(stock='gtk-close')
self.closebutton.connect('clicked', self.close)
self.savebutton = gtk.Button(stock='gtk-save')
self.savebutton.connect('clicked', self.save_log_file_selection)
self.clearbutton = gtk.Button(stock='gtk-clear')
self.clearbutton.connect('clicked', self.clear_log)
self.buttonbox.pack_start(self.savebutton)
self.buttonbox.pack_start(self.closebutton)
self.hbox2 = gtk.HBox(homogeneous=False)
self.hbox2.pack_end(self.buttonbox, expand=False, fill=False)
bb = gtk.HButtonBox()
bb.pack_start(self.clearbutton)
self.hbox2.pack_start(bb, expand=False, fill=True)
self.vbox.pack_end(self.hbox2, expand=False, fill=True)
self.win.add(self.vbox)
self.win.connect("destroy", lambda w: self.main.window_closed('log'))
self.scroll_to_end()
self.win.show_all()
def scroll_to_end(self):
mark = self.buffer.create_mark(None, self.buffer.get_end_iter())
self.text.scroll_mark_onscreen(mark)
def save_log_file_selection(self, *args):
name = 'bittorrent.log'
path = smart_dir(self.config['save_in'])
fullname = os.path.join(path, name)
self.main.open_window('savefile',
title=_("Save log in:"),
fullname=fullname,
got_location_func=self.save_log,
no_location_func=lambda: self.main.window_closed('savefile'))
def save_log(self, saveas):
self.main.window_closed('savefile')
f = file(saveas, 'w')
f.write(self.buffer.get_text(self.buffer.get_start_iter(),
self.buffer.get_end_iter()))
save_message = self.buffer.log_text(_("log saved"), None)
f.write(save_message)
f.close()
def clear_log(self, *args):
self.buffer.clear_log()
def close(self, widget):
self.win.destroy()
class LogBuffer(gtk.TextBuffer):
def __init__(self):
gtk.TextBuffer.__init__(self)
tt = self.get_tag_table()
size_tag = gtk.TextTag('small')
size_tag.set_property('size-points', 10)
tt.add(size_tag)
info_tag = gtk.TextTag('info')
info_tag.set_property('foreground', '#00a040')
tt.add(info_tag)
warning_tag = gtk.TextTag('warning')
warning_tag.set_property('foreground', '#a09000')
tt.add(warning_tag)
error_tag = gtk.TextTag('error')
error_tag.set_property('foreground', '#b00000')
tt.add(error_tag)
critical_tag = gtk.TextTag('critical')
critical_tag.set_property('foreground', '#b00000')
critical_tag.set_property('weight', pango.WEIGHT_BOLD)
tt.add(critical_tag)
def log_text(self, text, severity=CRITICAL):
now_str = datetime.datetime.strftime(datetime.datetime.now(),
'[%Y-%m-%d %H:%M:%S] ')
self.insert_with_tags_by_name(self.get_end_iter(), now_str, 'small')
if severity is not None:
self.insert_with_tags_by_name(self.get_end_iter(), '%s\n'%text,
'small', status_dict[severity])
else:
self.insert_with_tags_by_name(self.get_end_iter(),
' -- %s -- \n'%text, 'small')
return now_str+text+'\n'
def clear_log(self):
self.set_text('')
self.log_text(_("log cleared"), None)
class CheckButton(gtk.CheckButton):
def __init__(self, label, main, option_name, initial_value,
extra_callback=None):
gtk.CheckButton.__init__(self, label)
self.main = main
self.option_name = option_name
self.option_type = type(initial_value)
self.set_active(bool(initial_value))
self.extra_callback = extra_callback
self.connect('toggled', self.callback)
def callback(self, *args):
self.main.config[self.option_name] = \
self.option_type(not self.main.config[self.option_name])
self.main.setfunc(self.option_name, self.main.config[self.option_name])
if self.extra_callback is not None:
self.extra_callback()
class SettingsWindow(object):
def __init__(self, main, config, setfunc):
self.main = main
self.setfunc = setfunc
self.config = config
self.win = Window()
self.win.connect("destroy", lambda w: main.window_closed('settings'))
self.win.set_title(_("%s Settings")%app_name)
self.win.set_border_width(SPACING)
self.notebook = gtk.Notebook()
self.vbox = gtk.VBox(spacing=SPACING)
self.vbox.pack_start(self.notebook, expand=False, fill=False)
# General tab
if os.name == 'nt':
self.cb_box = gtk.VBox(spacing=SPACING)
self.cb_box.set_border_width(SPACING)
self.notebook.append_page(self.cb_box, gtk.Label(_("General")))
self.startup_checkbutton = CheckButton(
_("Launch BitTorrent when Windows starts"), self,
'launch_on_startup', self.config['launch_on_startup'])
self.cb_box.pack_start(self.startup_checkbutton, expand=False, fill=False)
self.startup_checkbutton.connect('toggled', self.launch_on_startup)
self.minimize_checkbutton = CheckButton(
_("Minimize to system tray"), self,
'minimize_to_tray', self.config['minimize_to_tray'])
self.cb_box.pack_start(self.minimize_checkbutton, expand=False, fill=False)
# allow the user to set the progress bar text to all black
self.progressbar_hack = CheckButton(
_("Progress bar text is always black\n(requires restart)"),
self, 'progressbar_hack', self.config['progressbar_hack'])
self.cb_box.pack_start(self.progressbar_hack, expand=False, fill=False)
# end General tab
# Saving tab
self.saving_box = gtk.VBox(spacing=SPACING)
self.saving_box.set_border_width(SPACING)
self.notebook.append_page(self.saving_box, gtk.Label(_("Saving")))
self.dl_frame = gtk.Frame(_("Save new downloads in:"))
self.saving_box.pack_start(self.dl_frame, expand=False, fill=False)
self.dl_box = gtk.VBox(spacing=SPACING)
self.dl_box.set_border_width(SPACING)
self.dl_frame.add(self.dl_box)
self.save_in_box = gtk.HBox(spacing=SPACING)
self.dl_save_in = gtk.Entry()
self.dl_save_in.set_editable(False)
self.set_save_in(self.config['save_in'])
self.save_in_box.pack_start(self.dl_save_in, expand=True, fill=True)
self.dl_save_in_button = gtk.Button(_("Change..."))
self.dl_save_in_button.connect('clicked', self.get_save_in)
self.save_in_box.pack_start(self.dl_save_in_button, expand=False, fill=False)
self.dl_box.pack_start(self.save_in_box, expand=False, fill=False)
self.dl_ask_checkbutton = CheckButton(
_("Ask where to save each new download"), self,
'ask_for_save', self.config['ask_for_save'])
self.dl_box.pack_start(self.dl_ask_checkbutton, expand=False, fill=False)
# end Saving tab
# Downloading tab
self.downloading_box = gtk.VBox(spacing=SPACING)
self.downloading_box.set_border_width(SPACING)
self.notebook.append_page(self.downloading_box, gtk.Label(_("Downloading")))
self.dnd_frame = gtk.Frame(_("When starting a new torrent:"))
self.dnd_box = gtk.VBox(spacing=SPACING, homogeneous=True)
self.dnd_box.set_border_width(SPACING)
self.dnd_states = ['replace','add','ask']
self.dnd_original_state = self.config['start_torrent_behavior']
self.always_replace_radio = gtk.RadioButton(
group=None,
label=_("_Stop another running torrent to make room"))
self.dnd_box.pack_start(self.always_replace_radio)
self.always_replace_radio.state_name = self.dnd_states[0]
self.always_add_radio = gtk.RadioButton(
group=self.always_replace_radio,
label=_("_Don't stop other running torrents"))
self.dnd_box.pack_start(self.always_add_radio)
self.always_add_radio.state_name = self.dnd_states[1]
self.always_ask_radio = gtk.RadioButton(
group=self.always_replace_radio,
label=_("_Ask each time")
)
self.dnd_box.pack_start(self.always_ask_radio)
self.always_ask_radio.state_name = self.dnd_states[2]
self.dnd_group = self.always_replace_radio.get_group()
for r in self.dnd_group:
r.connect('toggled', self.start_torrent_behavior_changed)
self.set_start_torrent_behavior(self.config['start_torrent_behavior'])
self.dnd_frame.add(self.dnd_box)
self.downloading_box.pack_start(self.dnd_frame, expand=False, fill=False)
# Seeding tab
self.seeding_box = gtk.VBox(spacing=SPACING)
self.seeding_box.set_border_width(SPACING)
self.notebook.append_page(self.seeding_box, gtk.Label(_("Seeding")))
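# split a translated "Title: remainder" string at either an ASCII colon or a full-width (CJK) colon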
def colon_split(framestr):
COLONS = (':', u'\uff1a')
for colon in COLONS:
if colon in framestr:
return framestr.split(colon)
return '', framestr
nt_framestr = _("Seed completed torrents: until share ratio reaches [_] percent, or for [_] minutes, whichever comes first.")
nt_title, nt_rem = colon_split(nt_framestr)
nt_msg1, nt_msg2, nt_msg4 = nt_rem.split('[_]')
nt_msg3 = ''
if ',' in nt_msg2:
nt_msg2, nt_msg3 = nt_msg2.split(',')
nt_msg2 += ','
self.next_torrent_frame = gtk.Frame(nt_title+':')
self.next_torrent_box = gtk.VBox(spacing=SPACING, homogeneous=True)
self.next_torrent_box.set_border_width(SPACING)
self.next_torrent_frame.add(self.next_torrent_box)
self.next_torrent_ratio_box = gtk.HBox()
self.next_torrent_ratio_box.pack_start(gtk.Label(nt_msg1),
fill=False, expand=False)
self.next_torrent_ratio_field = PercentValidator('next_torrent_ratio',
self.config, self.setfunc)
self.next_torrent_ratio_box.pack_start(self.next_torrent_ratio_field,
fill=False, expand=False)
self.next_torrent_ratio_box.pack_start(gtk.Label(nt_msg2),
fill=False, expand=False)
self.next_torrent_box.pack_start(self.next_torrent_ratio_box)
self.next_torrent_time_box = gtk.HBox()
self.next_torrent_time_box.pack_start(gtk.Label(nt_msg3),
fill=False, expand=False)
self.next_torrent_time_field = MinutesValidator('next_torrent_time',
self.config, self.setfunc)
self.next_torrent_time_box.pack_start(self.next_torrent_time_field,
fill=False, expand=False)
self.next_torrent_time_box.pack_start(gtk.Label(nt_msg4),
fill=False, expand=False)
self.next_torrent_box.pack_start(self.next_torrent_time_box)
def seed_forever_extra():
for field in (self.next_torrent_ratio_field,
self.next_torrent_time_field):
field.set_sensitive(not self.config['seed_forever'])
seed_forever_extra()
self.seed_forever = CheckButton( _("Seed indefinitely"), self,
'seed_forever',
self.config['seed_forever'],
seed_forever_extra)
self.next_torrent_box.pack_start(self.seed_forever)
# end next torrent seed behavior
# begin last torrent seed behavior
lt_framestr = _("Seed last completed torrent: until share ratio reaches [_] percent.")
lt_title, lt_rem = colon_split(lt_framestr)
lt_msg1, lt_msg2 = lt_rem.split('[_]')
self.seeding_box.pack_start(self.next_torrent_frame, expand=False, fill=False)
self.last_torrent_frame = gtk.Frame(lt_title+':')
self.last_torrent_vbox = gtk.VBox(spacing=SPACING)
self.last_torrent_vbox.set_border_width(SPACING)
self.last_torrent_box = gtk.HBox()
self.last_torrent_box.pack_start(gtk.Label(lt_msg1),
expand=False, fill=False)
self.last_torrent_ratio_field = PercentValidator('last_torrent_ratio',
self.config, self.setfunc)
self.last_torrent_box.pack_start(self.last_torrent_ratio_field,
fill=False, expand=False)
self.last_torrent_box.pack_start(gtk.Label(lt_msg2),
fill=False, expand=False)
self.last_torrent_vbox.pack_start(self.last_torrent_box)
def seed_last_forever_extra():
self.last_torrent_ratio_field.set_sensitive(
not self.config['seed_last_forever'])
seed_last_forever_extra()
self.seed_last_forever = CheckButton(_("Seed indefinitely"), self,
'seed_last_forever',
self.config['seed_last_forever'],
seed_last_forever_extra)
self.last_torrent_vbox.pack_start(self.seed_last_forever)
self.last_torrent_frame.add(self.last_torrent_vbox)
self.seeding_box.pack_start(self.last_torrent_frame, expand=False, fill=False)
# Network tab
self.network_box = gtk.VBox(spacing=SPACING)
self.network_box.set_border_width(SPACING)
self.notebook.append_page(self.network_box, gtk.Label(_("Network")))
self.port_range_frame = gtk.Frame(_("Look for available port:"))
self.port_range_box = gtk.VBox(spacing=SPACING)
self.port_range_box.set_border_width(SPACING)
self.port_range = gtk.HBox()
self.port_range.pack_start(gtk.Label(_("starting at port: ")),
expand=False, fill=False)
self.minport_field = PortValidator('minport', self.config, self.setfunc)
self.minport_field.add_end('maxport')
self.port_range.pack_start(self.minport_field, expand=False, fill=False)
self.minport_field.settingswindow = self
self.port_range.pack_start(gtk.Label(' (1024-65535)'),
expand=False, fill=False)
self.port_range_box.pack_start(self.port_range,
expand=False, fill=False)
self.upnp = CheckButton(_("Enable automatic port mapping")+' (_UPnP)',
self, 'upnp', self.config['upnp'], None)
self.port_range_box.pack_start(self.upnp,
expand=False, fill=False)
self.port_range_frame.add(self.port_range_box)
self.network_box.pack_start(self.port_range_frame, expand=False, fill=False)
self.ip_frame = gtk.Frame(_("IP to report to the tracker:"))
self.ip_box = gtk.VBox()
self.ip_box.set_border_width(SPACING)
self.ip_field = IPValidator('ip', self.config, self.setfunc)
self.ip_box.pack_start(self.ip_field, expand=False, fill=False)
label = gtk.Label(_("(Has no effect unless you are on the\nsame local network as the tracker)"))
label.set_line_wrap(True)
self.ip_box.pack_start(lalign(label), expand=False, fill=False)
self.ip_frame.add(self.ip_box)
self.network_box.pack_start(self.ip_frame, expand=False, fill=False)
# end Network tab
# Language tab
self.languagechooser = LanguageChooser()
self.notebook.append_page(self.languagechooser, gtk.Label("Language"))
# end Language tab
# Advanced tab
if advanced_ui:
self.advanced_box = gtk.VBox(spacing=SPACING)
self.advanced_box.set_border_width(SPACING)
hint = gtk.Label(_("WARNING: Changing these settings can\nprevent %s from functioning correctly.")%app_name)
self.advanced_box.pack_start(lalign(hint), expand=False, fill=False)
self.store = gtk.ListStore(*[gobject.TYPE_STRING] * 2)
for option in ui_options[advanced_ui_options_index:]:
self.store.append((option, str(self.config[option])))
self.treeview = gtk.TreeView(self.store)
r = gtk.CellRendererText()
column = gtk.TreeViewColumn(_("Option"), r, text=0)
self.treeview.append_column(column)
r = gtk.CellRendererText()
r.set_property('editable', True)
r.connect('edited', self.store_value_edited)
column = gtk.TreeViewColumn(_("Value"), r, text=1)
self.treeview.append_column(column)
self.advanced_frame = gtk.Frame()
self.advanced_frame.set_shadow_type(gtk.SHADOW_IN)
self.advanced_frame.add(self.treeview)
self.advanced_box.pack_start(self.advanced_frame, expand=False, fill=False)
self.notebook.append_page(self.advanced_box, gtk.Label(_("Advanced")))
self.win.add(self.vbox)
self.win.show_all()
def get_save_in(self, widget=None):
self.file_selection = self.main.open_window('choosefolder',
title=_("Choose default download directory"),
fullname=self.config['save_in'],
got_location_func=self.set_save_in,
no_location_func=lambda: self.main.window_closed('choosefolder'))
def set_save_in(self, save_location):
self.main.window_closed('choosefolder')
if os.path.isdir(save_location):
if save_location[-1] != os.sep:
save_location += os.sep
self.config['save_in'] = save_location
save_in = path_wrap(self.config['save_in'])
self.dl_save_in.set_text(save_in)
self.setfunc('save_in', self.config['save_in'])
def launch_on_startup(self, *args):
dst = os.path.join(get_startup_dir(), app_name)
if self.config['launch_on_startup']:
src = os.path.abspath(sys.argv[0])
create_shortcut(src, dst, "--start_minimized")
else:
try:
remove_shortcut(dst)
except Exception, e:
self.main.global_error(WARNING, _("Failed to remove shortcut: %s") % str(e))
def set_start_torrent_behavior(self, state_name):
if state_name in self.dnd_states:
for r in self.dnd_group:
if r.state_name == state_name:
r.set_active(True)
else:
r.set_active(False)
else:
self.always_replace_radio.set_active(True)
def start_torrent_behavior_changed(self, radiobutton):
if radiobutton.get_active():
self.setfunc('start_torrent_behavior', radiobutton.state_name)
def store_value_edited(self, cell, row, new_text):
it = self.store.get_iter_from_string(row)
option = ui_options[int(row)+advanced_ui_options_index]
t = type(defconfig[option])
try:
if t is type(None) or t is str:
value = new_text
elif t is int or t is long:
value = int(new_text)
elif t is float:
value = float(new_text)
elif t is bool:
value = new_text == 'True'
else:
raise TypeError, str(t)
except ValueError:
return
self.setfunc(option, value)
self.store.set(it, 1, str(value))
def close(self, widget):
self.win.destroy()
class FileListWindow(object):
SET_PRIORITIES = False
def __init__(self, metainfo, closefunc):
self.metainfo = metainfo
self.setfunc = None
self.allocfunc = None
self.win = Window()
self.win.set_title(_('Files in "%s"') % self.metainfo.name)
self.win.connect("destroy", closefunc)
self.tooltips = gtk.Tooltips()
self.filepath_to_iter = {}
self.box1 = gtk.VBox()
size_request = (0,0)
if self.SET_PRIORITIES:
self.toolbar = gtk.Toolbar()
for label, tip, stockicon, method, arg in (
(_("Never" ), _("Never download" ), gtk.STOCK_DELETE, self.dosomething, -1,),
(_("Normal"), _("Download normally"), gtk.STOCK_NEW , self.dosomething, 0,),
(_("First" ), _("Download first" ),'bt-finished' , self.dosomething, +1,),):
self.make_tool_item(label, tip, stockicon, method, arg)
size_request = (-1,54)
self.box1.pack_start(self.toolbar, False)
self.sw = gtk.ScrolledWindow()
self.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.box1.pack_start(self.sw)
self.win.add(self.box1)
columns = [_("Filename"),_("Length"),_('%')]
pre_size_list = ['M'*30, '6666 MB', '100.0', 'Download','black']
if self.SET_PRIORITIES:
columns.append(_("Download"))
num_columns = len(pre_size_list)
self.store = gtk.TreeStore(*[gobject.TYPE_STRING] * num_columns)
self.store.append(None, pre_size_list)
self.treeview = gtk.TreeView(self.store)
self.treeview.set_enable_search(True)
self.treeview.set_search_column(0)
cs = []
for i, name in enumerate(columns):
r = gtk.CellRendererText()
r.set_property('xalign', (0, 1, 1, 1)[i])
if i == 0:
column = gtk.TreeViewColumn(name, r, text = i, foreground = len(pre_size_list)-1)
else:
column = gtk.TreeViewColumn(name, r, text = i)
column.set_resizable(True)
self.treeview.append_column(column)
cs.append(column)
self.sw.add(self.treeview)
self.treeview.set_headers_visible(False)
self.treeview.columns_autosize()
self.box1.show_all()
self.treeview.realize()
for column in cs:
column.set_fixed_width(max(5,column.get_width()))
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
self.treeview.set_headers_visible(True)
self.store.clear()
if self.SET_PRIORITIES:
self.treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
else:
self.treeview.get_selection().set_mode(gtk.SELECTION_NONE)
self.piecelen = self.metainfo.piece_length
self.lengths = self.metainfo.sizes
self.initialize_file_priorities()#[0,0])
for name, size, priority in itertools.izip(self.metainfo.orig_files,
self.metainfo.sizes, self.priorities):
parent_name, local_name = os.path.split(name)
parent_iter = self.recursive_add(parent_name)
row = [local_name, Size(size), '?','', 'black']
it = self.store.append(parent_iter, row)
self.filepath_to_iter[name] = it
self.treeview.expand_all()
tvsr = self.treeview.size_request()
vertical_padding = 18
size_request = [max(size_request[0],tvsr[0]),
(size_request[1] + tvsr[1] ) + vertical_padding]
maximum_height = 300
if size_request[1] > maximum_height - SCROLLBAR_WIDTH:
size_request[1] = maximum_height
size_request[0] = size_request[0] + SCROLLBAR_WIDTH
self.win.set_default_size(*size_request)
self.win.show_all()
def recursive_add(self, fullpath):
if fullpath == '':
return None
elif self.filepath_to_iter.has_key(fullpath):
return self.filepath_to_iter[fullpath]
else:
parent_path, local_path = os.path.split(fullpath)
parent_iter = self.recursive_add(parent_path)
it = self.store.append(parent_iter,
(local_path,) +
('',) * (self.store.get_n_columns()-2) +
('black',))
self.filepath_to_iter[fullpath] = it
return it
def make_tool_item(self, label, tip, stockicon, method, arg):
icon = gtk.Image()
icon.set_from_stock(stockicon, gtk.ICON_SIZE_SMALL_TOOLBAR)
item = gtk.ToolButton(icon_widget=icon, label=label)
item.set_homogeneous(True)
item.set_tooltip(self.tooltips, tip)
if arg is not None:
item.connect('clicked', method, arg)
else:
item.connect('clicked', method)
self.toolbar.insert(item, 0)
def initialize_file_priorities(self):
self.priorities = []
for length in self.lengths:
self.priorities.append(0)
## Uoti wrote these methods. I have no idea what this code is supposed to do.
## --matt
## def set_priorities(self, widget):
## r = []
## piece = 0
## pos = 0
## curprio = prevprio = 1000
## for priority, length in itertools.izip(self.priorities, self.lengths):
## pos += length
## curprio = min(priority, curprio)
## while pos >= (piece + 1) * self.piecelen:
## if curprio != prevprio:
## r.extend((piece, curprio))
## prevprio = curprio
## if curprio == priority:
## piece = pos // self.piecelen
## else:
## piece += 1
## if pos == piece * self.piecelen:
## curprio = 1000
## else:
## curprio = priority
## if curprio != prevprio:
## r.extend((piece, curprio))
## self.setfunc(r)
## it = self.store.get_iter_first()
## for i in xrange(len(self.priorities)):
## self.store.set_value(it, 5, "black")
## it = self.store.iter_next(it)
## self.origpriorities = list(self.priorities)
##
## def initialize_file_priorities(self, piecepriorities):
## self.priorities = []
## piecepriorities = piecepriorities + [999999999]
## it = iter(piecepriorities)
## assert it.next() == 0
## pos = piece = curprio = 0
## for length in self.lengths:
## pos += length
## priority = curprio
## while pos >= piece * self.piecelen:
## curprio = it.next()
## if pos > piece * self.piecelen:
## priority = max(priority, curprio)
## piece = it.next()
## self.priorities.append(priority)
## self.origpriorities = list(self.priorities)
def dosomething(self, widget, dowhat):
self.treeview.get_selection().selected_foreach(self.adjustfile, dowhat)
def adjustfile(self, treemodel, path, it, dowhat):
length = treemodel.get(it, 1)[0]
if length == '':
child = treemodel.iter_children(it)
while True:
if child is None:
return
elif not treemodel.is_ancestor(it, child):
return
else:
self.adjustfile(treemodel, path, child, dowhat)
child = treemodel.iter_next(child)
else:
# BUG: need to set file priorities in backend here
if dowhat == -1:
text, color = _("never"), 'darkgrey'
elif dowhat == 1:
text, color = _("first"), 'darkgreen'
else:
text, color = '', 'black'
treemodel.set_value(it, 3, text )
treemodel.set_value(it, 4, color)
def update(self, left, allocated):
for name, left, total, alloc in itertools.izip(
self.metainfo.orig_files, left, self.lengths, allocated):
it = self.filepath_to_iter[name]
if total == 0:
p = 1
else:
p = (total - left) / total
self.store.set_value(it, 2, "%.1f" % (int(p * 1000)/10))
def close(self):
self.win.destroy()
class PeerListWindow(object):
def __init__(self, torrent_name, closefunc):
self.win = Window()
self.win.connect("destroy", closefunc)
self.win.set_title( _('Peers for "%s"')%torrent_name)
self.sw = gtk.ScrolledWindow()
self.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.sw.set_shadow_type(gtk.SHADOW_IN)
self.win.add(self.sw)
column_header = [_("IP address"), _("Client"), _("Connection"), _("KB/s down"), _("KB/s up"), _("MB downloaded"), _("MB uploaded"), _("% complete"), _("KB/s est. peer download")]
pre_size_list = ['666.666.666.666', 'TorrentStorm 1.3', 'bad peer', 66666, 66666, '1666.66', '1666.66', '100.0', 6666]
numeric_cols = [3,4,5,6,7,8]
store_types = [gobject.TYPE_STRING]*3 + [gobject.TYPE_INT]*2 + [gobject.TYPE_STRING]*3 + [gobject.TYPE_INT]
if advanced_ui:
column_header[2:2] = [_("Peer ID")]
pre_size_list[2:2] = ['-AZ2104-']
store_types[2:2] = [gobject.TYPE_STRING]
column_header[5:5] = [_("Interested"),_("Choked"),_("Snubbed")]
pre_size_list[5:5] = ['*','*','*']
store_types[5:5] = [gobject.TYPE_STRING]*3
column_header[9:9] = [_("Interested"),_("Choked"),_("Optimistic upload")]
pre_size_list[9:9] = ['*','*','*']
store_types[9:9] = [gobject.TYPE_STRING]*3
numeric_cols = [4,8,12,13,14,15]
num_columns = len(column_header)
self.store = gtk.ListStore(*store_types)
self.store.append(pre_size_list)
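# wrap a plain two-argument comparison so it can be used as a gtk.TreeModel sort function for a given column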
def makesortfunc(sort_func):
def sortfunc(treemodel, iter1, iter2, column):
a_str = treemodel.get_value(iter1, column)
b_str = treemodel.get_value(iter2, column)
if a_str is not None and b_str is not None:
return sort_func(a_str,b_str)
else:
return 0
return sortfunc
def ip_sort(a_str,b_str):
for a,b in zip(a_str.split('.'), b_str.split('.')):
if a == b:
continue
if len(a) == len(b):
return cmp(a,b)
return cmp(int(a), int(b))
return 0
def float_sort(a_str,b_str):
a,b = 0,0
try: a = float(a_str)
except ValueError: pass
try: b = float(b_str)
except ValueError: pass
return cmp(a,b)
self.store.set_sort_func(0, makesortfunc(ip_sort), 0)
for i in range(2,5):
self.store.set_sort_func(num_columns-i, makesortfunc(float_sort), num_columns-i)
self.treeview = gtk.TreeView(self.store)
cs = []
for i, name in enumerate(column_header):
r = gtk.CellRendererText()
if i in numeric_cols:
r.set_property('xalign', 1)
column = gtk.TreeViewColumn(name, r, text = i)
column.set_resizable(True)
column.set_min_width(5)
column.set_sort_column_id(i)
self.treeview.append_column(column)
cs.append(column)
self.treeview.set_rules_hint(True)
self.sw.add(self.treeview)
self.treeview.set_headers_visible(False)
self.treeview.columns_autosize()
self.sw.show_all()
self.treeview.realize()
for column in cs:
column.set_fixed_width(column.get_width())
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
self.treeview.set_headers_visible(True)
self.store.clear()
self.treeview.get_selection().set_mode(gtk.SELECTION_NONE)
width = self.treeview.size_request()[0]
self.win.set_default_size(width+SCROLLBAR_WIDTH, 300)
self.win.show_all()
self.prev = []
def update(self, peers, bad_peers):
fields = []
def p_bool(value): return value and '*' or ''
for peer in peers:
field = []
field.append(peer['ip'])
client, version = ClientIdentifier.identify_client(peer['id'])
field.append(client + ' ' + version)
if advanced_ui:
field.append(zurllib.quote(peer['id']))
field.append(peer['initiation'] == 'R' and _("remote") or _("local"))
dl = peer['download']
ul = peer['upload']
for l in (dl, ul):
rate = l[1]
if rate > 100:
field.append(int(round(rate/(2**10))))
else:
field.append(0)
if advanced_ui:
field.append(p_bool(l[2]))
field.append(p_bool(l[3]))
if len(l) > 4:
field.append(p_bool(l[4]))
else:
field.append(p_bool(peer['is_optimistic_unchoke']))
field.append('%.2f'%round(dl[0] / 2**20, 2))
field.append('%.2f'%round(ul[0] / 2**20, 2))
field.append('%.1f'%round(int(peer['completed']*1000)/10, 1))
field.append(int(peer['speed']//(2**10)))
fields.append(field)
for (ip, (is_banned, stats)) in bad_peers.iteritems():
field = []
field.append(ip)
client, version = ClientIdentifier.identify_client(stats.peerid)
field.append(client + ' ' + version)
if advanced_ui:
field.append(zurllib.quote(stats.peerid))
field.append(_("bad peer"))
# the sortable peer list won't take strings in these fields
field.append(0)
if advanced_ui:
field.extend([0] * 7) # upRate, * fields
else:
field.extend([0] * 1) # upRate
field.append(_("%d ok") % stats.numgood)
field.append(_("%d bad") % len(stats.bad))
if is_banned: # completion
field.append(_("banned"))
else:
field.append(_("ok"))
field.append(0) # peer dl rate
fields.append(field)
if self.store.get_sort_column_id() < 0:
# ListStore is unsorted, it might be faster to set only modified fields
it = self.store.get_iter_first()
for old, new in itertools.izip(self.prev, fields):
if old != new:
for i, value in enumerate(new):
if value != old[i]:
self.store.set_value(it, i, value)
it = self.store.iter_next(it)
for i in range(len(fields), len(self.prev)):
self.store.remove(it)
for i in range(len(self.prev), len(fields)):
self.store.append(fields[i])
self.prev = fields
else:
# ListStore is sorted, no reason not to reset all fields
self.store.clear()
for field in fields:
self.store.append(field)
def close(self):
self.win.destroy()
class TorrentInfoWindow(object):
def __init__(self, torrent_box, closefunc):
self.win = Window()
self.torrent_box = torrent_box
name = self.torrent_box.metainfo.name
self.win.set_title(_('Info for "%s"')%name)
self.win.set_size_request(-1,-1)
self.win.set_border_width(SPACING)
self.win.set_resizable(False)
self.win.connect('destroy', closefunc)
self.vbox = gtk.VBox(spacing=SPACING)
self.table = gtk.Table(rows=4, columns=3, homogeneous=False)
self.table.set_row_spacings(SPACING)
self.table.set_col_spacings(SPACING)
y = 0
def add_item(key, val, y):
self.table.attach(ralign(gtk.Label(key)), 0, 1, y, y+1)
v = gtk.Label(val)
v.set_selectable(True)
self.table.attach(lalign(v), 1, 2, y, y+1)
add_item(_("Torrent name:"), name, y)
y+=1
announce = ''
if self.torrent_box.metainfo.is_trackerless:
announce = _("(trackerless torrent)")
else:
announce = self.torrent_box.metainfo.announce
add_item(_("Announce url:"), announce, y)
y+=1
size = Size(self.torrent_box.metainfo.total_bytes)
num_files = _(", in one file")
if self.torrent_box.is_batch:
num_files = _(", in %d files") % len(self.torrent_box.metainfo.sizes)
add_item(_("Total size:"), str(size)+num_files, y)
y+=1
if advanced_ui:
pl = self.torrent_box.metainfo.piece_length
count, lastlen = divmod(size, pl)
sizedetail = '%d x %d + %d = %d' % (count, pl, lastlen, int(size))
add_item(_("Pieces:"), sizedetail, y)
y+=1
add_item(_("Info hash:"), self.torrent_box.infohash.encode('hex'), y)
y+=1
path = self.torrent_box.dlpath
filename = ''
if path is None:
path = ''
else:
if not self.torrent_box.is_batch:
path,filename = os.path.split(self.torrent_box.dlpath)
if path[-1] != os.sep:
path += os.sep
path = path_wrap(path)
add_item(_("Save in:"), path, y)
y+=1
if not self.torrent_box.is_batch:
add_item(_("File name:"), path_wrap(filename), y)
y+=1
self.vbox.pack_start(self.table)
if self.torrent_box.metainfo.comment not in (None, ''):
commentbuffer = gtk.TextBuffer()
commentbuffer.set_text(self.torrent_box.metainfo.comment)
commenttext = gtk.TextView(commentbuffer)
commenttext.set_editable(False)
commenttext.set_cursor_visible(False)
commenttext.set_wrap_mode(gtk.WRAP_WORD)
commentscroll = gtk.ScrolledWindow()
commentscroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
commentscroll.set_shadow_type(gtk.SHADOW_IN)
commentscroll.add(commenttext)
self.vbox.pack_start(commentscroll)
self.vbox.pack_start(gtk.HSeparator(), expand=False, fill=False)
self.hbox = gtk.HBox(spacing=SPACING)
lbbox = gtk.HButtonBox()
rbbox = gtk.HButtonBox()
lbbox.set_spacing(SPACING)
if LaunchPath.can_launch_files:
opendirbutton = IconButton(_("_Open directory"), stock=gtk.STOCK_OPEN)
opendirbutton.connect('clicked', self.torrent_box.open_dir)
lbbox.pack_start(opendirbutton, expand=False, fill=False)
opendirbutton.set_sensitive(self.torrent_box.can_open_dir())
filelistbutton = IconButton(_("Show _file list"), stock='gtk-index')
if self.torrent_box.is_batch:
filelistbutton.connect('clicked', self.torrent_box.open_filelist)
else:
filelistbutton.set_sensitive(False)
lbbox.pack_start(filelistbutton, expand=False, fill=False)
closebutton = gtk.Button(stock='gtk-close')
closebutton.connect('clicked', lambda w: self.close())
rbbox.pack_end(closebutton, expand=False, fill=False)
self.hbox.pack_start(lbbox, expand=False, fill=False)
self.hbox.pack_end( rbbox, expand=False, fill=False)
self.vbox.pack_end(self.hbox, expand=False, fill=False)
self.win.add(self.vbox)
self.win.show_all()
def close(self):
self.win.destroy()
class TorrentBox(gtk.EventBox):
torrent_tip_format = '%s:\n %s\n %s'
def __init__(self, infohash, metainfo, dlpath, completion, main):
gtk.EventBox.__init__(self)
self.infohash = infohash
self.metainfo = metainfo
self.completion = completion
self.main = main
self.main_torrent_dnd_tip = _("drag to reorder")
self.torrent_menu_tip = _("right-click for menu")
self.set_save_location(dlpath)
self.uptotal = self.main.torrents[self.infohash].uptotal
self.downtotal = self.main.torrents[self.infohash].downtotal
if self.downtotal > 0:
self.up_down_ratio = self.uptotal / self.metainfo.total_bytes
else:
self.up_down_ratio = None
self.infowindow = None
self.filelistwindow = None
self.is_batch = metainfo.is_batch
self.menu = None
self.menu_handler = None
self.vbox = gtk.VBox(homogeneous=False, spacing=SPACING)
self.label = gtk.Label()
self.set_name()
self.vbox.pack_start(lalign(self.label), expand=False, fill=False)
self.hbox = gtk.HBox(homogeneous=False, spacing=SPACING)
self.icon = gtk.Image()
self.icon.set_size_request(-1, 29)
self.iconbox = gtk.VBox()
self.iconevbox = gtk.EventBox()
self.iconevbox.add(self.icon)
self.iconbox.pack_start(self.iconevbox, expand=False, fill=False)
self.hbox.pack_start(self.iconbox, expand=False, fill=False)
self.vbox.pack_start(self.hbox)
self.infobox = gtk.VBox(homogeneous=False)
self.progressbarbox = gtk.HBox(homogeneous=False, spacing=SPACING)
self.progressbar = gtk.ProgressBar()
self.reset_progressbar_color()
if self.completion is not None:
self.progressbar.set_fraction(self.completion)
if self.completion >= 1:
done_label = self.make_done_label()
self.progressbar.set_text(done_label)
else:
self.progressbar.set_text('%.1f%%'%(self.completion*100))
else:
self.progressbar.set_text('?')
self.progressbarbox.pack_start(self.progressbar,
expand=True, fill=True)
self.buttonevbox = gtk.EventBox()
self.buttonbox = gtk.HBox(homogeneous=True, spacing=SPACING)
self.infobutton = gtk.Button()
self.infoimage = gtk.Image()
self.infoimage.set_from_stock('bt-info', gtk.ICON_SIZE_BUTTON)
self.infobutton.add(self.infoimage)
self.infobutton.connect('clicked', self.open_info)
self.main.tooltips.set_tip(self.infobutton,
_("Torrent info"))
self.buttonbox.pack_start(self.infobutton, expand=True)
self.cancelbutton = gtk.Button()
self.cancelimage = gtk.Image()
if self.completion is not None and self.completion >= 1:
self.cancelimage.set_from_stock('bt-remove', gtk.ICON_SIZE_BUTTON)
self.main.tooltips.set_tip(self.cancelbutton,
_("Remove torrent"))
else:
self.cancelimage.set_from_stock('bt-abort', gtk.ICON_SIZE_BUTTON)
self.main.tooltips.set_tip(self.cancelbutton,
_("Abort torrent"))
self.cancelbutton.add(self.cancelimage)
# not using 'clicked' because we want to check for CTRL key
self.cancelbutton.connect('button-release-event', self.confirm_remove)
self.buttonbox.pack_start(self.cancelbutton, expand=True, fill=False)
self.buttonevbox.add(self.buttonbox)
vbuttonbox = gtk.VBox(homogeneous=False)
vbuttonbox.pack_start(self.buttonevbox, expand=False, fill=False)
self.hbox.pack_end(vbuttonbox, expand=False, fill=False)
self.infobox.pack_start(self.progressbarbox, expand=False, fill=False)
self.hbox.pack_start(self.infobox, expand=True, fill=True)
self.add( self.vbox )
self.drag_source_set(gtk.gdk.BUTTON1_MASK,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_get', self.drag_data_get)
self.connect('drag_begin' , self.drag_begin )
self.connect('drag_end' , self.drag_end )
self.cursor_handler_id = self.connect('enter_notify_event', self.change_cursors)
def set_save_location(self, dlpath):
self.dlpath = dlpath
updater_infohash = self.main.updater.infohash
if updater_infohash == self.infohash:
my_installer_dir = os.path.split(self.dlpath)[0]
if self.main.updater.installer_dir != my_installer_dir:
self.main.updater.set_installer_dir(my_installer_dir)
def reset_progressbar_color(self):
# Hack around broken GTK-Wimp theme:
# make progress bar text always black
# see task #694
if is_frozen_exe and self.main.config['progressbar_hack']:
style = self.progressbar.get_style().copy()
black = style.black
self.progressbar.modify_fg(gtk.STATE_PRELIGHT, black)
def change_cursors(self, *args):
# BUG: this is in a handler that is disconnected because the
# window attributes are None until after show_all() is called
self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND2))
self.buttonevbox.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
self.disconnect(self.cursor_handler_id)
def drag_data_get(self, widget, context, selection, targetType, eventTime):
selection.set(selection.target, 8, self.infohash)
def drag_begin(self, *args):
pass
def drag_end(self, *args):
self.main.drag_end()
def make_done_label(self, statistics=None):
s = ''
if statistics and statistics['timeEst'] is not None:
s = _(", will seed for %s") % Duration(statistics['timeEst'])
elif statistics:
s = _(", will seed indefinitely.")
if self.up_down_ratio is not None:
done_label = _("Done, share ratio: %d%%") % \
(self.up_down_ratio*100) + s
elif statistics is not None:
done_label = _("Done, %s uploaded") % \
Size(statistics['upTotal']) + s
else:
done_label = _("Done")
return done_label
def set_name(self):
self.label.set_text(self.metainfo.name)
self.label.set_ellipsize(pango.ELLIPSIZE_END)
def make_menu(self, extra_menu_items=[]):
if self.menu_handler:
self.disconnect(self.menu_handler)
## Basic Info
menu_items = [ MenuItem(_("Torrent _info" ), func=self.open_info), ]
open_dir_func = None
if LaunchPath.can_launch_files and self.can_open_dir():
open_dir_func = self.open_dir
menu_items.append( MenuItem(_("_Open directory" ), func=open_dir_func) )
filelistfunc = None
if self.is_batch:
filelistfunc = self.open_filelist
menu_items.append(MenuItem(_("_File list"), func=filelistfunc))
if self.torrent_state == RUNNING:
menu_items.append(MenuItem(_("_Peer list"), func=self.open_peerlist))
## end Basic Info
menu_items.append(gtk.SeparatorMenuItem())
## Settings
# change save location
change_save_location_func = None
if self.torrent_state != RUNNING and self.completion <= 0:
change_save_location_func = self.change_save_location
menu_items.append(MenuItem(_("_Change location"),
func=change_save_location_func))
# seed forever item
self.seed_forever_item = gtk.CheckMenuItem(_("_Seed indefinitely"))
self.reset_seed_forever()
def sft(widget, *args):
active = widget.get_active()
infohash = self.infohash
for option in ('seed_forever', 'seed_last_forever'):
self.main.torrentqueue.set_config(option, active, infohash)
self.main.torrentqueue.set_config(option, active, infohash)
self.seed_forever_item.connect('toggled', sft)
menu_items.append(self.seed_forever_item)
## end Settings
menu_items.append(gtk.SeparatorMenuItem())
## Queue state dependent items
if self.torrent_state == KNOWN:
menu_items.append( MenuItem(_("Re_start"), func=self.move_to_end ))
elif self.torrent_state == QUEUED:
#Here's where we'll put the "Start hash check" menu item
menu_items.append(MenuItem(_("Download _now"), func=self.start))
elif self.torrent_state in (RUNNING, RUN_QUEUED):
# no items for here
pass
## Completion dependent items
if self.completion is not None and self.completion >= 1:
if self.torrent_state != KNOWN:
menu_items.append(MenuItem(_("_Finish"), func=self.finish))
menu_items.append( MenuItem(_("_Remove" ), func=self.confirm_remove))
else:
if self.torrent_state in (RUNNING, RUN_QUEUED):
menu_items.append(MenuItem(_("Download _later"), func=self.move_to_end))
else:
#Here's where we'll put the "Seed _later" menu item
pass
menu_items.append(MenuItem(_("_Abort" ), func=self.confirm_remove))
## build the menu
self.menu = gtk.Menu()
for i in menu_items:
i.show()
self.menu.add(i)
self.menu_handler = self.connect_object("event", self.show_menu, self.menu)
def reset_seed_forever(self):
sfb = False
d = self.main.torrents[self.infohash].config.getDict()
if d.has_key('seed_forever'):
sfb = d['seed_forever']
self.seed_forever_item.set_active(bool(sfb))
def change_save_location(self, widget=None):
self.main.change_save_location(self.infohash)
def open_info(self, widget=None):
if self.infowindow is None:
self.infowindow = TorrentInfoWindow(self, self.infoclosed)
def infoclosed(self, widget=None):
self.infowindow = None
def close_info(self):
if self.infowindow is not None:
self.infowindow.close()
def open_filelist(self, widget):
if not self.is_batch:
return
if self.filelistwindow is None:
self.filelistwindow = FileListWindow(self.metainfo,
self.filelistclosed)
self.main.torrentqueue.check_completion(self.infohash, True)
def filelistclosed(self, widget):
self.filelistwindow = None
def close_filelist(self):
if self.filelistwindow is not None:
self.filelistwindow.close()
def close_child_windows(self):
self.close_info()
self.close_filelist()
def destroy(self):
if self.menu is not None:
self.menu.destroy()
self.menu = None
gtk.EventBox.destroy(self)
def show_menu(self, widget, event):
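        # button 3 is the right mouse button, so the context menu is only
        # popped up on a right-click press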
if event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:
widget.popup(None, None, None, event.button, event.time)
return True
return False
def _short_path(self, dlpath):
path_length = 40
sep = '...'
ret = os.path.split(dlpath)[0]
if len(ret) > path_length+len(sep):
return ret[:int(path_length/2)]+sep+ret[-int(path_length/2):]
else:
return ret
def get_path_to_open(self):
path = self.dlpath
if not self.is_batch:
path = os.path.split(self.dlpath)[0]
return path
def can_open_dir(self):
return os.access(self.get_path_to_open(), os.F_OK|os.R_OK)
def open_dir(self, widget):
LaunchPath.launchdir(self.get_path_to_open())
def confirm_remove(self, widget, event=None):
if event is not None and event.get_state() & gtk.gdk.CONTROL_MASK:
self.remove()
else:
message = _('Are you sure you want to remove "%s"?') % self.metainfo.name
if self.completion >= 1:
if self.up_down_ratio is not None:
message = _("Your share ratio for this torrent is %d%%. ")%(self.up_down_ratio*100) + message
else:
message = _("You have uploaded %s to this torrent. ")%(Size(self.uptotal)) + message
d = MessageDialog(self.main.mainwindow,
_("Remove this torrent?"),
message,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.remove,
default=gtk.RESPONSE_OK,
)
def remove(self):
self.main.torrentqueue.remove_torrent(self.infohash)
class KnownTorrentBox(TorrentBox):
torrent_state = KNOWN
def __init__(self, infohash, metainfo, dlpath, completion, main):
TorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
status_tip = ''
if completion >= 1:
self.icon.set_from_stock('bt-finished', gtk.ICON_SIZE_LARGE_TOOLBAR)
status_tip = _("Finished")
known_torrent_dnd_tip = _("drag into list to seed")
else:
self.icon.set_from_stock('bt-broken', gtk.ICON_SIZE_LARGE_TOOLBAR)
status_tip = _("Failed")
known_torrent_dnd_tip = _("drag into list to resume")
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (status_tip,
known_torrent_dnd_tip,
self.torrent_menu_tip))
self.make_menu()
self.show_all()
def move_to_end(self, widget):
self.main.change_torrent_state(self.infohash, QUEUED)
class DroppableTorrentBox(TorrentBox):
def __init__(self, infohash, metainfo, dlpath, completion, main):
TorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion', self.drag_motion)
self.index = None
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
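            # a drop in the top half of this box places the dragged torrent
            # before it; a drop in the bottom half places it after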
half_height = self.size_request()[1] // 2
where = cmp(y, half_height)
if where == 0: where = 1
self.parent.put_infohash_at_child(selection.data, self, where)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
def drag_motion(self, widget, context, x, y, time):
self.get_current_index()
half_height = self.size_request()[1] // 2
if y < half_height:
self.parent.highlight_before_index(self.index)
else:
self.parent.highlight_after_index(self.index)
return False
def drag_end(self, *args):
self.parent.highlight_child()
TorrentBox.drag_end(self, *args)
def get_current_index(self):
self.index = self.parent.get_index_from_child(self)
class QueuedTorrentBox(DroppableTorrentBox):
icon_name = 'bt-queued'
torrent_state = QUEUED
def __init__(self, infohash, metainfo, dlpath, completion, main):
DroppableTorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.state_name = _("Waiting")
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (self.state_name,
self.main_torrent_dnd_tip,
self.torrent_menu_tip))
self.icon.set_from_stock(self.icon_name, gtk.ICON_SIZE_LARGE_TOOLBAR)
self.make_menu()
self.show_all()
def start(self, widget):
self.main.runbox.put_infohash_last(self.infohash)
def finish(self, widget):
self.main.change_torrent_state(self.infohash, KNOWN)
class PausedTorrentBox(DroppableTorrentBox):
icon_name = 'bt-paused'
torrent_state = RUN_QUEUED
def __init__(self, infohash, metainfo, dlpath, completion, main):
DroppableTorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.state_name = _("Paused")
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (self.state_name,
self.main_torrent_dnd_tip,
self.torrent_menu_tip))
self.icon.set_from_stock(self.icon_name, gtk.ICON_SIZE_LARGE_TOOLBAR)
self.make_menu()
self.show_all()
def move_to_end(self, widget):
self.main.change_torrent_state(self.infohash, QUEUED)
def finish(self, widget):
self.main.change_torrent_state(self.infohash, KNOWN)
def update_status(self, statistics):
# in case the TorrentQueue thread calls widget.update_status()
# before the GUI has changed the torrent widget to a
# RunningTorrentBox
pass
class RunningTorrentBox(PausedTorrentBox):
torrent_state = RUNNING
def __init__(self, infohash, metainfo, dlpath, completion, main):
DroppableTorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (_("Running"),
self.main_torrent_dnd_tip,
self.torrent_menu_tip))
self.seed = False
self.peerlistwindow = None
self.update_peer_list_flag = 0
self.icon.set_from_stock('bt-running', gtk.ICON_SIZE_LARGE_TOOLBAR)
self.rate_label_box = gtk.HBox(homogeneous=True)
self.up_rate = gtk.Label()
self.down_rate = gtk.Label()
self.rate_label_box.pack_start(lalign(self.up_rate ),
expand=True, fill=True)
self.rate_label_box.pack_start(lalign(self.down_rate),
expand=True, fill=True)
self.infobox.pack_start(self.rate_label_box)
if advanced_ui:
self.extrabox = gtk.VBox(homogeneous=False)
#self.extrabox = self.vbox
self.up_curr = FancyLabel(_("Current up: %s" ), 0)
self.down_curr = FancyLabel(_("Current down: %s"), 0)
self.curr_box = gtk.HBox(homogeneous=True)
self.curr_box.pack_start(lalign(self.up_curr ), expand=True, fill=True)
self.curr_box.pack_start(lalign(self.down_curr), expand=True, fill=True)
self.extrabox.pack_start(self.curr_box)
self.up_prev = FancyLabel(_("Previous up: %s" ), 0)
self.down_prev = FancyLabel(_("Previous down: %s"), 0)
self.prev_box = gtk.HBox(homogeneous=True)
self.prev_box.pack_start(lalign(self.up_prev ), expand=True, fill=True)
self.prev_box.pack_start(lalign(self.down_prev), expand=True, fill=True)
self.extrabox.pack_start(self.prev_box)
self.share_ratio = FancyLabel(_("Share ratio: %0.02f%%"), 0)
self.extrabox.pack_start(lalign(self.share_ratio))
self.peer_info = FancyLabel(_("%s peers, %s seeds. Totals from "
"tracker: %s"), 0, 0, 'NA')
self.extrabox.pack_start(lalign(self.peer_info))
self.dist_copies = FancyLabel(_("Distributed copies: %d; Next: %s"), 0, '')
self.extrabox.pack_start(lalign(self.dist_copies))
self.piece_info = FancyLabel(_("Pieces: %d total, %d complete, "
"%d partial, %d active (%d empty)"), *(0,)*5)
self.extrabox.pack_start(lalign(self.piece_info))
self.bad_info = FancyLabel(_("%d bad pieces + %s in discarded requests"), 0, 0)
self.extrabox.pack_start(lalign(self.bad_info))
# extra info
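            # piece count is ceil(total_bytes / piece_length): divmod yields the
            # number of full pieces, plus one partial piece if there is a remainder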
pl = self.metainfo.piece_length
tl = self.metainfo.total_bytes
count, lastlen = divmod(tl, pl)
self.piece_count = count + (lastlen > 0)
self.infobox.pack_end(self.extrabox, expand=False, fill=False)
self.make_menu()
self.show_all()
def change_to_completed(self):
self.completion = 1.0
self.cancelimage.set_from_stock('bt-remove', gtk.ICON_SIZE_BUTTON)
self.main.tooltips.set_tip(self.cancelbutton,
_("Remove torrent"))
updater_infohash = self.main.updater.infohash
if updater_infohash == self.infohash:
self.main.updater.start_install()
self.make_menu()
def close_child_windows(self):
TorrentBox.close_child_windows(self)
self.close_peerlist()
def open_filelist(self, widget):
if not self.is_batch:
return
if self.filelistwindow is None:
self.filelistwindow = FileListWindow(self.metainfo,
self.filelistclosed)
self.main.make_statusrequest()
def open_peerlist(self, widget):
if self.peerlistwindow is None:
self.peerlistwindow = PeerListWindow(self.metainfo.name,
self.peerlistclosed)
self.main.make_statusrequest()
def peerlistclosed(self, widget):
self.peerlistwindow = None
self.update_peer_list_flag = 0
def close_peerlist(self):
if self.peerlistwindow is not None:
self.peerlistwindow.close()
rate_label = ': %s'
eta_label = '?'
done_label = _("Done")
progress_bar_label = _("%.1f%% done, %s remaining")
down_rate_label = _("Download rate")
up_rate_label = _("Upload rate" )
def update_status(self, statistics):
fractionDone = statistics.get('fractionDone')
activity = statistics.get('activity')
self.main.set_title(torrentName=self.metainfo.name,
fractionDone=fractionDone)
dt = self.downtotal
if statistics.has_key('downTotal'):
dt += statistics['downTotal']
ut = self.uptotal
if statistics.has_key('upTotal'):
ut += statistics['upTotal']
if dt > 0:
self.up_down_ratio = ut / self.metainfo.total_bytes
done_label = self.done_label
eta_label = self.eta_label
if 'numPeers' in statistics:
eta = statistics.get('timeEst')
if eta is not None:
eta_label = Duration(eta)
if fractionDone == 1:
done_label = self.make_done_label(statistics)
if fractionDone == 1:
self.progressbar.set_fraction(1)
self.progressbar.set_text(done_label)
self.reset_seed_forever()
if not self.completion >= 1:
self.change_to_completed()
else:
self.progressbar.set_fraction(fractionDone)
progress_bar_label = self.progress_bar_label % \
(int(fractionDone*1000)/10, eta_label)
self.progressbar.set_text(progress_bar_label)
if 'numPeers' not in statistics:
return
self.down_rate.set_text(self.down_rate_label+self.rate_label %
Rate(statistics['downRate']))
self.up_rate.set_text (self.up_rate_label+self.rate_label %
Rate(statistics['upRate']))
if advanced_ui:
if self.up_down_ratio is not None:
self.share_ratio.set_value(self.up_down_ratio*100)
num_seeds = statistics['numSeeds']
if self.seed:
num_seeds = statistics['numOldSeeds'] = 0 # !@# XXX
if statistics['trackerPeers'] is not None:
totals = '%d/%d' % (statistics['trackerPeers'],
statistics['trackerSeeds'])
else:
totals = _("NA")
self.peer_info.set_value(statistics['numPeers'], num_seeds, totals)
self.up_curr.set_value(str(Size(statistics['upTotal'])))
self.down_curr.set_value(str(Size(statistics['downTotal'])))
self.up_prev.set_value(str(Size(self.uptotal)))
self.down_prev.set_value(str(Size(self.downtotal)))
# refresh extra info
self.piece_info.set_value(self.piece_count,
statistics['storage_numcomplete'],
statistics['storage_dirty'],
statistics['storage_active'],
statistics['storage_new'] )
self.dist_copies.set_value( statistics['numCopies'], ', '.join(["%d:%.1f%%" % (a, int(b*1000)/10) for a, b in zip(itertools.count(int(statistics['numCopies']+1)), statistics['numCopyList'])]))
self.bad_info.set_value(statistics['storage_numflunked'], Size(statistics['discarded']))
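        # update_peer_list_flag cycles 0..3 below, so the relatively expensive
        # peer-list refresh only happens on every fourth status update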
if self.peerlistwindow is not None:
if self.update_peer_list_flag == 0:
spew = statistics.get('spew')
if spew is not None:
self.peerlistwindow.update(spew, statistics['bad_peers'])
self.update_peer_list_flag = (self.update_peer_list_flag + 1) % 4
if self.filelistwindow is not None:
if 'files_left' in statistics:
self.filelistwindow.update(statistics['files_left'],
statistics['files_allocated'])
class DroppableHSeparator(PaddedHSeparator):
def __init__(self, box, spacing=SPACING):
PaddedHSeparator.__init__(self, spacing)
self.box = box
self.main = box.main
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion' , self.drag_motion )
def drag_highlight(self):
self.sep.drag_highlight()
self.main.add_unhighlight_handle()
def drag_unhighlight(self):
self.sep.drag_unhighlight()
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
self.box.drop_on_separator(self, selection.data)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
def drag_motion(self, wid, context, x, y, time):
self.drag_highlight()
return False
class DroppableBox(HSeparatedBox):
def __init__(self, main, spacing=0):
HSeparatedBox.__init__(self, spacing=spacing)
self.main = main
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion', self.drag_motion)
def drag_motion(self, widget, context, x, y, time):
return False
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
pass
class KnownBox(DroppableBox):
def __init__(self, main, spacing=0):
DroppableBox.__init__(self, main, spacing=spacing)
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
def pack_start(self, widget, *args, **kwargs):
old_len = len(self.get_children())
DroppableBox.pack_start(self, widget, *args, **kwargs)
if old_len <= 0:
self.main.maximize_known_pane()
self.main.knownscroll.scroll_to_bottom()
def remove(self, widget):
DroppableBox.remove(self, widget)
new_len = len(self.get_children())
if new_len == 0:
self.main.maximize_known_pane()
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
infohash = selection.data
self.main.finish(infohash)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
def drag_motion(self, widget, context, x, y, time):
self.main.drag_highlight(widget=self)
def drag_highlight(self):
self.main.knownscroll.drag_highlight()
self.main.add_unhighlight_handle()
def drag_unhighlight(self):
self.main.knownscroll.drag_unhighlight()
class RunningAndQueueBox(gtk.VBox):
def __init__(self, main, **kwargs):
gtk.VBox.__init__(self, **kwargs)
self.main = main
def drop_on_separator(self, sep, infohash):
self.main.change_torrent_state(infohash, QUEUED, 0)
def highlight_between(self):
self.drag_highlight()
def drag_highlight(self):
self.get_children()[1].drag_highlight()
def drag_unhighlight(self):
self.get_children()[1].drag_unhighlight()
class SpacerBox(DroppableBox):
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
infohash = selection.data
self.main.queuebox.put_infohash_last(infohash)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
return True
BEFORE = -1
AFTER = 1
class ReorderableBox(DroppableBox):
def new_separator(self):
return DroppableHSeparator(self)
def __init__(self, main):
DroppableBox.__init__(self, main)
self.main = main
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion' , self.drag_motion)
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
half_height = self.size_request()[1] // 2
if y < half_height:
self.put_infohash_first(selection.data)
else:
self.put_infohash_last(selection.data)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
return True
def drag_motion(self, widget, context, x, y, time):
return False
def drag_highlight(self):
final = self.get_children()[-1]
final.drag_highlight()
self.main.add_unhighlight_handle()
def drag_unhighlight(self):
self.highlight_child(index=None)
self.parent.drag_unhighlight()
def highlight_before_index(self, index):
self.drag_unhighlight()
children = self._get_children()
if index > 0:
children[index*2 - 1].drag_highlight()
else:
self.highlight_at_top()
def highlight_after_index(self, index):
self.drag_unhighlight()
children = self._get_children()
if index*2 < len(children)-1:
children[index*2 + 1].drag_highlight()
else:
self.highlight_at_bottom()
def highlight_child(self, index=None):
for i, child in enumerate(self._get_children()):
if index is not None and i == index*2:
child.drag_highlight()
else:
child.drag_unhighlight()
def drop_on_separator(self, sep, infohash):
children = self._get_children()
for i, child in enumerate(children):
if child == sep:
reference_child = children[i-1]
self.put_infohash_at_child(infohash, reference_child, AFTER)
break
def get_queue(self):
queue = []
c = self.get_children()
for t in c:
queue.append(t.infohash)
return queue
def put_infohash_first(self, infohash):
self.highlight_child()
children = self.get_children()
if len(children) > 1 and infohash == children[0].infohash:
return
self.put_infohash_at_index(infohash, 0)
def put_infohash_last(self, infohash):
self.highlight_child()
children = self.get_children()
end = len(children)
if len(children) > 1 and infohash == children[end-1].infohash:
return
self.put_infohash_at_index(infohash, end)
def put_infohash_at_child(self, infohash, reference_child, where):
self.highlight_child()
if infohash == reference_child.infohash:
return
target_index = self.get_index_from_child(reference_child)
if where == AFTER:
target_index += 1
self.put_infohash_at_index(infohash, target_index)
def get_index_from_child(self, child):
c = self.get_children()
ret = -1
try:
ret = c.index(child)
except ValueError:
pass
return ret
def highlight_at_top(self):
raise NotImplementedError
def highlight_at_bottom(self):
raise NotImplementedError
def put_infohash_at_index(self, infohash, end):
raise NotImplementedError
class RunningBox(ReorderableBox):
def put_infohash_at_index(self, infohash, target_index):
#print 'RunningBox.put_infohash_at_index', infohash.encode('hex')[:8], target_index
l = self.get_queue()
replaced = None
if l:
replaced = l[-1]
self.main.confirm_replace_running_torrent(infohash, replaced,
target_index)
def highlight_at_top(self):
pass
# BUG: Don't know how I will indicate in the UI that the top of the list is highlighted
def highlight_at_bottom(self):
self.parent.highlight_between()
class QueuedBox(ReorderableBox):
def put_infohash_at_index(self, infohash, target_index):
#print 'want to put', infohash.encode('hex'), 'at', target_index
self.main.change_torrent_state(infohash, QUEUED, target_index)
def highlight_at_top(self):
self.parent.highlight_between()
def highlight_at_bottom(self):
pass
# BUG: Don't know how I will indicate in the UI that the bottom of the list is highlighted
class Struct(object):
pass
class SearchField(gtk.Entry):
def __init__(self, default_text, visit_url_func):
gtk.Entry.__init__(self)
self.default_text = default_text
self.visit_url_func = visit_url_func
self.set_text(self.default_text)
self.set_size_request(150, -1)
# default gtk Entry dnd processing is broken on linux!
# - default Motion handling causes asyncs
# - there's no way to filter the default text dnd
# see the parent window for a very painful work-around
self.drag_dest_unset()
self.connect('key-press-event', self.check_for_enter)
self.connect('button-press-event', self.begin_edit)
self.search_completion = gtk.EntryCompletion()
self.search_completion.set_text_column(0)
self.search_store = gtk.ListStore(gobject.TYPE_STRING)
self.search_completion.set_model(self.search_store)
self.set_completion(self.search_completion)
self.reset_text()
self.timeout_id = None
def begin_edit(self, *args):
if self.get_text() == self.default_text:
self.set_text('')
def check_for_enter(self, widget, event):
if event.keyval in (gtk.keysyms.Return, gtk.keysyms.KP_Enter):
self.search()
def reset_text(self):
self.set_text(self.default_text)
def search(self, *args):
search_term = self.get_text()
if search_term and search_term != self.default_text:
self.search_store.append([search_term])
search_url = SEARCH_URL % {'query' :zurllib.quote(search_term),
'client':'M-%s'%version.replace('.','-')}
self.timeout_id = gobject.timeout_add(2000, self.resensitize)
self.set_sensitive(False)
self.visit_url_func(search_url, callback=self.resensitize)
else:
self.reset_text()
self.select_region(0, -1)
self.grab_focus()
def resensitize(self):
self.set_sensitive(True)
self.reset_text()
if self.timeout_id is not None:
gobject.source_remove(self.timeout_id)
self.timeout_id = None
class DownloadInfoFrame(object):
def __init__(self, config, torrentqueue):
self.config = config
if self.config['save_in'] == '':
self.config['save_in'] = smart_dir('')
self.torrentqueue = torrentqueue
self.torrents = {}
self.running_torrents = {}
self.lists = {}
self.update_handle = None
self.unhighlight_handle = None
self.custom_size = False
self.child_windows = {}
self.postponed_save_windows = []
self.helpwindow = None
self.errordialog = None
self.mainwindow = Window(gtk.WINDOW_TOPLEVEL)
#tray icon
self.trayicon = TrayIcon(not self.config['start_minimized'],
toggle_func=self.toggle_shown,
quit_func=self.quit)
self.traythread = threading.Thread(target=self.trayicon.enable,
args=())
self.traythread.setDaemon(True)
if os.name == "nt":
# gtk has no way to check this?
self.iconized = False
self.mainwindow.connect('window-state-event', self.window_event)
if self.config['start_minimized']:
self.mainwindow.iconify()
gtk.gdk.threads_enter()
self.mainwindow.set_border_width(0)
self.set_seen_remote_connections(False)
self.set_seen_connections(False)
self.mainwindow.drag_dest_set(gtk.DEST_DEFAULT_ALL,
TARGET_EXTERNAL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.mainwindow.connect('drag_leave' , self.drag_leave )
self.mainwindow.connect('drag_data_received', self.accept_dropped_file)
self.mainwindow.set_size_request(WINDOW_WIDTH, -1)
self.mainwindow.connect('destroy', self.cancel)
self.mainwindow.connect('size-allocate', self.size_was_allocated)
self.accel_group = gtk.AccelGroup()
self.mainwindow.add_accel_group(self.accel_group)
#self.accel_group.connect(ord('W'), gtk.gdk.CONTROL_MASK, gtk.ACCEL_LOCKED,
# lambda *args: self.mainwindow.destroy())
self.tooltips = gtk.Tooltips()
self.logbuffer = LogBuffer()
self.log_text(_("%s started")%app_name, severity=None)
self.box1 = gtk.VBox(homogeneous=False, spacing=0)
self.box2 = gtk.VBox(homogeneous=False, spacing=0)
self.box2.set_border_width(SPACING)
self.menubar = gtk.MenuBar()
self.box1.pack_start(self.menubar, expand=False, fill=False)
self.ssbutton = StopStartButton(self)
# keystrokes used: A D F H L N O P Q S U X (E)
quit_menu_label = _("_Quit")
if os.name == 'nt':
quit_menu_label = _("E_xit")
file_menu_items = ((_("_Open torrent file"), self.select_torrent_to_open),
(_("Open torrent _URL"), self.enter_url_to_open),
(_("Make _new torrent" ), self.make_new_torrent),
('----' , None),
(_("_Pause/Play"), self.ssbutton.toggle),
('----' , None),
(quit_menu_label , lambda w: self.mainwindow.destroy()),
)
view_menu_items = ((_("Show/Hide _finished torrents"), self.toggle_known),
# BUG: if you reorder this menu, see def set_custom_size() first
(_("_Resize window to fit"), lambda w: self.resize_to_fit()),
('----' , None),
(_("_Log") , lambda w: self.open_window('log')),
# 'View log of all download activity',
#('----' , None),
(_("_Settings") , lambda w: self.open_window('settings')),
#'Change download behavior and network settings',
)
help_menu_items = ((_("_Help") , self.open_help),
#(_("_Help Window") , lambda w: self.open_window('help')),
(_("_About") , lambda w: self.open_window('about')),
(_("_Donate") , lambda w: self.donate()),
#(_("Rais_e") , lambda w: self.raiseerror()),
)
self.filemenu = gtk.MenuItem(_("_File"))
self.filemenu.set_submenu(build_menu(file_menu_items, self.accel_group))
self.filemenu.show()
self.viewmenu = gtk.MenuItem(_("_View"))
self.viewmenu.set_submenu(build_menu(view_menu_items, self.accel_group))
self.viewmenu.show()
self.helpmenu = gtk.MenuItem(_("_Help"))
self.helpmenu.set_submenu(build_menu(help_menu_items, self.accel_group))
self.helpmenu.show()
if os.name != 'nt':
self.helpmenu.set_right_justified(True)
self.menubar.append(self.filemenu)
self.menubar.append(self.viewmenu)
self.menubar.append(self.helpmenu)
self.menubar.show()
self.header = gtk.HBox(homogeneous=False)
self.box1.pack_start(self.box2, expand=False, fill=False)
# control box: rate slider, start-stop button, search widget, status light
self.controlbox = gtk.HBox(homogeneous=False)
controlbox_padding = SPACING//2
# stop-start button
self.controlbox.pack_start(malign(self.ssbutton),
expand=False, fill=False)
# rate slider
self.rate_slider_box = RateSliderBox(self.config, self.torrentqueue)
self.controlbox.pack_start(self.rate_slider_box,
expand=True, fill=True,
padding=controlbox_padding)
self.controlbox.pack_start(gtk.VSeparator(), expand=False, fill=False,
padding=controlbox_padding)
# search box
self.search_field = SearchField(_("Search for torrents"), self.visit_url)
sfa = gtk.Alignment(xalign=0, yalign=0.5, xscale=1, yscale=0)
sfa.add(self.search_field)
self.controlbox.pack_start(sfa,
expand=False, fill=False, padding=controlbox_padding)
# separator
self.controlbox.pack_start(gtk.VSeparator(), expand=False, fill=False,
padding=controlbox_padding)
# status light
self.status_light = StatusLight(self)
self.controlbox.pack_start(malign(self.status_light),
expand=False, fill=False)
self.box2.pack_start(self.controlbox,
expand=False, fill=False, padding=0)
# end control box
self.paned = gtk.VPaned()
self.knownscroll = ScrolledWindow()
self.knownscroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.knownscroll.set_shadow_type(gtk.SHADOW_NONE)
self.knownscroll.set_size_request(-1, SPACING)
self.knownbox = KnownBox(self)
self.knownbox.set_border_width(SPACING)
self.knownscroll.add_with_viewport(self.knownbox)
self.paned.pack1(self.knownscroll, resize=False, shrink=True)
self.mainscroll = AutoScrollingWindow()
self.mainscroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.mainscroll.set_shadow_type(gtk.SHADOW_NONE)
self.mainscroll.set_size_request(-1, SPACING)
self.scrollbox = RunningAndQueueBox(self, homogeneous=False)
self.scrollbox.set_border_width(SPACING)
self.runbox = RunningBox(self)
self.scrollbox.pack_start(self.runbox, expand=False, fill=False)
self.scrollbox.pack_start(DroppableHSeparator(self.scrollbox), expand=False, fill=False)
self.queuebox = QueuedBox(self)
self.scrollbox.pack_start(self.queuebox, expand=False, fill=False)
self.scrollbox.pack_start(SpacerBox(self), expand=True, fill=True)
self.mainscroll.add_with_viewport(self.scrollbox)
self.paned.pack2(self.mainscroll, resize=True, shrink=False)
self.box1.pack_start(self.paned)
self.box1.show_all()
self.mainwindow.add(self.box1)
self.set_title()
self.set_size()
self.mainwindow.show()
self.paned.set_position(0)
self.search_field.grab_focus()
self.updater = NewVersion.Updater(
gtk_wrap,
self.new_version,
self.torrentqueue.start_new_torrent,
self.confirm_install_new_version ,
self.global_error ,
self.config['new_version'] ,
self.config['current_version'] )
self.nag()
gtk.gdk.threads_leave()
def window_event(self, widget, event, *args):
if event.changed_mask == gtk.gdk.WINDOW_STATE_ICONIFIED:
if self.config['minimize_to_tray']:
if self.iconized == False:
self.mainwindow.hide()
self.trayicon.set_toggle_state(self.iconized)
self.iconized = not self.iconized
def drag_leave(self, *args):
self.drag_end()
def make_new_torrent(self, widget=None):
btspawn(self.torrentqueue, 'maketorrent')
def accept_dropped_file(self, widget, context, x, y, selection,
targetType, time):
if targetType == EXTERNAL_FILE_TYPE:
d = selection.data.strip()
file_uris = d.split('\r\n')
for file_uri in file_uris:
# this catches non-url entries, I've seen "\x00" at the end of lists
if file_uri.find(':/') != -1:
file_name = zurllib.url2pathname(file_uri)
file_name = file_name[7:]
if os.name == 'nt':
file_name = file_name.strip('\\')
self.open_torrent( file_name )
elif targetType == EXTERNAL_STRING_TYPE:
data = selection.data.strip()
# size must be > 0,0 for the intersection code to register it
drop_rect = gtk.gdk.Rectangle(x, y, 1, 1)
if ((self.search_field.intersect(drop_rect) is not None) and
(not data.lower().endswith(".torrent"))):
client_point = self.mainwindow.translate_coordinates(self.search_field, x, y)
layout_offset = self.search_field.get_layout_offsets()
point = []
# subtract (not add) the offset, because we're hit-testing the layout, not the widget
point.append(client_point[0] - layout_offset[0])
point.append(client_point[1] - layout_offset[1])
# ha ha ha. pango is so ridiculous
point[0] *= pango.SCALE
point[1] *= pango.SCALE
layout = self.search_field.get_layout()
position = layout.xy_to_index(*point)
self.search_field.insert_text(data, position[0])
else:
self.open_url(data)
def drag_highlight(self, widget=None):
widgets = (self.knownbox, self.runbox, self.queuebox)
for w in widgets:
if w != widget:
w.drag_unhighlight()
for w in widgets:
if w == widget:
w.drag_highlight()
self.add_unhighlight_handle()
def drag_end(self):
self.drag_highlight(widget=None)
self.mainscroll.stop_scrolling()
def set_title(self, torrentName=None, fractionDone=None):
title = app_name
trunc = '...'
sep = ': '
if self.config['pause']:
title += sep+_("(stopped)")
elif len(self.running_torrents) == 1 and torrentName and \
fractionDone is not None:
maxlen = WINDOW_TITLE_LENGTH - len(app_name) - len(trunc) - len(sep)
if len(torrentName) > maxlen:
torrentName = torrentName[:maxlen] + trunc
title = '%s%s%0.1f%%%s%s'% (app_name,
sep,
(int(fractionDone*1000)/10),
sep,
torrentName)
elif len(self.running_torrents) > 1:
title += sep+_("(multiple)")
if self.mainwindow.get_title() != title:
self.mainwindow.set_title(title)
if self.trayicon.get_tooltip() != title:
self.trayicon.set_tooltip(title)
def _guess_size(self):
paned_height = self.scrollbox.size_request()[1]
if hasattr(self.paned, 'style_get_property'):
paned_height += self.paned.style_get_property('handle-size')
else:
paned_height += 5
paned_height += self.paned.get_position()
paned_height += 4 # fudge factor, probably from scrolled window beveling ?
paned_height = max(paned_height, MIN_MULTI_PANE_HEIGHT)
new_height = self.menubar.size_request()[1] + \
self.box2.size_request()[1] + \
paned_height
new_height = min(new_height, MAX_WINDOW_HEIGHT)
new_width = max(self.scrollbox.size_request()[0] + SCROLLBAR_WIDTH, WINDOW_WIDTH)
return new_width, new_height
def set_size(self):
if not self.custom_size:
self.mainwindow.resize(*self._guess_size())
def size_was_allocated(self, *args):
current_size = self.mainwindow.get_size()
target_size = self._guess_size()
if current_size == target_size:
self.set_custom_size(False)
else:
self.set_custom_size(True)
def resize_to_fit(self):
self.set_custom_size(False)
self.set_size()
def set_custom_size(self, val):
self.custom_size = val
# BUG this is a hack:
self.viewmenu.get_submenu().get_children()[1].set_sensitive(val)
# BUG need to add handler on resize event to keep track of
# old_position when pane is hidden manually
def split_pane(self):
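        # toggle the "finished torrents" pane: remember the divider position
        # when collapsing it to 0, and restore it (or a sensible default)
        # when expanding again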
pos = self.paned.get_position()
if pos > 0:
self.paned.old_position = pos
self.paned.set_position(0)
else:
if hasattr(self.paned, 'old_position'):
self.paned.set_position(self.paned.old_position)
else:
self.maximize_known_pane()
def maximize_known_pane(self):
self.set_pane_position(self.knownbox.size_request()[1])
def set_pane_position(self, pane_position):
pane_position = min(MAX_WINDOW_HEIGHT//2, pane_position)
self.paned.set_position(pane_position)
def toggle_known(self, widget=None):
self.split_pane()
def open_window(self, window_name, *args, **kwargs):
if os.name == 'nt':
self.mainwindow.present()
savewidget = SaveFileSelection
if window_name == 'savedir':
savewidget = CreateFolderSelection
window_name = 'savefile'
if self.child_windows.has_key(window_name):
if window_name == 'savefile':
kwargs['show'] = False
self.postponed_save_windows.append(savewidget(self, **kwargs))
return
if window_name == 'log' :
self.child_windows[window_name] = LogWindow(self, self.logbuffer, self.config)
elif window_name == 'about' :
self.child_windows[window_name] = AboutWindow(self, lambda w: self.donate())
elif window_name == 'help' :
self.child_windows[window_name] = HelpWindow(self, makeHelp('bittorrent', defaults))
elif window_name == 'settings':
self.child_windows[window_name] = SettingsWindow(self, self.config, self.set_config)
elif window_name == 'version' :
self.child_windows[window_name] = VersionWindow(self, *args)
elif window_name == 'openfile':
self.child_windows[window_name] = OpenFileSelection(self, **kwargs)
elif window_name == 'savefile':
self.child_windows[window_name] = savewidget(self, **kwargs)
elif window_name == 'choosefolder':
self.child_windows[window_name] = ChooseFolderSelection(self, **kwargs)
elif window_name == 'enterurl':
self.child_windows[window_name] = EnterUrlDialog(self, **kwargs)
return self.child_windows[window_name]
def window_closed(self, window_name):
if self.child_windows.has_key(window_name):
del self.child_windows[window_name]
if window_name == 'savefile' and self.postponed_save_windows:
newwin = self.postponed_save_windows.pop(-1)
newwin.show()
self.child_windows['savefile'] = newwin
def close_window(self, window_name):
self.child_windows[window_name].close(None)
def new_version(self, newversion, download_url):
if not self.config['notified'] or \
newversion != NewVersion.Version.from_str(self.config['notified']):
if not self.torrents.has_key(self.updater.infohash):
self.open_window('version', newversion, download_url)
else:
dlpath = os.path.split(self.torrents[self.updater.infohash].dlpath)[0]
self.updater.set_installer_dir(dlpath)
self.updater.start_install()
def check_version(self):
self.updater.check()
def start_auto_update(self):
if not self.torrents.has_key(self.updater.infohash):
self.updater.download()
else:
self.global_error(INFO, _("Already downloading %s installer") % self.updater.version)
def confirm_install_new_version(self):
MessageDialog(self.mainwindow,
_("Install new %s now?")%app_name,
_("Do you want to quit %s and install the new version, "
"%s, now?")%(app_name,self.updater.version),
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO,
yesfunc=self.install_new_version,
nofunc=None,
default=gtk.RESPONSE_YES
)
def install_new_version(self):
self.updater.launch_installer(self.torrentqueue)
self.cancel()
def open_help(self,widget):
if self.helpwindow is None:
msg = (_("%s help is at \n%s\nWould you like to go there now?")%
(app_name, HELP_URL))
self.helpwindow = MessageDialog(self.mainwindow,
_("Visit help web page?"),
msg,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.visit_help,
nofunc =self.help_closed,
default=gtk.RESPONSE_OK
)
def visit_help(self):
self.visit_url(HELP_URL)
self.help_closed()
def close_help(self):
self.helpwindow.close()
def help_closed(self, widget=None):
self.helpwindow = None
def set_config(self, option, value):
self.config[option] = value
if option == 'display_interval':
self.init_updates()
self.torrentqueue.set_config(option, value)
def confirm_remove_finished_torrents(self,widget):
count = 0
for infohash, t in self.torrents.iteritems():
if t.state == KNOWN and t.completion >= 1:
count += 1
if count:
if self.paned.get_position() == 0:
self.toggle_known()
msg = ''
if count == 1:
msg = _("There is one finished torrent in the list. ") + \
_("Do you want to remove it?")
else:
msg = _("There are %d finished torrents in the list. ") % count +\
_("Do you want to remove all of them?")
MessageDialog(self.mainwindow,
_("Remove all finished torrents?"),
msg,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.remove_finished_torrents,
default=gtk.RESPONSE_OK)
else:
MessageDialog(self.mainwindow,
_("No finished torrents"),
_("There are no finished torrents to remove."),
type=gtk.MESSAGE_INFO,
default=gtk.RESPONSE_OK)
def remove_finished_torrents(self):
for infohash, t in self.torrents.iteritems():
if t.state == KNOWN and t.completion >= 1:
self.torrentqueue.remove_torrent(infohash)
if self.paned.get_position() > 0:
self.toggle_known()
def cancel(self, widget=None):
for window_name in self.child_windows.keys():
self.close_window(window_name)
if self.errordialog is not None:
self.errordialog.destroy()
self.errors_closed()
for t in self.torrents.itervalues():
if t.widget is not None:
t.widget.close_child_windows()
self.torrentqueue.set_done()
gtk.main_quit()
# Currently called if the user started bittorrent from a terminal
# and presses ctrl-c there, or if the user quits BitTorrent from
# the tray icon (on windows)
def quit(self):
self.mainwindow.destroy()
def make_statusrequest(self):
if self.config['pause']:
return True
for infohash, t in self.running_torrents.iteritems():
self.torrentqueue.request_status(infohash, t.widget.peerlistwindow
is not None, t.widget.filelistwindow is not None)
if not len(self.running_torrents):
self.status_light.send_message('empty')
return True
def enter_url_to_open(self, widget):
self.open_window('enterurl')
def open_url(self, url):
self.torrentqueue.start_new_torrent_by_name(url)
def select_torrent_to_open(self, widget):
open_location = self.config['open_from']
if not open_location:
open_location = self.config['save_in']
path = smart_dir(open_location)
self.open_window('openfile',
title=_("Open torrent:"),
fullname=path,
got_location_func=self.open_torrent,
no_location_func=lambda: self.window_closed('openfile'))
def open_torrent(self, name):
self.window_closed('openfile')
open_location = os.path.split(name)[0]
if open_location[-1] != os.sep:
open_location += os.sep
self.set_config('open_from', open_location)
self.torrentqueue.start_new_torrent_by_name(name)
def change_save_location(self, infohash):
def no_location():
self.window_closed('savefile')
t = self.torrents[infohash]
metainfo = t.metainfo
selector = self.open_window(metainfo.is_batch and 'savedir' or \
'savefile',
title=_("Change save location for ") + metainfo.name,
fullname=t.dlpath,
got_location_func = \
lambda fn: self.got_changed_location(infohash, fn),
no_location_func=no_location)
def got_changed_location(self, infohash, fullpath):
self.window_closed('savefile')
self.torrentqueue.set_save_location(infohash, fullpath)
def save_location(self, infohash, metainfo):
name = metainfo.name_fs
if self.config['save_as'] and \
os.access(os.path.split(self.config['save_as'])[0], os.W_OK):
path = self.config['save_as']
self.got_location(infohash, path, store_in_config=False)
self.config['save_as'] = ''
return
path = smart_dir(self.config['save_in'])
fullname = os.path.join(path, name)
if not self.config['ask_for_save']:
if os.access(fullname, os.F_OK):
message = MessageDialog(self.mainwindow,
_("File exists!"),
_('"%s" already exists. '
"Do you want to choose a different file name?") % path_wrap(name),
buttons=gtk.BUTTONS_YES_NO,
nofunc= lambda : self.got_location(infohash, fullname),
yesfunc=lambda : self.get_save_location(infohash, metainfo, fullname),
default=gtk.RESPONSE_NO)
else:
self.got_location(infohash, fullname)
else:
self.get_save_location(infohash, metainfo, fullname)
def get_save_location(self, infohash, metainfo, fullname):
def no_location():
self.window_closed('savefile')
self.torrentqueue.remove_torrent(infohash)
selector = self.open_window(metainfo.is_batch and 'savedir' or \
'savefile',
title=_("Save location for ") + metainfo.name,
fullname=fullname,
got_location_func = lambda fn: \
self.got_location(infohash, fn),
no_location_func=no_location)
self.torrents[infohash].widget = selector
def got_location(self, infohash, fullpath, store_in_config=True):
self.window_closed('savefile')
self.torrents[infohash].widget = None
save_in = os.path.split(fullpath)[0]
metainfo = self.torrents[infohash].metainfo
if metainfo.is_batch:
bottom_dirs, top_dir_name = os.path.split(save_in)
if metainfo.name_fs == top_dir_name:
message = MessageDialog(self.mainwindow, _("Directory exists!"),
_('"%s" already exists.'\
" Do you intend to create an identical,"\
" duplicate directory inside the existing"\
" directory?")%path_wrap(save_in),
buttons=gtk.BUTTONS_YES_NO,
nofunc =lambda : self.got_location(infohash, save_in ),
yesfunc=lambda : self._got_location(infohash, save_in, fullpath, store_in_config=store_in_config),
default=gtk.RESPONSE_NO,
)
return
self._got_location(infohash, save_in, fullpath, store_in_config=store_in_config)
def _got_location(self, infohash, save_in, fullpath, store_in_config=True):
if store_in_config:
if save_in[-1] != os.sep:
save_in += os.sep
self.set_config('save_in', save_in)
self.torrents[infohash].dlpath = fullpath
self.torrentqueue.set_save_location(infohash, fullpath)
def add_unhighlight_handle(self):
if self.unhighlight_handle is not None:
gobject.source_remove(self.unhighlight_handle)
self.unhighlight_handle = gobject.timeout_add(2000,
self.unhighlight_after_a_while,
priority=gobject.PRIORITY_LOW)
def unhighlight_after_a_while(self):
self.drag_highlight()
gobject.source_remove(self.unhighlight_handle)
self.unhighlight_handle = None
return False
def init_updates(self):
if self.update_handle is not None:
gobject.source_remove(self.update_handle)
self.update_handle = gobject.timeout_add(
int(self.config['display_interval'] * 1000),
self.make_statusrequest)
def remove_torrent_widget(self, infohash):
t = self.torrents[infohash]
self.lists[t.state].remove(infohash)
if t.state == RUNNING:
del self.running_torrents[infohash]
self.set_title()
if t.state == ASKING_LOCATION:
if t.widget is not None:
t.widget.destroy()
return
if t.state in (KNOWN, RUNNING, QUEUED):
t.widget.close_child_windows()
if t.state == RUNNING:
self.runbox.remove(t.widget)
elif t.state == QUEUED:
self.queuebox.remove(t.widget)
elif t.state == KNOWN:
self.knownbox.remove(t.widget)
t.widget.destroy()
self.set_size()
def create_torrent_widget(self, infohash, queuepos=None):
t = self.torrents[infohash]
l = self.lists.setdefault(t.state, [])
if queuepos is None:
l.append(infohash)
else:
l.insert(queuepos, infohash)
if t.state == ASKING_LOCATION:
self.save_location(infohash, t.metainfo)
self.nag()
return
elif t.state == RUNNING:
self.running_torrents[infohash] = t
if not self.config['pause']:
t.widget = RunningTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
else:
t.widget = PausedTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
box = self.runbox
elif t.state == QUEUED:
t.widget = QueuedTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
box = self.queuebox
elif t.state == KNOWN:
t.widget = KnownTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
box = self.knownbox
box.pack_start(t.widget, expand=False, fill=False)
if queuepos is not None:
box.reorder_child(t.widget, queuepos)
self.set_size()
def log_text(self, text, severity=ERROR):
self.logbuffer.log_text(text, severity)
if self.child_windows.has_key('log'):
self.child_windows['log'].scroll_to_end()
def _error(self, severity, err_str):
err_str = err_str.decode('utf-8', 'replace').encode('utf-8')
err_str = err_str.strip()
if severity >= ERROR:
self.error_modal(err_str)
self.log_text(err_str, severity)
def error(self, infohash, severity, text):
if self.torrents.has_key(infohash):
name = self.torrents[infohash].metainfo.name
err_str = '"%s" : %s'%(name,text)
self._error(severity, err_str)
else:
ihex = infohash.encode('hex')
err_str = '"%s" : %s'%(ihex,text)
self._error(severity, err_str)
self._error(WARNING, 'Previous error raised for invalid infohash: "%s"' % ihex)
def global_error(self, severity, text):
err_str = _("(global message) : %s")%text
self._error(severity, err_str)
def error_modal(self, text):
if self.child_windows.has_key('log'):
return
title = _("%s Error") % app_name
if self.errordialog is not None:
if not self.errordialog.multi:
self.errordialog.destroy()
self.errordialog = MessageDialog(self.mainwindow, title,
_("Multiple errors have occurred. "
"Click OK to view the error log."),
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.multiple_errors_yes,
nofunc=self.errors_closed,
default=gtk.RESPONSE_OK
)
self.errordialog.multi = True
else:
# already showing the multi error dialog, so do nothing
pass
else:
self.errordialog = MessageDialog(self.mainwindow, title, text,
yesfunc=self.errors_closed,
default=gtk.RESPONSE_OK)
self.errordialog.multi = False
def multiple_errors_yes(self):
self.errors_closed()
self.open_window('log')
def errors_closed(self):
self.errordialog = None
def open_log(self):
self.open_window('log')
def stop_queue(self):
self.set_config('pause', True)
self.set_title()
self.status_light.send_message('stop')
self.set_seen_remote_connections(False)
self.set_seen_connections(False)
q = list(self.runbox.get_queue())
for infohash in q:
t = self.torrents[infohash]
self.remove_torrent_widget(infohash)
self.create_torrent_widget(infohash)
def restart_queue(self):
self.set_config('pause', False)
q = list(self.runbox.get_queue())
for infohash in q:
t = self.torrents[infohash]
self.remove_torrent_widget(infohash)
self.create_torrent_widget(infohash)
self.start_status_light()
def start_status_light(self):
if len(self.running_torrents):
self.status_light.send_message('start')
else:
self.status_light.send_message('empty')
def update_status(self, torrent, statistics):
if self.config['pause']:
self.status_light.send_message('start')
return
if self.seen_remote_connections:
self.status_light.send_message('seen_remote_peers')
elif self.seen_connections:
self.status_light.send_message('seen_peers')
else:
self.start_status_light()
self.running_torrents[torrent].widget.update_status(statistics)
if statistics.get('numPeers'):
self.set_seen_connections(seen=True)
if (not self.seen_remote_connections and
statistics.get('ever_got_incoming')):
self.set_seen_remote_connections(seen=True)
if self.updater is not None:
updater_infohash = self.updater.infohash
if self.torrents.has_key(updater_infohash):
updater_torrent = self.torrents[updater_infohash]
if updater_torrent.state == QUEUED:
self.change_torrent_state(updater_infohash, RUNNING,
index=0, replaced=0,
force_running=True)
def set_seen_remote_connections(self, seen=False):
if seen:
self.status_light.send_message('seen_remote_peers')
self.seen_remote_connections = seen
def set_seen_connections(self, seen=False):
if seen:
self.status_light.send_message('seen_peers')
self.seen_connections = seen
def new_displayed_torrent(self, infohash, metainfo, dlpath, state, config,
completion=None, uptotal=0, downtotal=0):
t = Struct()
t.metainfo = metainfo
t.dlpath = dlpath
t.state = state
t.config = config
t.completion = completion
t.uptotal = uptotal
t.downtotal = downtotal
t.widget = None
self.torrents[infohash] = t
self.create_torrent_widget(infohash)
def torrent_state_changed(self, infohash, dlpath, state, completion,
uptotal, downtotal, queuepos=None):
t = self.torrents[infohash]
self.remove_torrent_widget(infohash)
t.dlpath = dlpath
t.state = state
t.completion = completion
t.uptotal = uptotal
t.downtotal = downtotal
self.create_torrent_widget(infohash, queuepos)
def reorder_torrent(self, infohash, queuepos):
self.remove_torrent_widget(infohash)
self.create_torrent_widget(infohash, queuepos)
def update_completion(self, infohash, completion, files_left=None,
files_allocated=None):
t = self.torrents[infohash]
if files_left is not None and t.widget.filelistwindow is not None:
t.widget.filelistwindow.update(files_left, files_allocated)
def removed_torrent(self, infohash):
self.remove_torrent_widget(infohash)
del self.torrents[infohash]
def change_torrent_state(self, infohash, newstate, index=None,
replaced=None, force_running=False):
t = self.torrents[infohash]
pred = succ = None
if index is not None:
l = self.lists.setdefault(newstate, [])
if index > 0:
pred = l[index - 1]
if index < len(l):
succ = l[index]
self.torrentqueue.change_torrent_state(infohash, t.state, newstate,
pred, succ, replaced, force_running)
def finish(self, infohash):
t = self.torrents[infohash]
if t is None or t.state == KNOWN:
return
self.change_torrent_state(infohash, KNOWN)
def confirm_replace_running_torrent(self, infohash, replaced, index):
replace_func = lambda *args: self.change_torrent_state(infohash,
RUNNING, index, replaced)
add_func = lambda *args: self.change_torrent_state(infohash,
RUNNING, index, force_running=True)
moved_torrent = self.torrents[infohash]
if moved_torrent.state == RUNNING:
self.change_torrent_state(infohash, RUNNING, index)
return
if self.config['start_torrent_behavior'] == 'replace':
replace_func()
return
elif self.config['start_torrent_behavior'] == 'add':
add_func()
return
moved_torrent_name = moved_torrent.metainfo.name
confirm = MessageDialog(self.mainwindow,
_("Stop running torrent?"),
_('You are about to start "%s". Do you want to stop another running torrent as well?')%(moved_torrent_name),
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO,
yesfunc=replace_func,
nofunc=add_func,
default=gtk.RESPONSE_YES)
def nag(self):
if ((self.config['donated'] != version) and
#(random.random() * NAG_FREQUENCY) < 1) and
False):
title = _("Have you donated?")
message = _("Welcome to the new version of %s. Have you donated?")%app_name
self.nagwindow = MessageDialog(self.mainwindow,
title,
message,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO,
yesfunc=self.nag_yes, nofunc=self.nag_no,
default=gtk.RESPONSE_NO)
def nag_no(self):
self.donate()
def nag_yes(self):
self.set_config('donated', version)
MessageDialog(self.mainwindow,
_("Thanks!"),
_("Thanks for donating! To donate again, "
'select "Donate" from the "Help" menu.'),
type=gtk.MESSAGE_INFO,
default=gtk.RESPONSE_OK
)
def donate(self):
self.visit_url(DONATE_URL)
def visit_url(self, url, callback=None):
t = threading.Thread(target=self._visit_url,
args=(url,callback))
t.setDaemon(True)
t.start()
def _visit_url(self, url, callback=None):
webbrowser.open(url)
if callback:
gtk_wrap(callback)
def toggle_shown(self):
if self.config['minimize_to_tray']:
if self.mainwindow.get_property('visible'):
self.mainwindow.hide()
else:
self.mainwindow.show_all()
else:
if not self.iconized:
self.mainwindow.iconify()
else:
self.mainwindow.deiconify()
def raiseerror(self, *args):
raise ValueError('test traceback behavior')
#this class provides a thin layer around the loop so that the main window
#doesn't have to run it. It protects against exceptions in mainwindow creation
#preventing the loop from starting (and causing "The grey screen of BT")
class MainLoop:
def __init__(self):
self.mainwindow = None
self.started = 0
gtk.gdk.threads_init()
def set_mainwindow(self, mainwindow):
self.mainwindow = mainwindow
def run(self):
self.mainwindow.traythread.start()
gtk.gdk.threads_enter()
if self.mainwindow:
self.mainwindow.ssbutton.set_paused(self.mainwindow.config['pause'])
self.mainwindow.rate_slider_box.start()
self.mainwindow.init_updates()
try:
#the main loop has been started
self.started = 1
gtk.main()
except KeyboardInterrupt:
gtk.gdk.threads_leave()
if self.mainwindow:
self.mainwindow.torrentqueue.set_done()
raise
gtk.gdk.threads_leave()
def quit(self):
if self.mainwindow:
self.mainwindow.quit()
def btgui_exit_gtk(mainloop):
# if the main loop has never run, we have to run it to flush blocking threads
# if it has run, running it a second time will cause duplicate-destruction problems
if not mainloop.started:
# queue up a command to close the gui
gobject.idle_add(lock_wrap, mainloop.quit)
# run the main loop so we process all queued commands, then quit
mainloop.run()
if __name__ == '__main__':
mainloop = MainLoop()
# make sure we start the gtk loop once before we close
atexit.register(btgui_exit_gtk, mainloop)
torrentqueue = TorrentQueue.TorrentQueue(config, ui_options, ipc)
d = DownloadInfoFrame(config,TorrentQueue.ThreadWrappedQueue(torrentqueue))
mainloop.set_mainwindow(d)
global_log_func.logger = d.global_error
startflag = threading.Event()
dlthread = threading.Thread(target = torrentqueue.run,
args = (d, gtk_wrap, startflag))
dlthread.setDaemon(False)
dlthread.start()
startflag.wait()
# the wait may have been terminated because of an error
if torrentqueue.initialized == -1:
raise BTFailure(_("Could not start the TorrentQueue, see above for errors."))
torrentqueue.rawserver.install_sigint_handler()
for name in newtorrents:
d.torrentqueue.start_new_torrent_by_name(name)
try:
mainloop.run()
except KeyboardInterrupt:
# the gtk main loop is closed in MainLoop
sys.exit(1)
d.trayicon.disable()
|
galaxy001/libtorrent
|
BitTorrent-4.4.0/bittorrent.py
|
Python
|
mit
| 145,556
|
[
"VisIt"
] |
5eb7f4ade57f33e366f0eec7e957d4e4f5e3dfabe4be0c64d28f6ea07018c359
|
import numpy as np
try:
import scipy.optimize as opt
except ImportError:
pass
from ase.optimize.optimize import Optimizer
class Converged(Exception):
pass
class OptimizerConvergenceError(Exception):
pass
class SciPyOptimizer(Optimizer):
"""General interface for SciPy optimizers
Only the call to the optimizer is still needed
"""
def __init__(self, atoms, logfile='-', trajectory=None,
callback_always=False, alpha=70.0, master=None):
"""Initialize object
Parameters:
atoms: Atoms object
The Atoms object to relax.
trajectory: string
Pickle file used to store trajectory of atomic movement.
logfile: file object or str
If *logfile* is a string, a file with that name will be opened.
Use '-' for stdout.
        callback_always: bool
Should the callback be run after each force call (also in the
linesearch)
alpha: float
Initial guess for the Hessian (curvature of energy surface). A
conservative value of 70.0 is the default, but number of needed
steps to converge might be less if a lower value is used. However,
a lower value also means risk of instability.
master: boolean
Defaults to None, which causes only rank 0 to save files. If
set to true, this rank will save files.
"""
restart = None
Optimizer.__init__(self, atoms, restart, logfile, trajectory, master)
self.force_calls = 0
self.callback_always = callback_always
self.H0 = alpha
def x0(self):
"""Return x0 in a way SciPy can use
This class is mostly usable for subclasses wanting to redefine the
parameters (and the objective function)"""
return self.atoms.get_positions().reshape(-1)
def f(self, x):
"""Objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
# Scale the problem as SciPy uses I as initial Hessian.
return self.atoms.get_potential_energy() / self.H0
def fprime(self, x):
"""Gradient of the objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
self.force_calls += 1
if self.callback_always:
self.callback(x)
# Remember that forces are minus the gradient!
# Scale the problem as SciPy uses I as initial Hessian.
return - self.atoms.get_forces().reshape(-1) / self.H0
def callback(self, x):
"""Callback function to be run after each iteration by SciPy
This should also be called once before optimization starts, as SciPy
optimizers only calls it after each iteration, while ase optimizers
call something similar before as well.
"""
f = self.atoms.get_forces()
self.log(f)
self.call_observers()
if self.converged(f):
raise Converged
self.nsteps += 1
def run(self, fmax=0.05, steps=100000000):
self.fmax = fmax
# As SciPy does not log the zeroth iteration, we do that manually
self.callback(None)
try:
# Scale the problem as SciPy uses I as initial Hessian.
self.call_fmin(fmax / self.H0, steps)
except Converged:
pass
def dump(self, data):
pass
def load(self):
pass
def call_fmin(self, fmax, steps):
raise NotImplementedError
class SciPyFminCG(SciPyOptimizer):
"""Non-linear (Polak-Ribiere) conjugate gradient algorithm"""
def call_fmin(self, fmax, steps):
output = opt.fmin_cg(self.f,
self.x0(),
fprime=self.fprime,
#args=(),
gtol=fmax * 0.1, #Should never be reached
norm=np.inf,
#epsilon=
maxiter=steps,
full_output=1,
disp=0,
#retall=0,
callback=self.callback
)
warnflag = output[-1]
if warnflag == 2:
raise OptimizerConvergenceError('Warning: Desired error not necessarily achieved ' \
'due to precision loss')
class SciPyFminBFGS(SciPyOptimizer):
"""Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno)"""
def call_fmin(self, fmax, steps):
output = opt.fmin_bfgs(self.f,
self.x0(),
fprime=self.fprime,
#args=(),
gtol=fmax * 0.1, #Should never be reached
norm=np.inf,
#epsilon=1.4901161193847656e-08,
maxiter=steps,
full_output=1,
disp=0,
#retall=0,
callback=self.callback
)
warnflag = output[-1]
if warnflag == 2:
            raise OptimizerConvergenceError('Warning: Desired error not necessarily achieved ' \
'due to precision loss')
class SciPyGradientlessOptimizer(Optimizer):
"""General interface for gradient less SciPy optimizers
Only the call to the optimizer is still needed
Note: If you redefine x0() and f(), you don't even need an atoms object.
Redefining these also allows you to specify an arbitrary objective
function.
XXX: This is still a work in progress
"""
def __init__(self, atoms, logfile='-', trajectory=None,
callback_always=False, master=None):
"""Initialize object
Parameters:
atoms: Atoms object
The Atoms object to relax.
trajectory: string
Pickle file used to store trajectory of atomic movement.
logfile: file object or str
If *logfile* is a string, a file with that name will be opened.
Use '-' for stdout.
        callback_always: bool
Should the callback be run after each force call (also in the
linesearch)
alpha: float
Initial guess for the Hessian (curvature of energy surface). A
conservative value of 70.0 is the default, but number of needed
steps to converge might be less if a lower value is used. However,
a lower value also means risk of instability.
master: boolean
Defaults to None, which causes only rank 0 to save files. If
set to true, this rank will save files.
"""
restart = None
Optimizer.__init__(self, atoms, restart, logfile, trajectory, master)
self.function_calls = 0
self.callback_always = callback_always
def x0(self):
"""Return x0 in a way SciPy can use
This class is mostly usable for subclasses wanting to redefine the
parameters (and the objective function)"""
return self.atoms.get_positions().reshape(-1)
def f(self, x):
"""Objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
self.function_calls += 1
# Scale the problem as SciPy uses I as initial Hessian.
return self.atoms.get_potential_energy()
def callback(self, x):
"""Callback function to be run after each iteration by SciPy
This should also be called once before optimization starts, as SciPy
optimizers only calls it after each iteration, while ase optimizers
call something similar before as well.
"""
# We can't assume that forces are available!
#f = self.atoms.get_forces()
#self.log(f)
self.call_observers()
#if self.converged(f):
# raise Converged
self.nsteps += 1
def run(self, ftol=0.01, xtol=0.01, steps=100000000):
self.xtol = xtol
self.ftol = ftol
# As SciPy does not log the zeroth iteration, we do that manually
self.callback(None)
try:
# Scale the problem as SciPy uses I as initial Hessian.
self.call_fmin(xtol, ftol, steps)
except Converged:
pass
def dump(self, data):
pass
def load(self):
pass
    def call_fmin(self, xtol, ftol, steps):
raise NotImplementedError
class SciPyFmin(SciPyGradientlessOptimizer):
"""Nelder-Mead Simplex algorithm
Uses only function calls.
XXX: This is still a work in progress
"""
def call_fmin(self, xtol, ftol, steps):
output = opt.fmin(self.f,
self.x0(),
#args=(),
xtol=xtol,
ftol=ftol,
maxiter=steps,
#maxfun=None,
#full_output=1,
disp=0,
#retall=0,
callback=self.callback
)
class SciPyFminPowell(SciPyGradientlessOptimizer):
"""Powell's (modified) level set method
Uses only function calls.
XXX: This is still a work in progress
"""
def __init__(self, *args, **kwargs):
"""Parameters:
direc: float
            How much to change x initially. Defaults to 0.04.
"""
direc = kwargs.pop('direc', None)
SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)
if direc is None:
self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
else:
self.direc = np.eye(len(self.x0()), dtype=float) * direc
def call_fmin(self, xtol, ftol, steps):
output = opt.fmin_powell(self.f,
self.x0(),
#args=(),
xtol=xtol,
ftol=ftol,
maxiter=steps,
#maxfun=None,
#full_output=1,
disp=0,
#retall=0,
callback=self.callback,
direc=self.direc
)
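# ---------------------------------------------------------------------------
# Hypothetical usage sketch, not part of the original module: relax a small N2
# molecule with SciPyFminBFGS. It assumes SciPy and ASE's bundled EMT
# calculator are available; any real calculator could be substituted.
if __name__ == '__main__':
    from ase import Atoms
    from ase.calculators.emt import EMT
    d = 1.2  # made-up starting bond length in Angstrom
    atoms = Atoms('N2', positions=[(0.0, 0.0, 0.0), (0.0, 0.0, d)])
    atoms.set_calculator(EMT())
    opt = SciPyFminBFGS(atoms, logfile='-')
    opt.run(fmax=0.05)
    print(atoms.get_distance(0, 1))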
|
suttond/MODOI
|
ase/optimize/sciopt.py
|
Python
|
lgpl-3.0
| 10,601
|
[
"ASE"
] |
434eb7b1f2bf6a49ee9f72f8648cb1076edb1577a195a729abdfc4d3350ab5cc
|
# Copyright 2013 by Christian Brueffer. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple sequence alignment program MSAProbs.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Argument, _Option, _Switch, AbstractCommandline
class MSAProbsCommandline(AbstractCommandline):
"""Command line wrapper for MSAProbs.
http://msaprobs.sourceforge.net
Example:
--------
>>> from Bio.Align.Applications import MSAProbsCommandline
>>> in_file = "unaligned.fasta"
>>> out_file = "aligned.cla"
>>> cline = MSAProbsCommandline(infile=in_file, outfile=out_file, clustalw=True)
>>> print(cline)
msaprobs -o aligned.cla -clustalw unaligned.fasta
You would typically run the command line with cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Citation:
---------
Yongchao Liu, Bertil Schmidt, Douglas L. Maskell: "MSAProbs: multiple
sequence alignment based on pair hidden Markov models and partition
    function posterior probabilities". Bioinformatics, 2010, 26(16): 1958-1964
Last checked against version: 0.9.7
"""
def __init__(self, cmd="msaprobs", **kwargs):
# order of parameters is the same as in msaprobs -help
self.parameters = \
[
_Option(["-o", "--outfile", "outfile"],
"specify the output file name (STDOUT by default)",
filename=True,
equate=False),
_Option(["-num_threads", "numthreads"],
"specify the number of threads used, and otherwise detect automatically",
checker_function=lambda x: isinstance(x, int)),
_Switch(["-clustalw", "clustalw"],
"use CLUSTALW output format instead of FASTA format"),
_Option(["-c", "consistency"],
"use 0 <= REPS <= 5 (default: 2) passes of consistency transformation",
checker_function=lambda x: isinstance(x, int) and 0 <= x <= 5),
_Option(["-ir", "--iterative-refinement", "iterative_refinement"],
"use 0 <= REPS <= 1000 (default: 10) passes of iterative-refinement",
checker_function=lambda x: isinstance(x, int) and 0 <= x <= 1000),
_Switch(["-v", "verbose"],
"report progress while aligning (default: off)"),
_Option(["-annot", "annot"],
"write annotation for multiple alignment to FILENAME",
filename=True),
_Switch(["-a", "--alignment-order", "alignment_order"],
"print sequences in alignment order rather than input order (default: off)"),
_Option(["-version", "version"],
"print out version of MSAPROBS"),
_Argument(["infile"],
"Multiple sequence input file",
filename=True),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
def _test():
"""Run the module's doctests (PRIVATE)."""
print("Running MSAProbs doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
_test()
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Align/Applications/_MSAProbs.py
|
Python
|
gpl-2.0
| 3,498
|
[
"Biopython"
] |
05d4ec12d5c02db6ac357322ca551950a3107ed3462b39c6eebcaa77fdf73d70
|
"""
Support the ISY-994 controllers.
For configuration details please visit the documentation for this component at
https://home-assistant.io/components/isy994/
"""
import asyncio
from collections import namedtuple
import logging
from urllib.parse import urlparse
import voluptuous as vol
from homeassistant.core import HomeAssistant # noqa
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import discovery, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, Dict # noqa
REQUIREMENTS = ['PyISY==1.1.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'isy994'
CONF_IGNORE_STRING = 'ignore_string'
CONF_SENSOR_STRING = 'sensor_string'
CONF_ENABLE_CLIMATE = 'enable_climate'
CONF_TLS_VER = 'tls'
DEFAULT_IGNORE_STRING = '{IGNORE ME}'
DEFAULT_SENSOR_STRING = 'sensor'
KEY_ACTIONS = 'actions'
KEY_FOLDER = 'folder'
KEY_MY_PROGRAMS = 'My Programs'
KEY_STATUS = 'status'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TLS_VER): vol.Coerce(float),
vol.Optional(CONF_IGNORE_STRING,
default=DEFAULT_IGNORE_STRING): cv.string,
vol.Optional(CONF_SENSOR_STRING,
default=DEFAULT_SENSOR_STRING): cv.string,
vol.Optional(CONF_ENABLE_CLIMATE,
default=True): cv.boolean
})
}, extra=vol.ALLOW_EXTRA)
# Do not use the Hass consts for the states here - we're matching exact API
# responses, not using them for Hass states
NODE_FILTERS = {
'binary_sensor': {
'uom': [],
'states': [],
'node_def_id': ['BinaryAlarm'],
'insteon_type': ['16.'] # Does a startswith() match; include the dot
},
'sensor': {
# This is just a more-readable way of including MOST uoms between 1-100
# (Remember that range() is non-inclusive of the stop value)
'uom': (['1'] +
list(map(str, range(3, 11))) +
list(map(str, range(12, 51))) +
list(map(str, range(52, 66))) +
list(map(str, range(69, 78))) +
['79'] +
list(map(str, range(82, 97)))),
'states': [],
'node_def_id': ['IMETER_SOLO'],
'insteon_type': ['9.0.', '9.7.']
},
'lock': {
'uom': ['11'],
'states': ['locked', 'unlocked'],
'node_def_id': ['DoorLock'],
'insteon_type': ['15.']
},
'fan': {
'uom': [],
'states': ['on', 'off', 'low', 'medium', 'high'],
'node_def_id': ['FanLincMotor'],
'insteon_type': ['1.46.']
},
'cover': {
'uom': ['97'],
'states': ['open', 'closed', 'closing', 'opening', 'stopped'],
'node_def_id': [],
'insteon_type': []
},
'light': {
'uom': ['51'],
'states': ['on', 'off', '%'],
'node_def_id': ['DimmerLampSwitch', 'DimmerLampSwitch_ADV',
'DimmerSwitchOnly', 'DimmerSwitchOnly_ADV',
'DimmerLampOnly', 'BallastRelayLampSwitch',
'BallastRelayLampSwitch_ADV', 'RelayLampSwitch',
'RemoteLinc2', 'RemoteLinc2_ADV'],
'insteon_type': ['1.']
},
'switch': {
'uom': ['2', '78'],
'states': ['on', 'off'],
'node_def_id': ['OnOffControl', 'RelayLampSwitch',
'RelayLampSwitch_ADV', 'RelaySwitchOnlyPlusQuery',
'RelaySwitchOnlyPlusQuery_ADV', 'RelayLampOnly',
'RelayLampOnly_ADV', 'KeypadButton',
'KeypadButton_ADV', 'EZRAIN_Input', 'EZRAIN_Output',
'EZIO2x4_Input', 'EZIO2x4_Input_ADV', 'BinaryControl',
'BinaryControl_ADV', 'AlertModuleSiren',
'AlertModuleSiren_ADV', 'AlertModuleArmed', 'Siren',
'Siren_ADV'],
'insteon_type': ['2.', '9.10.', '9.11.']
}
}
SUPPORTED_DOMAINS = ['binary_sensor', 'sensor', 'lock', 'fan', 'cover',
'light', 'switch']
SUPPORTED_PROGRAM_DOMAINS = ['binary_sensor', 'lock', 'fan', 'cover', 'switch']
# ISY Scenes are more like Switches than Hass Scenes
# (they can turn off, and report their state)
SCENE_DOMAIN = 'switch'
ISY994_NODES = "isy994_nodes"
ISY994_WEATHER = "isy994_weather"
ISY994_PROGRAMS = "isy994_programs"
WeatherNode = namedtuple('WeatherNode', ('status', 'name', 'uom'))
def _check_for_node_def(hass: HomeAssistant, node,
single_domain: str=None) -> bool:
"""Check if the node matches the node_def_id for any domains.
This is only present on the 5.0 ISY firmware, and is the most reliable
way to determine a device's type.
"""
if not hasattr(node, 'node_def_id') or node.node_def_id is None:
# Node doesn't have a node_def (pre 5.0 firmware most likely)
return False
node_def_id = node.node_def_id
domains = SUPPORTED_DOMAINS if not single_domain else [single_domain]
for domain in domains:
if node_def_id in NODE_FILTERS[domain]['node_def_id']:
hass.data[ISY994_NODES][domain].append(node)
return True
return False
def _check_for_insteon_type(hass: HomeAssistant, node,
single_domain: str=None) -> bool:
"""Check if the node matches the Insteon type for any domains.
This is for (presumably) every version of the ISY firmware, but only
    works for Insteon devices. "Node Server" (v5+) and Z-Wave and others will
not have a type.
"""
if not hasattr(node, 'type') or node.type is None:
# Node doesn't have a type (non-Insteon device most likely)
return False
device_type = node.type
domains = SUPPORTED_DOMAINS if not single_domain else [single_domain]
for domain in domains:
if any([device_type.startswith(t) for t in
set(NODE_FILTERS[domain]['insteon_type'])]):
hass.data[ISY994_NODES][domain].append(node)
return True
return False
def _check_for_uom_id(hass: HomeAssistant, node,
single_domain: str=None, uom_list: list=None) -> bool:
"""Check if a node's uom matches any of the domains uom filter.
This is used for versions of the ISY firmware that report uoms as a single
ID. We can often infer what type of device it is by that ID.
"""
if not hasattr(node, 'uom') or node.uom is None:
# Node doesn't have a uom (Scenes for example)
return False
node_uom = set(map(str.lower, node.uom))
if uom_list:
if node_uom.intersection(NODE_FILTERS[single_domain]['uom']):
hass.data[ISY994_NODES][single_domain].append(node)
return True
else:
domains = SUPPORTED_DOMAINS if not single_domain else [single_domain]
for domain in domains:
if node_uom.intersection(NODE_FILTERS[domain]['uom']):
hass.data[ISY994_NODES][domain].append(node)
return True
return False
def _check_for_states_in_uom(hass: HomeAssistant, node,
single_domain: str=None,
states_list: list=None) -> bool:
"""Check if a list of uoms matches two possible filters.
This is for versions of the ISY firmware that report uoms as a list of all
possible "human readable" states. This filter passes if all of the possible
states fit inside the given filter.
"""
if not hasattr(node, 'uom') or node.uom is None:
# Node doesn't have a uom (Scenes for example)
return False
node_uom = set(map(str.lower, node.uom))
if states_list:
if node_uom == set(states_list):
hass.data[ISY994_NODES][single_domain].append(node)
return True
else:
domains = SUPPORTED_DOMAINS if not single_domain else [single_domain]
for domain in domains:
if node_uom == set(NODE_FILTERS[domain]['states']):
hass.data[ISY994_NODES][domain].append(node)
return True
return False
def _is_sensor_a_binary_sensor(hass: HomeAssistant, node) -> bool:
"""Determine if the given sensor node should be a binary_sensor."""
if _check_for_node_def(hass, node, single_domain='binary_sensor'):
return True
if _check_for_insteon_type(hass, node, single_domain='binary_sensor'):
return True
# For the next two checks, we're providing our own set of uoms that
# represent on/off devices. This is because we can only depend on these
# checks in the context of already knowing that this is definitely a
# sensor device.
if _check_for_uom_id(hass, node, single_domain='binary_sensor',
uom_list=['2', '78']):
return True
if _check_for_states_in_uom(hass, node, single_domain='binary_sensor',
states_list=['on', 'off']):
return True
return False
def _categorize_nodes(hass: HomeAssistant, nodes, ignore_identifier: str,
                      sensor_identifier: str) -> None:
"""Sort the nodes to their proper domains."""
# pylint: disable=no-member
for (path, node) in nodes:
ignored = ignore_identifier in path or ignore_identifier in node.name
if ignored:
# Don't import this node as a device at all
continue
from PyISY.Nodes import Group
if isinstance(node, Group):
hass.data[ISY994_NODES][SCENE_DOMAIN].append(node)
continue
if sensor_identifier in path or sensor_identifier in node.name:
# User has specified to treat this as a sensor. First we need to
# determine if it should be a binary_sensor.
if _is_sensor_a_binary_sensor(hass, node):
continue
else:
hass.data[ISY994_NODES]['sensor'].append(node)
continue
# We have a bunch of different methods for determining the device type,
# each of which works with different ISY firmware versions or device
# family. The order here is important, from most reliable to least.
if _check_for_node_def(hass, node):
continue
if _check_for_insteon_type(hass, node):
continue
if _check_for_uom_id(hass, node):
continue
if _check_for_states_in_uom(hass, node):
continue
def _categorize_programs(hass: HomeAssistant, programs: dict) -> None:
"""Categorize the ISY994 programs."""
for domain in SUPPORTED_PROGRAM_DOMAINS:
try:
folder = programs[KEY_MY_PROGRAMS]['HA.{}'.format(domain)]
except KeyError:
pass
else:
for dtype, _, node_id in folder.children:
if dtype == KEY_FOLDER:
entity_folder = folder[node_id]
try:
status = entity_folder[KEY_STATUS]
assert status.dtype == 'program', 'Not a program'
if domain != 'binary_sensor':
actions = entity_folder[KEY_ACTIONS]
assert actions.dtype == 'program', 'Not a program'
else:
actions = None
except (AttributeError, KeyError, AssertionError):
_LOGGER.warning("Program entity '%s' not loaded due "
"to invalid folder structure.",
entity_folder.name)
continue
entity = (entity_folder.name, status, actions)
hass.data[ISY994_PROGRAMS][domain].append(entity)
def _categorize_weather(hass: HomeAssistant, climate) -> None:
"""Categorize the ISY994 weather data."""
climate_attrs = dir(climate)
weather_nodes = [WeatherNode(getattr(climate, attr),
attr.replace('_', ' '),
getattr(climate, '{}_units'.format(attr)))
for attr in climate_attrs
if '{}_units'.format(attr) in climate_attrs]
hass.data[ISY994_WEATHER].extend(weather_nodes)
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the ISY 994 platform."""
hass.data[ISY994_NODES] = {}
for domain in SUPPORTED_DOMAINS:
hass.data[ISY994_NODES][domain] = []
hass.data[ISY994_WEATHER] = []
hass.data[ISY994_PROGRAMS] = {}
for domain in SUPPORTED_DOMAINS:
hass.data[ISY994_PROGRAMS][domain] = []
isy_config = config.get(DOMAIN)
user = isy_config.get(CONF_USERNAME)
password = isy_config.get(CONF_PASSWORD)
tls_version = isy_config.get(CONF_TLS_VER)
host = urlparse(isy_config.get(CONF_HOST))
ignore_identifier = isy_config.get(CONF_IGNORE_STRING)
sensor_identifier = isy_config.get(CONF_SENSOR_STRING)
enable_climate = isy_config.get(CONF_ENABLE_CLIMATE)
if host.scheme == 'http':
https = False
port = host.port or 80
elif host.scheme == 'https':
https = True
port = host.port or 443
else:
_LOGGER.error("isy994 host value in configuration is invalid")
return False
import PyISY
# Connect to ISY controller.
isy = PyISY.ISY(host.hostname, port, username=user, password=password,
use_https=https, tls_ver=tls_version, log=_LOGGER)
if not isy.connected:
return False
_categorize_nodes(hass, isy.nodes, ignore_identifier, sensor_identifier)
_categorize_programs(hass, isy.programs)
if enable_climate and isy.configuration.get('Weather Information'):
_categorize_weather(hass, isy.climate)
def stop(event: object) -> None:
"""Stop ISY auto updates."""
isy.auto_update = False
# Listen for HA stop to disconnect.
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
# Load platforms for the devices in the ISY controller that we support.
for component in SUPPORTED_DOMAINS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
isy.auto_update = True
return True
class ISYDevice(Entity):
"""Representation of an ISY994 device."""
_attrs = {}
_name = None # type: str
def __init__(self, node) -> None:
"""Initialize the insteon device."""
self._node = node
self._change_handler = None
self._control_handler = None
@asyncio.coroutine
def async_added_to_hass(self) -> None:
"""Subscribe to the node change events."""
self._change_handler = self._node.status.subscribe(
'changed', self.on_update)
if hasattr(self._node, 'controlEvents'):
self._control_handler = self._node.controlEvents.subscribe(
self.on_control)
# pylint: disable=unused-argument
def on_update(self, event: object) -> None:
"""Handle the update event from the ISY994 Node."""
self.schedule_update_ha_state()
def on_control(self, event: object) -> None:
"""Handle a control event from the ISY994 Node."""
self.hass.bus.fire('isy994_control', {
'entity_id': self.entity_id,
'control': event
})
@property
def unique_id(self) -> str:
"""Get the unique identifier of the device."""
# pylint: disable=protected-access
return self._node._id
@property
def name(self) -> str:
"""Get the name of the device."""
return self._name or str(self._node.name)
@property
def should_poll(self) -> bool:
"""No polling required since we're using the subscription."""
return False
@property
def value(self) -> int:
"""Get the current value of the device."""
# pylint: disable=protected-access
return self._node.status._val
def is_unknown(self) -> bool:
"""Get whether or not the value of this Entity's node is unknown.
PyISY reports unknown values as -inf
"""
return self.value == -1 * float('inf')
@property
def state(self):
"""Return the state of the ISY device."""
if self.is_unknown():
return None
else:
return super().state
@property
def device_state_attributes(self) -> Dict:
"""Get the state attributes for the device."""
attr = {}
if hasattr(self._node, 'aux_properties'):
for name, val in self._node.aux_properties.items():
attr[name] = '{} {}'.format(val.get('value'), val.get('uom'))
return attr
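# Hypothetical sketch, not part of the original component: the shape of the
# configuration accepted by CONFIG_SCHEMA above. Host and credentials are
# placeholders; validating it requires the Home Assistant helpers imported at
# the top of this module.
if __name__ == '__main__':
    example_config = {
        DOMAIN: {
            CONF_HOST: 'https://192.168.1.2:443',
            CONF_USERNAME: 'admin',
            CONF_PASSWORD: 'changeme',
            CONF_TLS_VER: 1.2,
        }
    }
    validated = CONFIG_SCHEMA(example_config)
    print(validated[DOMAIN][CONF_SENSOR_STRING])  # default: 'sensor'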
|
ewandor/home-assistant
|
homeassistant/components/isy994.py
|
Python
|
apache-2.0
| 16,941
|
[
"VisIt"
] |
96b5c2a0d70977e17e7106f88e11072a88ac788c7d086c996e45944a74b84f35
|
#!/usr/bin/env python
import numpy as np
import networkx as nx
###########################
### create scatter data ###
###########################
n = 6 # specify the number of observations for each cluster
np.random.seed(42) # provide a seed for the random number generator
feature1 = np.array([np.random.normal(2,1,n)]).transpose() # creates a column vector from a gaussian for cluster 1
feature2 = np.array([np.random.normal(2,1,n)]).transpose() # creates a column vector from a gaussian for cluster 1
cluster1 = np.hstack((feature1,feature2)) # combines the col vectors into one matrix
feature1 = np.array([np.random.normal(7,1,n)]).transpose() # creates a column vector from a gaussian for cluster 2
feature2 = np.array([np.random.normal(7,1,n)]).transpose() # creates a column vector from a gaussian for cluster 2
cluster2 = np.hstack((feature1,feature2)) # combines the col vectors into one matrix
dataScatter = np.vstack((cluster1,cluster2)) # combines the clusters
dataScatterLabels = np.zeros(n * 2,dtype=int) # specifies the actual labels
dataScatterLabels[-n:] = 1
#############################
### create circle data ###
#############################
n = 12
feature1 = np.array([np.random.normal(4.8,0.2,n)]).transpose()
feature2 = np.array([np.random.normal(5.1,0.2,n)]).transpose()
cluster1 = np.hstack((feature1,feature2))
cluster2 = np.array([[3.5,4.0],[3.2,4.5],[3.2, 5.0], [3.5,5.5], [3.8,6.0],[4.1,6.5],[4.5,7.0],[4.9,7.0],[5.3,6.8],[5.7,6.4],
[6.1,6.0],[6.3,5.6],[6.3,5.2],[6.2,4.8],[6.1,4.2],[5.8,3.8],[5.4,3.4],[5.0,3.1],[4.6,3.1],[4.2,3.3],[3.8,3.6]])
n1,d1 = np.shape(cluster1)
n2,d2 = np.shape(cluster2)
dataCircle = np.vstack([cluster1,cluster2])
dataCircleLabels = np.hstack([np.zeros(n1,dtype=int),np.zeros(n2,dtype=int)+1])
##############################
### create letters data ### [3.35,3.15]
##############################
cluster1 = np.array([[1.10,3.3],[1.18,3.3],[1.26,3.3],[1.26,3.2],[1.22,3.1],[1.14,3.1],[1.06,3.1],[0.98,3.1],
[0.90,3.1],[0.82,3.1],[0.75,3.15],[0.71,3.2],[0.7,3.3],[0.7,3.4],[0.7,3.5],[0.70,3.6],
[0.70,3.7],[0.73,3.8],[0.8,3.88],[0.9,3.9],[1.0,3.9],[1.07,3.88],[1.15,3.83],[1.21,3.75],[1.26,3.65]])
cluster2 = np.array([[2.0,3.9],[2.0,3.8],[2.0,3.7],[2.0,3.6],[2.0,3.5],[2.0,3.4],[2.0,3.3],[2.0,3.2],[2.0,3.1],
[2.06,3.8],[2.12,3.7],[2.18,3.6],[2.3,3.5],[2.3,3.4],[2.36,3.3],[2.42,3.2],[2.48,3.1],
[2.55,3.9],[2.55,3.8],[2.55,3.7],[2.55,3.6],[2.55,3.5],[2.55,3.4],[2.55,3.3],[2.55,3.2],[2.55,3.1]])
cluster3 = np.array([[3.3,3.9],[3.3,3.8],[3.3,3.7],[3.3,3.6],[3.3,3.5],[3.3,3.4],[3.3,3.3],[3.3,3.22],
[3.35,3.17],[3.45,3.1],[3.66,3.1],[3.75,3.17],
[3.8,3.9],[3.8,3.8],[3.8,3.7],[3.8,3.6],[3.8,3.5],[3.8,3.4],[3.8,3.3],[3.8,3.22]])
#cluster2[:,0] = cluster2[:,0] + 1.5
#cluster3[:,0] = cluster3[:,0] + 3.0
dataLetters = np.vstack([cluster1,cluster2,cluster3])
#dataLetters = np.vstack([cluster1,cluster2])
n1,d1 = np.shape(cluster1)
n2,d2 = np.shape(cluster2)
n3,d3 = np.shape(cluster3)
dataLettersLabels = np.hstack([np.zeros(n1,dtype=int),np.zeros(n2,dtype=int)+1,np.zeros(n3,dtype=int)+2])
#dataLettersLabels = np.hstack([np.zeros(n1,dtype=int),np.zeros(n2,dtype=int)+1])
#############################
### create network data 1 ###
#############################
group1 = ['1','2','3','4','5','6','7',"J"]
group2 = ['9','10','11','12','13','14','15',"K"]
G = nx.Graph()
G.add_edge('1','2')
G.add_edge('1','3')
G.add_edge('1','5')
G.add_edge('1','6')
G.add_edge('2','3')
G.add_edge('3','4')
G.add_edge('4','5')
G.add_edge('7','J')
G.add_edge('J','K')
G.add_edge('K','9')
G.add_edge('7','1')
G.add_edge('9','10')
G.add_edge('10','11')
G.add_edge('10','12')
G.add_edge('12','15')
G.add_edge('12','14')
G.add_edge('11','15')
G.add_edge('11','13')
G.add_edge('13','15')
dataNetwork = G
dataNetworkLabels = np.zeros(len(G.nodes()),dtype=int)
for i in range(len(G.nodes())):
n = G.nodes()[i]
if n in group2:
dataNetworkLabels[i] = 1
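# Illustrative sanity check, not part of the original module: print the shapes
# and label counts of the toy data sets and confirm the small network is one
# connected component bridged by the 'J'-'K' edge. Assumes the networkx 1.x
# API this module targets (indexable G.nodes()).
if __name__ == '__main__':
    print(dataScatter.shape, np.bincount(dataScatterLabels))
    print(dataCircle.shape, np.bincount(dataCircleLabels))
    print(dataLetters.shape, np.bincount(dataLettersLabels))
    print(nx.is_connected(dataNetwork), dataNetwork.number_of_edges())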
|
ajrichards/htsint
|
htsint/stats/ClusteringData.py
|
Python
|
bsd-3-clause
| 4,363
|
[
"Gaussian"
] |
7aabb94aa3885c5a79cb75c9e7911beb1b72d0314b1db591efe9a8e7dd3beb96
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import re
import sys
from .exceptions import *
from .libmintsgshell import *
if sys.version_info >= (3,0):
basestring = str
class Gaussian94BasisSetParser(object):
"""Class for parsing basis sets from a text file in Gaussian 94
format. Translated directly from the Psi4 libmints class written
by Justin M. Turney and Andrew C. Simmonett.
"""
def __init__(self, forced_puream=None):
"""Constructor"""
# If the parser needs to force spherical or cartesian (e.g., loading old guess)
self.force_puream_or_cartesian = False if forced_puream is None else True
# Is the forced value to use puream? (Otherwise force Cartesian).
self.forced_is_puream = False if forced_puream is None else forced_puream
# string filename
self.filename = None
def load_file(self, filename, basisname=None):
"""Load and return the file to be used by parse. Return only
portion of *filename* pertaining to *basisname* if specified (for
multi-basisset files) otherwise entire file as list of strings.
"""
# string filename
self.filename = filename
given_basisname = False if basisname is None else True
found_basisname = False
basis_separator = re.compile(r'^\s*\[\s*(.*?)\s*\]\s*$')
# Loads an entire file.
try:
infile = open(filename, 'r')
except IOError:
raise BasisSetFileNotFound("""BasisSetParser::parse: Unable to open basis set file: %s""" % (filename))
if os.stat(filename).st_size == 0:
raise ValidationError("""BasisSetParser::parse: given filename '%s' is blank.""" % (filename))
contents = infile.readlines()
lines = []
for text in contents:
text = text.strip()
# If no basisname was given always save the line.
if given_basisname is False:
lines.append(text)
if found_basisname:
# If we find another [*] we're done.
if basis_separator.match(text):
what = basis_separator.match(text).group(1)
break
lines.append(text)
continue
# If the user gave a basisname AND text matches the basisname we want to trigger to retain
if given_basisname and basis_separator.match(text):
if basisname == basis_separator.match(text).group(1):
found_basisname = True
return lines
def parse(self, symbol, dataset):
"""Given a string, parse for the basis set needed for atom.
* @param symbol atom symbol to look for in dataset
* @param dataset data set to look through
dataset can be list of lines or a single string which will be converted to list of lines
"""
if isinstance(dataset, basestring):
lines = dataset.split('\n')
else:
lines = dataset
# Regular expressions that we'll be checking for.
cartesian = re.compile(r'^\s*cartesian\s*', re.IGNORECASE)
spherical = re.compile(r'^\s*spherical\s*', re.IGNORECASE)
comment = re.compile(r'^\s*\!.*') # line starts with !
separator = re.compile(r'^\s*\*\*\*\*') # line starts with ****
ATOM = '(([A-Z]{1,3}\d*)|([A-Z]{1,3}_\w+))' # match 'C 0', 'Al c 0', 'P p88 p_pass 0' not 'Ofail 0', 'h99_text 0'
atom_array = re.compile(r'^\s*((' + ATOM + '\s+)+)0\s*$', re.IGNORECASE) # array of atomic symbols terminated by 0
shell = re.compile(r'^\s*(\w+)\s*(\d+)\s*(-?\d+\.\d+)') # Match beginning of contraction
blank = re.compile(r'^\s*$')
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
primitives1 = re.compile(r'^\s*' + NUMBER + '\s+' + NUMBER + '.*') # Match s, p, d, f, g, ... functions
primitives2 = re.compile(r'^\s*' + NUMBER + '\s+' + NUMBER + '\s+' + NUMBER + '.*') # match sp functions
# s, p and s, p, d can be grouped together in Pople-style basis sets
sp = 'SP'
spd = 'SPD'
# a b c d e f g h i j k l m n o p q r s t u v w x y z
#shell_to_am = [-1,-1,-1, 2,-1, 3, 4, 5, 6,-1, 7, 8, 9,10,11, 1,12,13, 0,14,15,16,17,18,19,20]
alpha = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
angmo = [-1, -1, -1, 2, -1, 3, 4, 5, 6, -1, 7, 8,
9, 10, 11, 1, 12, 13, 0, 14, 15, 16, 17, 18, 19, 20]
shell_to_am = dict(zip(alpha, angmo))
# Basis type.
gaussian_type = 'Pure'
if self.force_puream_or_cartesian:
if self.forced_is_puream == False:
gaussian_type = 'Cartesian'
# Need a dummy center for the shell.
center = [0.0, 0.0, 0.0]
shell_list = []
lineno = 0
found = False
while lineno < len(lines):
line = lines[lineno]
lineno += 1
# Ignore blank lines
if blank.match(line):
continue
# Look for Cartesian or Spherical
if not self.force_puream_or_cartesian:
if cartesian.match(line):
gaussian_type = 'Cartesian'
#TODO if psi4.get_global_option('PUREAM').has_changed():
#TODO gaussian_type = 'Pure' if int(psi4.get_global('PUREAM')) else 'Cartesian'
continue
elif spherical.match(line):
gaussian_type = 'Pure'
#TODO if psi4.get_global_option('PUREAM').has_changed():
#TODO gaussian_type = 'Pure' if int(psi4.get_global('PUREAM')) else 'Cartesian'
continue
#end case where puream setting wasn't forced by caller
# Do some matches
if comment.match(line):
continue
if separator.match(line):
continue
# Match: H 0
# or: H O... 0
if atom_array.match(line):
what = atom_array.match(line).group(1).split()
# Check the captures and see if this basis set is for the atom we need.
found = False
if symbol in [x.upper() for x in what]:
found = True
msg = """line %5d""" % (lineno)
# Read in the next line
line = lines[lineno]
lineno += 1
# Need to do the following until we match a "****" which is the end of the basis set
while not separator.match(line):
# Match shell information
if shell.match(line):
what = shell.match(line)
shell_type = str(what.group(1)).upper()
nprimitive = int(what.group(2))
scale = float(what.group(3))
if len(shell_type) == 1:
am = shell_to_am[shell_type[0]]
exponents = [0.0] * nprimitive
contractions = [0.0] * nprimitive
for p in range(nprimitive):
line = lines[lineno]
lineno += 1
line = line.replace('D', 'e', 2)
line = line.replace('d', 'e', 2)
what = primitives1.match(line)
# Must match primitives1; will work on the others later
if not what:
raise ValidationError("""Gaussian94BasisSetParser::parse: Unable to match an exponent with one contraction: line %d: %s""" % (lineno, line))
exponent = float(what.group(1))
contraction = float(what.group(2))
# Scale the contraction and save the information
contraction *= scale
exponents[p] = exponent
contractions[p] = contraction
# We have a full shell, push it to the basis set
shell_list.append(ShellInfo(am, contractions, exponents,
gaussian_type, 0, center, 0, 'Unnormalized'))
elif len(shell_type) == 2:
# This is to handle instances of SP, PD, DF, FG, ...
am1 = shell_to_am[shell_type[0]]
am2 = shell_to_am[shell_type[1]]
exponents = [0.0] * nprimitive
contractions1 = [0.0] * nprimitive
contractions2 = [0.0] * nprimitive
for p in range(nprimitive):
line = lines[lineno]
lineno += 1
line = line.replace('D', 'e', 2)
line = line.replace('d', 'e', 2)
what = primitives2.match(line)
                                # Must match primitives2
if not what:
raise ValidationError("Gaussian94BasisSetParser::parse: Unable to match an exponent with two contractions: line %d: %s" % (lineno, line))
exponent = float(what.group(1))
contraction = float(what.group(2))
# Scale the contraction and save the information
contraction *= scale
exponents[p] = exponent
contractions1[p] = contraction
# Do the other contraction
contraction = float(what.group(3))
# Scale the contraction and save the information
contraction *= scale
contractions2[p] = contraction
shell_list.append(ShellInfo(am1, contractions1, exponents,
gaussian_type, 0, center, 0, 'Unnormalized'))
shell_list.append(ShellInfo(am2, contractions2, exponents,
gaussian_type, 0, center, 0, 'Unnormalized'))
else:
raise ValidationError("""Gaussian94BasisSetParser::parse: Unable to parse basis sets with spd, or higher grouping""")
else:
raise ValidationError("""Gaussian94BasisSetParser::parse: Expected shell information, but got: line %d: %s""" % (lineno, line))
line = lines[lineno]
lineno += 1
break
if not found:
#raise BasisSetNotFound("Gaussian94BasisSetParser::parser: Unable to find the basis set for %s in %s" % \
# (symbol, self.filename), silent=True)
return None, None
return shell_list, msg
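# Hypothetical usage sketch, not part of the original module: feed the parser a
# tiny made-up hydrogen entry in Gaussian 94 format. The exponents and
# contraction coefficients below are invented purely to exercise parse(); run
# this within its package (e.g. via ``python -m``) because of the relative
# imports at the top of the file.
if __name__ == '__main__':
    example_basis = """****
H     0
S   2   1.00
      1.30975638      0.43012850
      0.23313597      0.67891353
****
"""
    shells, msg = Gaussian94BasisSetParser().parse('H', example_basis)
    print(msg, len(shells))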
|
kratman/psi4public
|
psi4/driver/qcdb/libmintsbasissetparser.py
|
Python
|
gpl-2.0
| 12,796
|
[
"Gaussian",
"Psi4"
] |
cf775c85ec7c71bdbddea3cc804d5882c07b19c68ed199cc2c6e0d1c6109abb6
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
import pytest
import MDAnalysis as mda
from MDAnalysis.coordinates.chemfiles import ChemfilesReader, ChemfilesWriter
from MDAnalysis.coordinates.chemfiles import check_chemfiles_version
from MDAnalysisTests import datafiles
from MDAnalysisTests.coordinates.base import (
MultiframeReaderTest,
BaseWriterTest,
BaseReference,
)
from MDAnalysisTests.coordinates.test_xyz import XYZReference
# skip entire test module if no appropriate chemfiles
chemfiles = pytest.importorskip("chemfiles")
@pytest.mark.parametrize('version', ['0.9.3', '0.11.0', '1.1.0'])
def test_version_check(version, monkeypatch):
monkeypatch.setattr('chemfiles.__version__', version)
assert not check_chemfiles_version()
with pytest.raises(RuntimeError, match="Please install Chemfiles > 0.10"):
ChemfilesReader("")
with pytest.raises(RuntimeError, match="Please install Chemfiles > 0.10"):
ChemfilesWriter("")
@pytest.mark.skipif(not check_chemfiles_version(), reason="Wrong version of chemfiles")
class TestChemfileXYZ(MultiframeReaderTest):
@staticmethod
@pytest.fixture
def ref():
base = XYZReference()
base.writer = ChemfilesWriter
base.dimensions = None
return base
@pytest.fixture
def reader(self, ref):
reader = ChemfilesReader(ref.trajectory)
reader.add_auxiliary(
"lowf",
ref.aux_lowf,
dt=ref.aux_lowf_dt,
initial_time=0,
time_selector=None,
)
reader.add_auxiliary(
"highf",
ref.aux_highf,
dt=ref.aux_highf_dt,
initial_time=0,
time_selector=None,
)
return reader
class ChemfilesXYZReference(BaseReference):
def __init__(self):
super(ChemfilesXYZReference, self).__init__()
self.trajectory = datafiles.COORDINATES_XYZ
self.topology = datafiles.COORDINATES_XYZ
self.reader = ChemfilesReader
self.writer = ChemfilesWriter
self.ext = "xyz"
self.volume = 0
self.dimensions = None
@pytest.mark.skipif(not check_chemfiles_version(), reason="Wrong version of chemfiles")
class TestChemfilesReader(MultiframeReaderTest):
@staticmethod
@pytest.fixture()
def ref():
return ChemfilesXYZReference()
@pytest.mark.skipif(not check_chemfiles_version(), reason="Wrong version of chemfiles")
class TestChemfilesWriter(BaseWriterTest):
@staticmethod
@pytest.fixture()
def ref():
return ChemfilesXYZReference()
    # Disable 'test_no_container' as it tries to open a file for writing without
# extension.
def test_no_container(self, ref):
pass
def test_no_extension_raises(self, ref):
with pytest.raises(chemfiles.ChemfilesError):
ref.writer("foo")
@pytest.mark.skipif(not check_chemfiles_version(), reason="Wrong version of chemfiles")
class TestChemfiles(object):
def test_read_chemfiles_format(self):
u = mda.Universe(
datafiles.LAMMPSdata,
format="chemfiles",
topology_format="data",
chemfiles_format="LAMMPS Data",
)
for ts in u.trajectory:
assert ts.n_atoms == 18364
def test_changing_system_size(self, tmpdir):
outfile = "chemfiles-changing-size.xyz"
with tmpdir.as_cwd():
with open(outfile, "w") as fd:
fd.write(VARYING_XYZ)
u = mda.Universe(outfile, format="chemfiles", topology_format="XYZ")
with pytest.raises(IOError):
u.trajectory._read_next_timestep()
def test_wrong_open_mode(self):
with pytest.raises(IOError):
_ = ChemfilesWriter("", mode="r")
def check_topology(self, reference, file):
u = mda.Universe(reference)
atoms = set([(atom.name, atom.type, atom.record_type) for atom in u.atoms])
bonds = set([(bond.atoms[0].ix, bond.atoms[1].ix) for bond in u.bonds])
check = mda.Universe(file)
np.testing.assert_equal(
u.trajectory.ts.positions,
check.trajectory.ts.positions,
)
for atom in check.atoms:
assert (atom.name, atom.type, atom.record_type) in atoms
for bond in check.bonds:
assert (bond.atoms[0].ix, bond.atoms[1].ix) in bonds
def test_write_topology(self, tmpdir):
u = mda.Universe(datafiles.CONECT)
outfile = "chemfiles-write-topology.pdb"
with tmpdir.as_cwd():
with ChemfilesWriter(outfile) as writer:
writer.write(u)
self.check_topology(datafiles.CONECT, outfile)
# Manually setting the topology when creating the ChemfilesWriter
# (1) from an object
with ChemfilesWriter(outfile, topology=u) as writer:
writer.write(u)
self.check_topology(datafiles.CONECT, outfile)
# (2) from a file
with ChemfilesWriter(outfile, topology=datafiles.CONECT) as writer:
writer.write(u)
# FIXME: this does not work, since chemfiles also insert the bonds
# which are implicit in PDB format (between standard residues), while
# MDAnalysis only read the explicit CONNECT records.
# self.check_topology(datafiles.CONECT, outfile)
def test_write_atom_group(self, tmpdir):
u = mda.Universe(datafiles.CONECT)
group = u.select_atoms("resname ARG")
with tmpdir.as_cwd():
outfile = "chemfiles-write-atom-group.pdb"
with ChemfilesWriter(outfile) as writer:
writer.write(group)
check = mda.Universe(outfile)
assert check.trajectory.ts.n_atoms == group.n_atoms
def test_write_velocities(self, tmpdir):
u = mda.Universe.empty(4, trajectory=True)
u.add_TopologyAttr("type", values=["H", "H", "H", "H"])
ts = u.trajectory.ts
ts.dimensions = [20, 30, 41, 90, 90, 90]
ts.positions = [
[1, 1, 1],
[2, 2, 2],
[3, 3, 3],
[4, 4, 4],
]
ts.velocities = [
[10, 10, 10],
[20, 20, 20],
[30, 30, 30],
[40, 40, 40],
]
outfile = "chemfiles-write-velocities.lmp"
with tmpdir.as_cwd():
with ChemfilesWriter(
outfile, topology=u, chemfiles_format="LAMMPS Data"
) as writer:
writer.write(u)
with open(outfile) as file:
content = file.read()
assert content == EXPECTED_LAMMPS_DATA
VARYING_XYZ = """2
A 0 0 0
A 0 0 0
4
A 0 0 0
A 0 0 0
A 0 0 0
A 0 0 0
"""
EXPECTED_LAMMPS_DATA = """LAMMPS data file -- atom_style full -- generated by chemfiles
4 atoms
0 bonds
0 angles
0 dihedrals
0 impropers
1 atom types
0 bond types
0 angle types
0 dihedral types
0 improper types
0 20.0 xlo xhi
0 30.0 ylo yhi
0 41.0 zlo zhi
# Pair Coeffs
# 1 H
Masses
1 0.0 # H
Atoms # full
1 1 1 0.0 1.0 1.0 1.0 # H
2 2 1 0.0 2.0 2.0 2.0 # H
3 3 1 0.0 3.0 3.0 3.0 # H
4 4 1 0.0 4.0 4.0 4.0 # H
Velocities
1 10.0 10.0 10.0
2 20.0 20.0 20.0
3 30.0 30.0 30.0
4 40.0 40.0 40.0
"""
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_chemfiles.py
|
Python
|
gpl-2.0
| 8,311
|
[
"Chemfiles",
"LAMMPS",
"MDAnalysis"
] |
fd98e4d3ec1b31cfa0dda1238bef1cc986c3f266c2d99ce27491331c5a96c062
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Panels lib
#
# Copyright (C) 2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import json
import logging
from os import sys
import requests
from grimoire.elk.elastic import ElasticSearch
def get_dashboard_json(elastic, dashboard):
dash_json_url = elastic.index_url+"/dashboard/"+dashboard
r = requests.get(dash_json_url, verify=False)
dash_json = r.json()
if "_source" not in dash_json:
logging.error("Can not find dashboard: %s", dashboard)
print (dash_json_url)
sys.exit(1)
return dash_json['_source']
def get_vis_json(elastic, vis):
vis_json_url = elastic.index_url+"/visualization/"+vis
r = requests.get(vis_json_url, verify=False)
vis_json = r.json()
if "_source" not in vis_json:
logging.error("Can not find vis: %s (%s)", vis, vis_json_url)
return
return vis_json['_source']
def get_search_json(elastic, search_id):
search_json_url = elastic.index_url+"/search/"+search_id
r = requests.get(search_json_url, verify=False)
search_json = r.json()
if "_source" not in search_json:
logging.error("Can not find search: %s (%s)", search_json_url)
return
return search_json['_source']
def get_index_pattern_json(elastic, index_pattern):
index_pattern_json_url = elastic.index_url+"/index-pattern/"+index_pattern
r = requests.get(index_pattern_json_url, verify=False)
index_pattern_json = r.json()
if "_source" not in index_pattern_json:
logging.error("Can not find index_pattern_json: %s", index_pattern_json_url)
return
return index_pattern_json['_source']
def get_search_from_vis(elastic, vis):
search_id = None
vis_json = get_vis_json(elastic, vis)
if not vis_json:
        return search_id
# The index pattern could be in search or in state
# First search for it in saved search
if "savedSearchId" in vis_json:
search_id = vis_json["savedSearchId"]
return search_id
def create_search(elastic_url, dashboard, index_pattern, es_index=None):
""" Create the base search for vis if used
:param elastic_url: URL for ElasticSearch (ES) server
:param dashboard: kibana dashboard to be used as template
    :param index_pattern: index pattern to be used by the new search
"""
search_id = None
if not es_index:
es_index = ".kibana"
elastic = ElasticSearch(elastic_url, es_index)
dash_data = get_dashboard_json(elastic, dashboard)
# First vis
if "panelsJSON" not in dash_data:
logging.error("Can not find vis in dashboard: %s", dashboard)
        raise RuntimeError("Can not find vis in dashboard: %s" % dashboard)
# Get the search from the first vis in the panel
for panel in json.loads(dash_data["panelsJSON"]):
panel_id = panel["id"]
logging.debug("Checking search in %s vis", panel_id)
search_id = get_search_from_vis(elastic, panel_id)
if search_id:
break
# And now time to create the search found
if not search_id:
logging.info("Can't find search %s", dashboard)
return
logging.debug("Found template search %s", search_id)
search_json = get_search_json(elastic, search_id)
search_source = search_json['kibanaSavedObjectMeta']['searchSourceJSON']
new_search_source = json.loads(search_source)
new_search_source['index'] = index_pattern
new_search_source = json.dumps(new_search_source)
search_json['kibanaSavedObjectMeta']['searchSourceJSON'] = new_search_source
search_json['title'] += " " + index_pattern
new_search_id = search_id+"__"+index_pattern
url = elastic.index_url+"/search/"+new_search_id
requests.post(url, data = json.dumps(search_json), verify=False)
logging.debug("New search created: %s", url)
return new_search_id
def get_index_pattern_from_meta(meta_data):
index = None
mdata = meta_data["searchSourceJSON"]
mdata = json.loads(mdata)
if "index" in mdata:
index = mdata["index"]
if "filter" in mdata:
if len(mdata["filter"]) > 0:
index = mdata["filter"][0]["meta"]["index"]
return index
def get_index_pattern_from_vis(elastic, vis):
index_pattern = None
vis_json = get_vis_json(elastic, vis)
if not vis_json:
return
# The index pattern could be in search or in state
# First search for it in saved search
if "savedSearchId" in vis_json:
search_json_url = elastic.index_url+"/search/"+vis_json["savedSearchId"]
search_json = requests.get(search_json_url, verify=False).json()["_source"]
index_pattern = get_index_pattern_from_meta(search_json["kibanaSavedObjectMeta"])
elif "kibanaSavedObjectMeta" in vis_json:
index_pattern = get_index_pattern_from_meta(vis_json["kibanaSavedObjectMeta"])
return index_pattern
def create_index_pattern(elastic_url, dashboard, enrich_index, es_index=None):
""" Create a index pattern using as template the index pattern
in dashboard template vis
:param elastic_url: URL for ElasticSearch (ES) server
:param dashboard: kibana dashboard to be used as template
:param enrich_index: ES index with enriched items used in the new dashboard
"""
index_pattern = None
if not es_index:
es_index = ".kibana"
elastic = ElasticSearch(elastic_url, es_index)
dash_data = get_dashboard_json(elastic, dashboard)
# First vis
if "panelsJSON" not in dash_data:
logging.error("Can not find vis in dashboard: %s", dashboard)
        raise RuntimeError("Can not find vis in dashboard: %s" % dashboard)
# Get the index pattern from the first vis in the panel
    # that has index pattern data
for panel in json.loads(dash_data["panelsJSON"]):
panel_id = panel["id"]
logging.debug("Checking index pattern in %s vis", panel_id)
index_pattern = get_index_pattern_from_vis(elastic, panel_id)
if index_pattern:
break
# And now time to create the index pattern found
if not index_pattern:
logging.error("Can't find index pattern for %s", dashboard)
        raise RuntimeError("Can't find index pattern for %s" % dashboard)
logging.debug("Found %s template index pattern", index_pattern)
new_index_pattern_json = get_index_pattern_json(elastic, index_pattern)
new_index_pattern_json['title'] = enrich_index
url = elastic.index_url+"/index-pattern/"+enrich_index
requests.post(url, data = json.dumps(new_index_pattern_json), verify=False)
logging.debug("New index pattern created: %s", url)
return enrich_index
def create_dashboard(elastic_url, dashboard, enrich_index, kibana_host, es_index=None):
""" Create a new dashboard using dashboard as template
and reading the data from enriched_index """
def new_panels(elastic, panels, search_id):
""" Create the new panels and their vis for the dashboard from the
panels in the template dashboard """
dash_vis_ids = []
new_panels = []
for panel in panels:
if panel['type'] in ['visualization', 'search']:
if panel['type'] == 'visualization':
dash_vis_ids.append(panel['id'])
panel['id'] += "__"+enrich_index
if panel['type'] == 'search':
panel['id'] = search_id
new_panels.append(panel)
create_vis(elastic, dash_vis_ids, search_id)
return new_panels
def create_vis(elastic, dash_vis_ids, search_id):
""" Create new visualizations for the dashboard """
# Create visualizations for the new dashboard
item_template_url = elastic.index_url+"/visualization"
# Hack: Get all vis if they are <10000. Use scroll API to get all.
# Better: use mget to get all vis in dash_vis_ids
item_template_url_search = item_template_url+"/_search?size=10000"
r = requests.get(item_template_url_search, verify=False)
        all_visualizations = r.json()['hits']['hits']
visualizations = []
for vis in all_visualizations:
if vis['_id'] in dash_vis_ids:
visualizations.append(vis)
logging.info("Total template vis found: %i", len(visualizations))
for vis in visualizations:
vis_data = vis['_source']
vis_name = vis['_id'].split("_")[-1]
vis_id = vis_name+"__"+enrich_index
vis_data['title'] = vis_id
vis_meta = json.loads(vis_data['kibanaSavedObjectMeta']['searchSourceJSON'])
vis_meta['index'] = enrich_index
vis_data['kibanaSavedObjectMeta']['searchSourceJSON'] = json.dumps(vis_meta)
if "savedSearchId" in vis_data:
vis_data["savedSearchId"] = search_id
url = item_template_url+"/"+vis_id
r = requests.post(url, data = json.dumps(vis_data), verify=False)
logging.debug("Created new vis %s", url)
if not es_index:
es_index = ".kibana"
# First create always the index pattern as data source
index_pattern = create_index_pattern(elastic_url, dashboard, enrich_index, es_index)
    # If search is used create a new search with the new index_pattern
search_id = create_search(elastic_url, dashboard, index_pattern, es_index)
elastic = ElasticSearch(elastic_url, es_index)
# Create the new dashboard from the template
dash_data = get_dashboard_json(elastic, dashboard)
dash_data['title'] = enrich_index
# Load template panels to create the new ones with their new vis
panels = json.loads(dash_data['panelsJSON'])
dash_data['panelsJSON'] = json.dumps(new_panels(elastic, panels, search_id))
dash_path = "/dashboard/"+dashboard+"__"+enrich_index
url = elastic.index_url + dash_path
requests.post(url, data = json.dumps(dash_data), verify=False)
dash_url = kibana_host+"/app/kibana#"+dash_path
return dash_url
def list_dashboards(elastic_url, es_index=None):
if not es_index:
es_index = ".kibana"
elastic = ElasticSearch(elastic_url, es_index)
dash_json_url = elastic.index_url+"/dashboard/_search?size=10000"
print (dash_json_url)
r = requests.get(dash_json_url, verify=False)
res_json = r.json()
if "hits" not in res_json:
logging.error("Can't find dashboards")
raise RuntimeError("Can't find dashboards")
for dash in res_json["hits"]["hits"]:
print (dash["_id"])
def import_dashboard(elastic_url, import_file, es_index=None):
logging.debug("Reading from %s the JSON for the dashboard to be imported",
import_file)
with open(import_file, 'r') as f:
try:
kibana = json.loads(f.read())
except ValueError:
logging.error("Wrong file format")
sys.exit(1)
if 'dashboard' not in kibana:
logging.error("Wrong file format. Can't find 'dashboard' field.")
sys.exit(1)
if not es_index:
es_index = ".kibana"
elastic = ElasticSearch(elastic_url, es_index)
url = elastic.index_url+"/dashboard/"+kibana['dashboard']['id']
requests.post(url, data = json.dumps(kibana['dashboard']['value']), verify=False)
if 'searches' in kibana:
for search in kibana['searches']:
url = elastic.index_url+"/search/"+search['id']
requests.post(url, data = json.dumps(search['value']), verify=False)
if 'index_patterns' in kibana:
for index in kibana['index_patterns']:
url = elastic.index_url+"/index-pattern/"+index['id']
requests.post(url, data = json.dumps(index['value']), verify=False)
if 'visualizations' in kibana:
for vis in kibana['visualizations']:
url = elastic.index_url+"/visualization"+"/"+vis['id']
requests.post(url, data = json.dumps(vis['value']), verify=False)
logging.debug("Done")
def export_dashboard(elastic_url, dash_id, export_file, es_index=None):
# Kibana dashboard fields
kibana = {"dashboard": None,
"visualizations": [],
"index_patterns": [],
"searches": []}
# Used to avoid having duplicates
search_ids_done = []
index_ids_done = []
logging.debug("Exporting dashboard %s to %s", dash_id, export_file)
if not es_index:
es_index = ".kibana"
elastic = ElasticSearch(elastic_url, es_index)
kibana["dashboard"] = {"id":dash_id, "value":get_dashboard_json(elastic, dash_id)}
if "panelsJSON" not in kibana["dashboard"]["value"]:
# The dashboard is empty. No visualizations included.
return kibana
# Export all visualizations and the index patterns and searches in them
for panel in json.loads(kibana["dashboard"]["value"]["panelsJSON"]):
if panel['type'] in ['visualization']:
vis_id = panel['id']
vis_json = get_vis_json(elastic, vis_id)
kibana["visualizations"].append({"id": vis_id, "value": vis_json})
search_id = get_search_from_vis(elastic, vis_id)
if search_id and search_id not in search_ids_done:
search_ids_done.append(search_id)
kibana["searches"].append({"id":search_id,
"value":get_search_json(elastic, search_id)})
index_pattern_id = get_index_pattern_from_vis(elastic, vis_id)
if index_pattern_id and index_pattern_id not in index_ids_done:
index_ids_done.append(index_pattern_id)
kibana["index_patterns"].append({"id":index_pattern_id,
"value":get_index_pattern_json(elastic, index_pattern_id)})
logging.debug("Done")
with open(export_file, 'w') as f:
f.write(json.dumps(kibana, indent=4, sort_keys=True))
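# Hypothetical usage sketch, not part of the original module: list the
# dashboards in a local Kibana index, export one to JSON, and load it back.
# The Elasticsearch URL, dashboard id and file name are placeholders for a
# real deployment.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    es_url = "http://localhost:9200"
    list_dashboards(es_url)
    export_dashboard(es_url, "Overview", "overview.json")
    import_dashboard(es_url, "overview.json")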
|
sanacl/GrimoireELK
|
grimoire/panels.py
|
Python
|
gpl-3.0
| 14,619
|
[
"Elk"
] |
d88f3d71df9115d86e3c28ade4077e288befe163ad23c1f8601004ff7adadb83
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Configurator.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_ConfiguratorWidget(object):
def setupUi(self, ConfiguratorWidget):
ConfiguratorWidget.setObjectName(_fromUtf8("ConfiguratorWidget"))
ConfiguratorWidget.resize(394, 304)
self.gaussian_frame = QtGui.QFrame(ConfiguratorWidget)
self.gaussian_frame.setGeometry(QtCore.QRect(0, 0, 421, 331))
self.gaussian_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.gaussian_frame.setFrameShadow(QtGui.QFrame.Raised)
self.gaussian_frame.setObjectName(_fromUtf8("gaussian_frame"))
self.size_label = QtGui.QLabel(self.gaussian_frame)
self.size_label.setGeometry(QtCore.QRect(20, 30, 231, 51))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.size_label.setFont(font)
self.size_label.setObjectName(_fromUtf8("size_label"))
self.x_label = QtGui.QLabel(self.gaussian_frame)
self.x_label.setGeometry(QtCore.QRect(20, 70, 41, 51))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.x_label.setFont(font)
self.x_label.setObjectName(_fromUtf8("x_label"))
self.y_label = QtGui.QLabel(self.gaussian_frame)
self.y_label.setGeometry(QtCore.QRect(20, 110, 41, 51))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.y_label.setFont(font)
self.y_label.setObjectName(_fromUtf8("y_label"))
self.sliderX = QtGui.QSlider(self.gaussian_frame)
self.sliderX.setGeometry(QtCore.QRect(70, 90, 160, 22))
self.sliderX.setOrientation(QtCore.Qt.Horizontal)
self.sliderX.setObjectName(_fromUtf8("sliderX"))
self.sliderY = QtGui.QSlider(self.gaussian_frame)
self.sliderY.setGeometry(QtCore.QRect(70, 130, 160, 22))
self.sliderY.setOrientation(QtCore.Qt.Horizontal)
self.sliderY.setObjectName(_fromUtf8("sliderY"))
self.sigmas_label = QtGui.QLabel(self.gaussian_frame)
self.sigmas_label.setGeometry(QtCore.QRect(10, 160, 211, 51))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.sigmas_label.setFont(font)
self.sigmas_label.setObjectName(_fromUtf8("sigmas_label"))
self.sigma_x_label = QtGui.QLabel(self.gaussian_frame)
self.sigma_x_label.setGeometry(QtCore.QRect(20, 200, 41, 51))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.sigma_x_label.setFont(font)
self.sigma_x_label.setObjectName(_fromUtf8("sigma_x_label"))
self.sigma_y_label = QtGui.QLabel(self.gaussian_frame)
self.sigma_y_label.setGeometry(QtCore.QRect(20, 240, 41, 51))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.sigma_y_label.setFont(font)
self.sigma_y_label.setObjectName(_fromUtf8("sigma_y_label"))
self.sliderX_sigma = QtGui.QSlider(self.gaussian_frame)
self.sliderX_sigma.setGeometry(QtCore.QRect(70, 220, 160, 22))
self.sliderX_sigma.setOrientation(QtCore.Qt.Horizontal)
self.sliderX_sigma.setObjectName(_fromUtf8("sliderX_sigma"))
self.sliderY_sigma = QtGui.QSlider(self.gaussian_frame)
self.sliderY_sigma.setGeometry(QtCore.QRect(70, 250, 160, 22))
self.sliderY_sigma.setOrientation(QtCore.Qt.Horizontal)
self.sliderY_sigma.setObjectName(_fromUtf8("sliderY_sigma"))
self.apply_btn = QtGui.QPushButton(self.gaussian_frame)
self.apply_btn.setGeometry(QtCore.QRect(270, 250, 81, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.apply_btn.setFont(font)
self.apply_btn.setObjectName(_fromUtf8("apply_btn"))
self.info_label = QtGui.QLabel(self.gaussian_frame)
self.info_label.setGeometry(QtCore.QRect(20, 10, 351, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.info_label.setFont(font)
self.info_label.setObjectName(_fromUtf8("info_label"))
self.retranslateUi(ConfiguratorWidget)
QtCore.QMetaObject.connectSlotsByName(ConfiguratorWidget)
def retranslateUi(self, ConfiguratorWidget):
ConfiguratorWidget.setWindowTitle(_translate("ConfiguratorWidget", "Gaussian Configurator", None))
self.size_label.setText(_translate("ConfiguratorWidget", "Matrix Size: (X, Y) ", None))
self.x_label.setText(_translate("ConfiguratorWidget", " X:", None))
self.y_label.setText(_translate("ConfiguratorWidget", " Y:", None))
self.sigmas_label.setText(_translate("ConfiguratorWidget", " Sigmas Value: X, Y", None))
self.sigma_x_label.setText(_translate("ConfiguratorWidget", " X:", None))
self.sigma_y_label.setText(_translate("ConfiguratorWidget", " Y:", None))
self.apply_btn.setText(_translate("ConfiguratorWidget", "Apply", None))
self.info_label.setText(_translate("ConfiguratorWidget", " At least have to select one of the two fields.", None))
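
# ----------------------------------------------------------------------
# Hedged usage sketch (not produced by pyuic4): the generated class above is
# normally composed into a QWidget subclass like the illustrative one below;
# the class and slot names are assumptions, not part of the .ui file.
class _ExampleGaussianConfigurator(QtGui.QWidget):
    def __init__(self, parent=None):
        super(_ExampleGaussianConfigurator, self).__init__(parent)
        self.ui = Ui_ConfiguratorWidget()
        self.ui.setupUi(self)
        self.ui.apply_btn.clicked.connect(self._on_apply)

    def _on_apply(self):
        # Read back the matrix-size and sigma sliders created in setupUi().
        size = (self.ui.sliderX.value(), self.ui.sliderY.value())
        sigmas = (self.ui.sliderX_sigma.value(), self.ui.sliderY_sigma.value())
        print(size, sigmas)
# ----------------------------------------------------------------------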
|
rainer85ah/VisionViewer
|
src/Builders/GaussianBlur/Configurator.py
|
Python
|
mit
| 6,222
|
[
"Gaussian"
] |
5842c691dc6dabc3fcfe2632e6931d7f59b681f5f00ec42dc16d8a80c258a13b
|
"""Wigner-Seitz truncated coulomb interaction.
See:
Ravishankar Sundararaman and T. A. Arias:
Phys. Rev. B 87, 165122 (2013)
Regularization of the Coulomb singularity in exact exchange by
Wigner-Seitz truncated interactions: Towards chemical accuracy
in nontrivial systems
"""
import sys
from math import pi
import numpy as np
from ase.units import Bohr
from ase.utils import prnt
import gpaw.mpi as mpi
from gpaw.utilities import erf
from gpaw.fftw import get_efficient_fft_size
from gpaw.grid_descriptor import GridDescriptor
class WignerSeitzTruncatedCoulomb:
def __init__(self, cell_cv, nk_c, txt=sys.stdout):
self.nk_c = nk_c
bigcell_cv = cell_cv * nk_c[:, np.newaxis]
L_c = (np.linalg.inv(bigcell_cv)**2).sum(0)**-0.5
rc = 0.5 * L_c.min()
prnt('Inner radius for %dx%dx%d Wigner-Seitz cell: %.3f Ang' %
(tuple(nk_c) + (rc * Bohr,)), file=txt)
self.a = 5 / rc
prnt('Range-separation parameter: %.3f Ang^-1' % (self.a / Bohr),
file=txt)
# nr_c = [get_efficient_fft_size(2 * int(L * self.a * 1.5))
nr_c = [get_efficient_fft_size(2 * int(L * self.a * 3.0))
for L in L_c]
prnt('FFT size for calculating truncated Coulomb: %dx%dx%d' %
tuple(nr_c), file=txt)
self.gd = GridDescriptor(nr_c, bigcell_cv, comm=mpi.serial_comm)
v_R = self.gd.empty()
v_i = v_R.ravel()
pos_iv = self.gd.get_grid_point_coordinates().reshape((3, -1)).T
corner_jv = np.dot(np.indices((2, 2, 2)).reshape((3, 8)).T, bigcell_cv)
for i, pos_v in enumerate(pos_iv):
r = ((pos_v - corner_jv)**2).sum(axis=1).min()**0.5
if r == 0:
v_i[i] = 2 * self.a / pi**0.5
else:
v_i[i] = erf(self.a * r) / r
self.K_Q = np.fft.fftn(v_R) * self.gd.dv
def get_potential(self, pd):
q_c = pd.kd.bzk_kc[0]
shift_c = (q_c * self.nk_c).round().astype(int)
max_c = self.gd.N_c // 2
K_G = pd.zeros()
N_c = pd.gd.N_c
for G, Q in enumerate(pd.Q_qG[0]):
Q_c = (np.unravel_index(Q, N_c) + N_c // 2) % N_c - N_c // 2
Q_c = Q_c * self.nk_c + shift_c
if (abs(Q_c) < max_c).all():
K_G[G] = self.K_Q[tuple(Q_c)]
G2_G = pd.G2_qG[0]
a = self.a
if pd.kd.gamma:
K_G[0] += pi / a**2
else:
K_G[0] += 4 * pi * (1 - np.exp(-G2_G[0] / (4 * a**2))) / G2_G[0]
K_G[1:] += 4 * pi * (1 - np.exp(-G2_G[1:] / (4 * a**2))) / G2_G[1:]
assert pd.dtype == complex
return K_G
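
# ----------------------------------------------------------------------
# Hedged note (not part of the original module): the splitting used above is
# 1/r = erf(a*r)/r + erfc(a*r)/r.  The smooth erf part is tabulated on the
# real-space grid of the nk_c-times repeated cell (v_R) and Fourier
# transformed once into self.K_Q, while the short-range erfc remainder is
# added analytically in get_potential() as 4*pi*(1 - exp(-G^2/(4a^2)))/G^2,
# whose G -> 0 limit is pi/a^2 (the Gamma-point correction).
#
# Illustrative construction (the cell, in Bohr, and the k-point grid below
# are assumptions):
#
#   cell_cv = np.eye(3) * 5.0
#   nk_c = np.array([2, 2, 2])
#   wstc = WignerSeitzTruncatedCoulomb(cell_cv, nk_c)
#   # K_G = wstc.get_potential(pd)   # pd: a GPAW plane-wave descriptor
# ----------------------------------------------------------------------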
|
robwarm/gpaw-symm
|
gpaw/response/wstc.py
|
Python
|
gpl-3.0
| 2,740
|
[
"ASE",
"GPAW"
] |
8982152e7113aebfcc5a95079671429125c2545c7b8c52fef857f02d5e842b98
|
#!/usr/bin/env python3
'''
Unpack ARM instruction XML files extracting the encoding information
and ASL code within them.
'''
import argparse
import glob
import json
import os
import re
import string
import sys
import xml.etree.cElementTree as ET
from collections import defaultdict
from itertools import takewhile
include_regex = None
exclude_regex = None
########################################################################
# Tag file support
########################################################################
tags = set()
'''
Write content to a 'tag file' suppressing duplicate information
'''
def emit(f, tag, content):
if tag not in tags: # suppress duplicate entries
tags.add(tag)
print('TAG:'+tag, file=f)
print(content, file=f)
########################################################################
# Workarounds
########################################################################
# workaround: v8-A code still uses the keyword 'type' as a variable name
# change that to 'type1'
def patchTypeAsVar(x):
return re.sub(r'([^a-zA-Z0-9_\n])type([^a-zA-Z0-9_])', r'\1type1\2', x)
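
# Hedged example (illustrative, not taken from the ARM XML): a declaration
#   "bits(2) type;"   is rewritten to   "bits(2) type1;"
# because 'type' is surrounded by non-identifier characters, while names like
# 'subtype' or 'type_name' are left untouched.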
########################################################################
# Classes
########################################################################
class ASL:
'''Representation of ASL code consisting of the code, list of names it defines and list of dependencies'''
def __init__(self, name, code, defs, deps):
self.name = name
self.code = code
self.defs = defs
self.deps = deps
def emit(self, file, tag):
emit(file, tag, self.code)
def put(self, ofile, indent):
for l in self.code.splitlines():
print(" "*indent + l, file=ofile)
def __str__(self):
return "ASL{"+", ".join([self.name, str(self.defs), str(self.deps)])+"}"
# workaround: patch all ASL code with extra dependencies
def patchDependencies(self, chunks):
for line in self.code.splitlines():
l = re.split('//', line)[0] # drop comments
for m in re.finditer('''([a-zA-Z_]\w+(\.\w+)?\[?)''', l):
n = m.group(1)
if n in chunks:
self.deps |= {chunks[n].name}
self.deps |= {n}
# print("Adding dep", n, chunks[n].name)
self.deps -= self.defs
# Workaround: ProcState SP field incorrectly handled
if self.name == "shared/functions/system/ProcState": self.deps -= {"SP", "SP.write.none"}
if "Unpredictable_WBOVERLAPST" in self.defs: self.deps -= {"PSTATE"}
# workaround: v8-A code still uses the keyword 'type' as a variable name
# change that to 'type1'
def patchTypeVar(self):
self.code = patchTypeAsVar(self.code)
def toPrototype(self):
'''Strip function bodies out of ASL
        This is used when a function is cut but we still need to keep
        the function prototype so that type information is preserved.'''
# build groups of lines based on whether they have matching numbers of parentheses
groups = []
group = []
parens = 0
for l in self.code.splitlines():
group.append(l)
# update count of matching parentheses
openers = len(re.findall('[([]', l))
closers = len(re.findall('[)\]]', l))
parens = parens + openers - closers
if parens == 0:
groups.append(group)
group = []
# crude heuristic for function bodies: starts with blank chars
# beware: only works if the ASL block only contains functions
lines = [ l for g in groups if not g[0].startswith(" ") for l in g ]
# print("Generating prototype for "+self.name)
# print(" "+"\n ".join(lines))
return ASL(self.name, '\n'.join(lines), self.defs, set())
# Test whether instruction encoding has a field with given name
def hasField(fields, nm):
return any(f == nm for (_, _, f, _, _) in fields)
# Turn instruction and encoding names into identifiers
# e.g., "aarch32/UHSAX/A1_A" becomes "aarch32_UHSAX_A1_A"
# and remove dots from "LDNT1D_Z.P.BR_Contiguous"
def deslash(nm):
return nm.replace("/instrs","").replace("/", "_").replace("-","_").replace(".","_")
class Instruction:
'''Representation of Instructions'''
def __init__(self, name, encs, post, conditional, exec):
self.name = name
self.encs = encs
self.post = post
self.conditional = conditional
self.exec = exec
def emit_asl_syntax(self, ofile):
print("__instruction "+ deslash(self.name), file=ofile)
for (inm,insn_set,fields,dec) in self.encs:
unpreds = []
pattern = "" # todo: assumes that fields are sorted in order
print(" __encoding "+ deslash(inm), file=ofile)
print(" __instruction_set "+ insn_set, file=ofile)
for (hi, lo, nm, split, consts) in fields:
# assert(not split) todo
wd = (hi - lo) + 1
if re.fullmatch("(\([01]\))+", nm):
# workaround
consts = nm
nm = '_'
# convert all the 'should be' bits to 'unpredictable_unless'
cs = ""
i = hi
while consts != "":
if consts.startswith("(1)") or consts.startswith("(0)"):
unpreds.append((i, consts[1]))
cs = cs + "x"
consts = consts[3:]
elif consts[0] in "01x":
cs = cs + consts[0]
consts = consts[1:]
else:
print("Malformed field "+consts)
assert False
i = i - 1
consts = cs
assert len(consts) == wd
pattern = pattern + consts
nm = patchTypeAsVar(nm) # workaround
if nm != "_":
print(" __field "+nm+" "+str(lo)+" +: "+str(wd), file=ofile)
pattern = [ pattern[i:i+8] for i in range(0, len(pattern), 8) ]
print(" __opcode '" + " ".join(pattern) + "'", file=ofile)
guard = "cond != '1111'" if insn_set == "A32" and hasField(fields, "cond") else "TRUE";
print(" __guard "+guard, file=ofile)
for (i, v) in unpreds:
print(" __unpredictable_unless "+str(i)+" == '"+v+"'", file=ofile)
print(" __decode", file=ofile)
dec.patchTypeVar()
dec.put(ofile, 12)
print(file=ofile)
if self.post:
print(" __postdecode", file=ofile)
self.post.patchTypeVar()
self.post.put(ofile, 8)
if self.conditional:
print(" __execute __conditional", file=ofile)
else:
print(" __execute", file=ofile)
self.exec.patchTypeVar()
self.exec.put(ofile, 8)
def emit_tag_syntax(self, file):
index = [] # index of sections of this instruction
exec_tag = self.name+':execute'
post_tag = self.name+':postdecode'
idx_tag = self.name+':index'
self.exec.emit(file, exec_tag)
index.append('Execute: '+exec_tag)
if self.post:
self.post.emit(file, post_tag)
index.append('Postdecode: '+post_tag)
for (inm,insn_set,fields,dec) in self.encs:
dec_tag = inm + ':decode'
enc_tag = inm + ':diagram'
enc = [insn_set]
enc.extend([str(hi)+":"+str(lo)+" "+nm+" "+consts
for (hi,lo,nm,_,consts) in fields ])
emit(file, enc_tag, "\n".join(enc))
dec.emit(file, dec_tag)
index.append('Decode: '+dec_tag+'@'+enc_tag)
emit(file, idx_tag, "\n".join(index))
def emit_sail_ast(self, previous_clauses, file):
for enc in self.encs:
enc_name, enc_iset, enc_fields, enc_asl = enc
fields = [(nm, hi - lo + 1) for (hi, lo, nm, split, consts) in enc_fields if nm != '_']
typed_fields = ['/* {} : */ bits({})'.format(name, length) for (name, length) in fields]
if len(typed_fields) < 1:
clause = 'union clause ast = ' + sanitize(enc_name) + ' : unit'
else:
clause = 'union clause ast = ' + sanitize(enc_name) + ' : (' + ', '.join(typed_fields) + ')'
if clause not in previous_clauses:
print(clause, file=file)
previous_clauses.add(clause)
def __str__(self):
encs = "["+ ", ".join([inm for (inm,_,_,_) in self.encs]) +"]"
return "Instruction{" + ", ".join([encs, (self.post.name if self.post else "-"), self.exec.name])+", "+conditional+"}"
########################################################################
# Extracting information from XML files
########################################################################
alt_slice_syntax = False
demangle_instr = False
'''
Read pseudocode to extract ASL.
'''
def readASL(ps):
name = ps.attrib["name"]
name = name.replace(".txt","")
name = name.replace("/instrs","")
name = name.replace("/Op_","/")
chunk = ps.find("pstext")
# list of things defined in this chunk
defs = { x.attrib['link'] for x in chunk.findall('anchor') }
# extract dependencies from hyperlinks in the XML
deps = { x.attrib['link'] for x in chunk.findall('a') if not x.text.startswith("SEE") }
# drop impl- prefixes in links
deps = { re.sub('(impl-\w+\.)','',x) for x in deps }
defs = { re.sub('(impl-\w+\.)','',x) for x in defs }
# drop file references in links
deps = { re.sub('([^#]+#)','',x) for x in deps }
code = ET.tostring(chunk, method="text").decode().rstrip()+"\n"
# workaround: patch operator precedence error
code = code.replace("= e - e MOD eltspersegment;", "= e - (e MOD eltspersegment);")
code = code.replace("= p - p MOD pairspersegment;", "= p - (p MOD pairspersegment);")
if alt_slice_syntax:
code = "\n".join(map(patchSlices, code.split('\n')))
return ASL(name, code, defs, deps)
'''
Classic ASL syntax has a syntax ambiguity involving the use of
angles (< and >) both to delimit bitslices and as comparison
operators.
We make parsing easier by converting bitslices to use square brackets,
using a set of heuristics to distinguish bitslices from comparisons.
'''
def patchSlices(x):
reIndex = r'[0-9a-zA-Z_+*:\-()[\]., ]+'
rePart = reIndex
reParts = rePart+"(,"+rePart+")*"
x = re.sub("<("+reParts+")>", r'[\1]',x)
x = re.sub("<("+reParts+")>", r'[\1]',x)
x = re.sub("<("+reParts+")>", r'[\1]',x)
x = re.sub("<("+reParts+")>", r'[\1]',x)
return x
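
# Hedged example (illustrative only):
#   patchSlices("operand<31:16> = imm16;")  ->  "operand[31:16] = imm16;"
# whereas a genuine comparison such as "if n < 16 then UNDEFINED;" is left
# unchanged because no matching '>' follows the index-like characters.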
'''
Read encoding diagrams header found in encoding index XML
'''
def readDiagram(reg):
size = reg.attrib['form']
fields = []
for b in reg.findall('box'):
wd = int(b.attrib.get('width','1'))
hi = int(b.attrib['hibit'])
# normalise T16 reg bit numbers
lo = hi - wd + 1
fields.append((lo, wd))
return (size, fields)
def squote(s):
return "'"+s+"'"
'''
Convert a field in a decode table such as "111" or "!= 111" or None
to a legal ASL pattern
'''
def fieldToPattern(f):
if f:
return "!"+squote(f[3:]) if f.startswith('!= ') else squote(f)
else:
return "_"
'''
Read encoding diagrams entries found in encoding index XML
'''
def readDecode(d, columns):
values = {}
for b in d.findall('box'):
wd = int(b.attrib.get('width','1'))
hi = int(b.attrib['hibit'])
lo = hi - wd + 1
values[lo] = fieldToPattern(b.find('c').text)
return [ values.get(lo, "_") for (lo, _) in columns ]
def readIClass(c):
label = c.attrib['iclass']
allocated = c.attrib.get("unallocated", "0") == "0"
predictable = c.attrib.get("unpredictable", "0") == "0"
assert allocated or predictable
# print("Reading iclass "+label+" "+str(allocated)+" "+str(unpredictable))
return (label, allocated, predictable)
'''
Read a (possibly nested) group of encoding diagrams and instruction classes
from the encoding index XML, returning (label, diagram, children).
'''
def readGroup(label, g):
# print("Reading group "+label)
diagram = readDiagram(g.find("regdiagram"))
# print("Diagram "+str(diagram))
children = []
for n in g.findall('node'):
dec = readDecode(n.find('decode'), diagram[1])
# print("Decode "+str(dec), diagram[1])
if 'iclass' in n.attrib:
i = readIClass(n)
children.append((dec, False, i))
elif 'groupname' in n.attrib:
nm = n.attrib['groupname']
g = readGroup(nm, n)
children.append((dec, True, g))
else:
assert False
return (label, diagram, children)
'''
Look up the decode name for a given encoding by reading its instruction XML file.
'''
def readInstrName(dir, filename, encname):
filename = dir+"/"+filename
xml = ET.parse(filename)
for ic in xml.findall(".//iclass"):
decode = ic.find("regdiagram").attrib['psname']
for enc in ic.findall("encoding"):
if not encname or enc.attrib['name'] == encname:
decode = decode.replace(".txt","")
decode = decode.replace("/instrs","")
decode = decode.replace("-","_")
decode = decode.replace("/","_")
return decode
assert False
'''
Read the per-iclass instruction tables (iclass_sect elements) from an
encoding index XML file.
'''
def readITables(dir, root):
classes = {}
funcgroup = None # hack: structure of XML is not quite hierarchial
for child in root.iter():
if child.tag == 'funcgroupheader':
funcgroup = child.attrib['id']
# print("Functional Group "+funcgroup)
elif child.tag == 'iclass_sect':
iclass_id = child.attrib['id']
fields = [ (b.attrib['name'], int(b.attrib['hibit']), int(b.attrib.get('width', 1))) for b in child.findall('regdiagram/box') if 'name' in b.attrib ]
# print("Group "+funcgroup +" "+ iclass_id +' '+str(fields))
tables = []
for i in child.findall('instructiontable'):
iclass = i.attrib['iclass']
headers = [ r.text for r in i.findall('thead/tr/th') if r.attrib['class'] == 'bitfields' ]
headers = [ patchTypeAsVar(nm) for nm in headers ] # workaround
# print("ITable "+funcgroup +" "+ iclass +" "+str(headers))
rows = []
for r in i.findall('tbody/tr'):
patterns = [ fieldToPattern(d.text) for d in r.findall('td') if d.attrib['class'] == 'bitfield' ]
undef = r.get('undef', '0') == '1'
unpred = r.get('unpred', '0') == '1'
nop = r.get('reserved_nop_hint', '0') == '1'
encname = r.get('encname')
nm = "_" if undef or unpred or nop else readInstrName(dir, r.attrib['iformfile'], encname)
rows.append((patterns, nm, encname, undef, unpred, nop))
tables.append((iclass, headers, rows))
# print(iclass, fields, headers, rows)
assert len(tables) == 1
# discard fields that are not used to select instruction
# fields = [ (nm, hi, wd) for (nm, hi, wd) in fields if nm in headers ]
fields = [ (patchTypeAsVar(nm), hi, wd) for (nm, hi, wd) in fields ] # workaround
classes[iclass_id] = (fields, tables[0])
return classes
'''
Read an encoding index XML file, returning the group hierarchy and the
per-iclass instruction tables.
'''
def readDecodeFile(dir, file):
print("Reading decoder "+file)
root = ET.parse(file)
iset = root.getroot().attrib['instructionset']
groups = readGroup(iset, root.find('hierarchy'))
classes = readITables(dir, root)
return (groups, classes)
def ppslice(f):
(lo, wd) = f
return (str(lo) +" +: "+ str(wd))
def printITable(ofile, level, c):
(fields, (ic, hdr, rows)) = c
for (fnm, hi, wd) in fields:
print(" "*level + "__field "+ fnm +" "+str(hi-wd+1) +" +: "+str(wd), file=ofile)
print(" "*level +"case ("+ ", ".join(hdr) +") of", file=ofile)
for (pats, nm, encname, undef, unpred, nop) in rows:
nm = "__encoding "+deslash(nm)
if encname: nm = nm + " // " +encname
if undef: nm = "__UNALLOCATED"
if unpred: nm = "__UNPREDICTABLE"
if nop: nm = "__NOP"
print(" "*(level+1) +"when ("+ ", ".join(pats) +") => "+ nm, file=ofile)
return
def printDiagram(ofile, level, reg):
(size, fields) = reg
print(" "*level +"case ("+ ", ".join(map(ppslice, fields)) +") of", file=ofile)
return
def printGroup(ofile, classes, level, root):
(label, diagram, children) = root
print(" "*level + "// "+label, file=ofile)
printDiagram(ofile, level, diagram)
for (dec, isGroup, c) in children:
if isGroup:
print(" "*(level+1) +"when ("+ ", ".join(dec) +") =>", file=ofile)
printGroup(ofile, classes, level+2, c)
else:
(label, allocated, predictable) = c
tag = "// "+label
if allocated and predictable:
(fields, (ic, hdr, rows)) = classes[label]
print(" "*(level+1) +"when ("+ ", ".join(dec) +") => " +tag, file=ofile)
printITable(ofile, level+2, classes[label])
else:
if not allocated: tag = "__UNPREDICTABLE"
if not predictable: tag = "__UNALLOCATED"
print(" "*(level+1) +"when ("+ ", ".join(dec) +") => " +tag, file=ofile)
return
def printDecodeTree(ofile, groups, classes):
print("__decode", groups[0], file=ofile)
printGroup(ofile, classes, 1, groups)
'''
Read shared pseudocode files to extract ASL.
Returns the ASL chunks (indexed by name) and the set of all names they define.
'''
def readShared(files):
asl = {}
names = set()
for f in files:
xml = ET.parse(f)
for ps in xml.findall('.//ps_section/ps'):
r = readASL(ps)
# workaround: patch use of type as a variable name
r.patchTypeVar()
# workaround: patch SCTLR[] definition
if r.name == "aarch64/functions/sysregisters/SCTLR":
r.code = r.code.replace("bits(32) r;", "bits(64) r;")
# workaround: patch AArch64.CheckUnallocatedSystemAccess
if r.name == "aarch64/functions/system/AArch64.CheckUnallocatedSystemAccess":
r.code = r.code.replace("bits(2) op0,", "bits(2) el, bits(2) op0,")
# workaround: patch AArch64.CheckSystemAccess
if r.name == "aarch64/functions/system/AArch64.CheckSystemAccess":
r.code = r.code.replace("AArch64.CheckSVESystemRegisterTraps(op0, op1, crn, crm, op2);",
"AArch64.CheckSVESystemRegisterTraps(op0, op1, crn, crm, op2, read);")
# workaround: collect type definitions
for m in re.finditer('''(?m)^(enumeration|type)\s+(\S+)''',r.code):
r.defs.add(m.group(2))
names |= {m.group(2)}
# workaround: collect variable definitions
for m in re.finditer('''(?m)^(\S+)\s+([a-zA-Z_]\w+);''',r.code):
if m.group(1) != "type":
# print("variable declaration", m[1], m[2])
r.defs.add(m.group(2))
names |= {m.group(2)}
# workaround: collect array definitions
for m in re.finditer('''(?m)^array\s+(\S+)\s+([a-zA-Z_]\w+)''',r.code):
# print("array declaration", m[1], m[2])
v = m.group(2)+"["
r.defs.add(v)
names |= {v}
# workaround: collect variable accessors
for m in re.finditer('''(?m)^(\w\S+)\s+([a-zA-Z_]\w+)\s*$''',r.code):
# print("variable accessor", m[1], m[2])
r.defs.add(m.group(2))
names |= {m.group(2)}
# workaround: collect array accessors
for m in re.finditer('''(?m)^(\w\S+)\s+([a-zA-Z_]\w+)\[''',r.code):
# print("array accessor", m[1], m[2])
v = m.group(2)+"["
r.defs.add(v)
names |= {v}
# workaround: add PSTATE definition/dependency
if r.name == 'shared/functions/system/PSTATE': r.defs.add("PSTATE")
if "PSTATE" in r.code: r.deps.add("PSTATE")
# workaround: skip standard library functions
if r.name in [
'shared/functions/common/SInt',
'shared/functions/common/UInt',
'shared/functions/common/Ones',
'shared/functions/common/Zeros',
'shared/functions/common/IsOnes',
'shared/functions/common/IsZero',
'shared/functions/common/SignExtend',
'shared/functions/common/ZeroExtend',
'shared/functions/common/Replicate',
'shared/functions/common/RoundDown',
'shared/functions/common/RoundUp',
'shared/functions/common/RoundTowardsZero',
]:
continue
asl[r.name] = r
return (asl, names)
'''
Read ARM's license notice from an XML file.
Convert unicode characters to ASCII equivalents (e.g,, (C)).
Return a giant comment block containing the notice.
'''
def readNotice(xml):
# Read proprietary notice
notice = ['/'*72, "// Proprietary Notice"]
for p in xml.iter('para'):
para = ET.tostring(p, method='text').decode().rstrip()
para = para.replace("’", "'")
para = para.replace("“", '"')
para = para.replace("”", '"')
para = para.replace("™", '(TM)')
para = para.replace("©", '(C)')
para = para.replace("®", '(R)')
lines = [ ('// '+l).rstrip() for l in para.split('\n') ]
notice.extend(lines)
notice.append('/'*72)
return '\n'.join(notice)
def sanitize(name):
new_name = ""
for c in name:
if c not in string.ascii_letters and c not in string.digits:
new_name += "_"
else:
new_name += c
return new_name
# remove one level of indentation from code
def indent(code):
return [ " " + l for l in code ]
# remove one level of indentation from code
def unindent(code):
cs = []
for l in code:
if l != "" and l[0:4] != " ":
print("Malformed conditional code '" + l[0:4] +"'")
assert False
cs.append(l[4:])
return cs
# Execute ASL code often has a header like this:
#
# if ConditionPassed() then
# EncodingSpecificOperations();
#
# that we need to transform into a more usable form.
# Other patterns found are:
# - declaring an enumeration before the instruction
# - inserting another line of code between the first and second lines.
# eg "if PSTATE.EL == EL2 then UNPREDICTABLE;"
# - wrapping the entire instruction in
# "if code[0].startswith("if CurrentInstrSet() == InstrSet_A32 then"):
#
# Return value consists of (top, cond, dec, exec):
# - additional top level declarations (of enumerations)
# - boolean: is the instruction conditional?
# - additional decode logic (to be added to start of decode ASL)
# - demangled execute logic
def demangleExecuteASL(code):
tops = None
conditional = False
decode = None
if code[0].startswith("enumeration ") and code[1] == "":
tops = code[0]
code = code[2:]
if code[0].startswith("if CurrentInstrSet() == InstrSet_A32 then"):
first = code[0]
code = code[1:]
mid = code.index("else")
code1 = unindent(code[:mid])
code2= unindent(code[mid+1:])
(tops1, conditional1, decode1, code1) = demangleExecuteASL(code1)
(tops2, conditional2, decode2, code2) = demangleExecuteASL(code2)
assert tops1 == None and tops2 == None
assert conditional1 == conditional2
code = [first] + indent(code1) + ["else"] + indent(code2)
        # the two halves have been recursively demangled; return the combined result
        return (tops, conditional1, "\n".join([decode1 or "", decode2 or ""]), code)
if code[0] == "if ConditionPassed() then":
conditional = True
code = code[1:] # delete first line
code = unindent(code)
if code[0] == "bits(128) result;":
tmp = code[0]
code[0] = code[1]
code[1] = tmp
elif len(code) >= 2 and code[1] == "EncodingSpecificOperations();":
decode = code[0]
code = code[1:]
if code[0].startswith("EncodingSpecificOperations();"):
rest = code[0][29:].strip()
if rest == "":
code = code[1:]
else:
code[0] = rest
return (tops, conditional, decode, code)
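
# Hedged example (illustrative ASL, not from the XML): an execute block
#
#   if ConditionPassed() then
#       EncodingSpecificOperations();
#       R[d] = result;
#
# demangles to (None, True, None, ["R[d] = result;"]): the ConditionPassed()
# wrapper marks the instruction as conditional and the bare
# EncodingSpecificOperations() call is dropped from the execute body.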
def readInstruction(xml,names,sailhack):
execs = xml.findall(".//pstext[@section='Execute']/..")
posts = xml.findall(".//pstext[@section='Postdecode']/..")
assert(len(posts) <= 1)
assert(len(execs) <= 1)
if not execs: return (None, None) # discard aliases
exec = readASL(execs[0])
post = readASL(posts[0]) if posts else None
if demangle_instr:
# demangle execute code
code = exec.code.splitlines()
(top, conditional, decode, execute) = demangleExecuteASL(code)
exec.code = '\n'.join(execute)
else:
top = None
conditional = False
decode = None
exec.patchDependencies(names)
if post: post.patchDependencies(names)
include_matches = include_regex is None or include_regex.search(exec.name)
exclude_matches = exclude_regex is not None and exclude_regex.search(exec.name)
if not include_matches or exclude_matches:
return None
# for each encoding, read instructions encoding, matching decode ASL and index
encs = []
for iclass in xml.findall('.//classes/iclass'):
encoding = iclass.find('regdiagram')
isT16 = encoding.attrib['form'] == "16"
insn_set = "T16" if isT16 else iclass.attrib['isa']
fields = []
for b in encoding.findall('box'):
wd = int(b.attrib.get('width','1'))
hi = int(b.attrib['hibit'])
lo = hi - wd + 1
nm = b.attrib.get('name', '_') if b.attrib.get('usename', '0') == '1' else '_'
# workaround for Sail
if sailhack and nm == 'type': nm = 'typ'
ignore = 'psbits' in b.attrib and b.attrib['psbits'] == 'x'*wd
consts = ''.join([ 'x'*int(c.attrib.get('colspan','1')) if c.text is None or ignore else c.text for c in b.findall('c') ])
# workaround: add explicit slicing to LDM/STM register_list fields
if nm == "register_list" and wd == 13: nm = nm + "<12:0>"
# if adjacent entries are two parts of same field, join them
# e.g., imm8<7:1> and imm8<0> or opcode[5:2] and opcode[1:0]
m = re.match('^(\w+)[<[]', nm)
if m:
nm = m.group(1)
split = True
if fields[-1][3] and fields[-1][2] == nm:
(hi1,lo1,_,_,c1) = fields.pop()
assert(lo1 == hi+1) # must be adjacent
hi = hi1
consts = c1+consts
else:
split = False
# discard != information because it is better obtained elsewhere in spec
if consts.startswith('!='): consts = 'x'*wd
fields.append((hi,lo,nm,split,consts))
# pad opcode with zeros for T16 so that all opcodes are 32 bits
if isT16:
fields.append((15,0,'_',False,'0'*16))
# workaround: avoid use of overloaded field names
fields2 = []
for (hi, lo, nm, split, consts) in fields:
if (nm in ["SP", "mask", "opcode"]
and 'x' not in consts
and exec.name not in ["aarch64/float/convert/fix", "aarch64/float/convert/int"]):
# workaround: avoid use of overloaded field name
nm = '_'
fields2.append((hi,lo,nm,split,consts))
dec_asl = readASL(iclass.find('ps_section/ps'))
if decode: dec_asl.code = decode +"\n"+ dec_asl.code
dec_asl.patchDependencies(names)
name = dec_asl.name if insn_set in ["T16","T32","A32"] else encoding.attrib['psname']
encs.append((name, insn_set, fields2, dec_asl))
return (Instruction(exec.name, encs, post, conditional, exec), top)
########################################################################
# Reachability analysis
########################################################################
# Visit all nodes reachable from roots
# Returns topologically sorted list of reachable nodes
# and set of reachable nodes.
def reachable(graph, roots):
visited = set()
sorted = []
def worker(seen, f):
if f in seen:
# print("Cyclic dependency",f)
pass
elif f not in visited:
visited.add(f)
deps = list(graph[f])
deps.sort()
for g in deps: worker(seen + [f], g)
sorted.append(f)
roots = list(roots)
roots.sort()
for f in roots: worker([], f)
return (sorted, visited)
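
# Hedged example (illustrative only):
#   reachable({"a": {"b"}, "b": set()}, {"a"})  ->  (["b", "a"], {"a", "b"})
# i.e. dependencies appear before the definitions that use them, which is the
# order needed when the surviving ASL chunks are written out in main().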
########################################################################
# Canary detection
########################################################################
# Check all paths from a function 'f' to any function in the list 'canaries'
# and report every such path.
# 'callers' is a reversed callgraph (from callees back to callers)
# Prints paths in reverse order (starting function first, root last) because that
# helps identify the common paths to the starting function f
#
# Usage is to iterate over all canaries 'f' searching for paths that should not exist
def checkCanaries(callers, isChunk, roots, f, path):
if f in path: # ignore recursion
pass
elif f in roots:
path = [ g for g in path+[f] if not isChunk(g) ]
print(" Canary "+" ".join(path))
elif callers[f]:
path = path + [f]
for g in callers[f]:
checkCanaries(callers, isChunk, roots, g, path)
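
# Hedged example (illustrative only): main() below builds the reverse
# callgraph 'rcg' from deps and, for each canary that is still reachable,
# calls
#   checkCanaries(rcg, lambda x: x in shared, roots, canary, [])
# which prints one "Canary ..." line per caller chain connecting the canary
# back to a kept root, with shared-chunk names filtered out of the path.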
########################################################################
# Main
########################################################################
def main():
global alt_slice_syntax
global include_regex
global exclude_regex
global demangle_instr
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--verbose', '-v', help='Use verbose output',
action = 'count', default=0)
parser.add_argument('--altslicesyntax', help='Convert to alternative slice syntax',
action='store_true', default=False)
parser.add_argument('--sail_asts', help='Output Sail file for AST clauses',
metavar='FILE', default=None)
parser.add_argument('--demangle', help='Demangle instruction ASL',
action='store_true', default=False)
parser.add_argument('--output', '-o', help='Basename for output files',
metavar='FILE', default='arch')
parser.add_argument('dir', metavar='<dir>', nargs='+',
help='input directories')
parser.add_argument('--filter', help='Optional input json file to filter definitions',
metavar='FILE', default=[], nargs='*')
parser.add_argument('--arch', help='Optional list of architecture states to extract',
choices=["AArch32", "AArch64"], default=[], action='append')
parser.add_argument('--include', help='Regex to select instructions by name',
metavar='REGEX', default=None)
parser.add_argument('--exclude', help='Regex to exclude instructions by name',
metavar='REGEX', default=None)
args = parser.parse_args()
alt_slice_syntax = args.altslicesyntax
if args.include is not None:
include_regex = re.compile(args.include)
if args.exclude is not None:
exclude_regex = re.compile(args.exclude)
demangle_instr = args.demangle
encodings = []
if "AArch32" in args.arch: encodings.extend(["T16", "T32", "A32"])
if "AArch64" in args.arch: encodings.extend(["A64"])
if args.verbose > 0:
if encodings != []:
print("Selecting encodings", ", ".join(encodings))
else:
print("Selecting entire architecture")
notice = readNotice(ET.parse(os.path.join(args.dir[0], 'notice.xml')))
(shared,names) = readShared([ f for d in args.dir for f in glob.glob(os.path.join(d, 'shared_pseudocode.xml'))])
# reverse mapping of names back to the chunks containing them
chunks = {}
for a in shared.values():
for d in a.defs:
chunks[d] = a
for a in shared.values():
a.patchDependencies(chunks)
decoder_files = [ 'encodingindex.xml', 't32_encindex.xml', 'a32_encindex.xml' ]
decoders = [ readDecodeFile(d, f) for df in decoder_files for d in args.dir for f in glob.glob(os.path.join(d, df)) ]
sailhack = args.sail_asts is not None
instrs = []
tops = []
for d in args.dir:
for inf in glob.glob(os.path.join(d, '*.xml')):
name = re.search('.*/(\S+).xml',inf).group(1)
if name == "onebigfile": continue
xml = ET.parse(inf)
(instr, top) = readInstruction(xml,chunks,sailhack)
if top: tops.append(top)
if instr is None: continue
if encodings != []: # discard encodings from unwanted InsnSets
encs = [ e for e in instr.encs if e[1] in encodings ]
if encs == []:
if args.verbose > 1: print("Discarding", instr.name, encodings)
continue
instr.encs = encs
instrs.append(instr)
# Having read everything in, decide which parts to write
# back out again and in what order
if args.verbose > 3:
for f in shared.values():
print("Dependencies", f.name, "=", str(f.deps))
print("Definitions", f.name, "=", str(f.defs))
roots = set()
cuts = set()
canaries = set()
for fn in args.filter:
with open(fn, "r") as f:
try:
filter = json.load(f)
except ValueError as err:
print(err)
sys.exit(1)
for fun in filter['roots']:
if fun not in chunks: print("Warning: unknown root", fun)
roots.add(fun)
for fun in filter['cuts']:
if fun not in chunks: print("Warning: unknown cut", fun)
cuts.add(fun)
for fun in filter['canaries']:
if fun not in chunks: print("Warning: unknown canary", fun)
canaries.add(fun)
# treat instrs as a list of rexexps
patterns = [ re.compile(p) for p in filter['instructions'] ]
instrs = [ i for i in instrs
if any(regex.match(i.name) for regex in patterns)
]
# print("\n".join(sorted([ i.name for i in instrs ])))
# print("\n".join(sorted(chunks.keys())))
# Replace all cutpoints with a stub so that we keep dependencies
# on the argument/result types but drop the definition and any
# dependencies on the definition.
for x,s in shared.items():
if any([d in cuts for d in s.defs]):
if args.verbose > 0: print("Cutting", x)
t = s.toPrototype()
t.patchDependencies(chunks)
# print("Cut", t)
shared[x] = t
# build bipartite graph consisting of chunk names and functions
deps = defaultdict(set) # dependencies between functions
for a in shared.values():
deps[a.name] = a.deps
for d in a.defs:
deps[d] = {a.name}
if args.verbose > 2:
for f in deps: print("Dependency", f, "on", str(deps[f]))
if encodings == [] and args.filter == []:
# default: you get everything
if args.verbose > 0: print("Keeping entire specification")
roots |= { x for x in shared }
else:
if args.verbose > 0: print("Discarding definitions unreachable from",
", ".join(encodings), " instructions")
for i in instrs:
for (_,_,_,dec) in i.encs: roots |= dec.deps
if i.post: roots |= i.post.deps
roots |= i.exec.deps
(live, _) = reachable(deps, roots)
# Check whether canaries can be reached from roots
if canaries != set():
if args.verbose > 0: print("Checking unreachability of", ", ".join(canaries))
rcg = defaultdict(set) # reverse callgraph
for f, ds in deps.items():
for d in ds:
rcg[d].add(f)
for canary in canaries:
if canary in live:
checkCanaries(rcg, lambda x: x in shared, roots, canary, [])
# print("Live:", " ".join(live))
# print()
# print("Shared", " ".join(shared.keys()))
live_chunks = [ shared[x] for x in live if x in shared ]
tagfile = args.output + ".tag"
instrfile = args.output + "_instrs.asl"
decodefile = args.output + "_decode.asl"
aslfile = args.output + ".asl"
if args.verbose > 0: print("Writing instruction encodings to", tagfile)
with open(tagfile, "w") as outf:
emit(outf, 'notice:asl', notice)
for i in instrs:
i.emit_tag_syntax(outf)
if args.verbose > 0: print("Writing instructions to", instrfile)
with open(instrfile, "w") as outf:
print(notice, file=outf)
print(file=outf)
for i in instrs:
i.emit_asl_syntax(outf)
print(file=outf)
print('/'*72, file=outf)
print('// End', file=outf)
print('/'*72, file=outf)
if args.verbose > 0: print("Writing instruction decoder to", decodefile)
with open(decodefile, "w") as ofile:
for (groups, classes) in decoders: printDecodeTree(ofile, groups, classes)
if args.verbose > 0: print("Writing ASL definitions to", aslfile)
with open(aslfile, "w") as outf:
print(notice, file=outf)
print(file=outf)
print('\n'.join([ t for t in tops ]), file=outf)
print('\n'.join([ x.code for x in live_chunks ]), file=outf)
print('/'*72, file=outf)
print('// End', file=outf)
print('/'*72, file=outf)
if args.sail_asts is not None:
if args.verbose > 0: print("Writing Sail ast clauses to", args.sail_asts)
with open(args.sail_asts, "w") as outf:
print(notice, file=outf, end='\n\n')
print('scattered union ast', file=outf, end='\n\n')
previous_clauses = set()
for i in instrs:
i.emit_sail_ast(previous_clauses, outf)
print('\nend ast', file=outf)
return
if __name__ == "__main__":
sys.exit(main())
########################################################################
# End
########################################################################
|
alastairreid/mra_tools
|
bin/instrs2asl.py
|
Python
|
bsd-3-clause
| 39,190
|
[
"VisIt"
] |
b74d4be93bc6bca09ae9316397a035668411bf84e25fb50c636330d568926e0d
|
#import nsdf
#import table
#import swc
#import neuroml
import moose
__all__ = ["moose"]
#"nsdf",
# "table",
# "swc",
# "neuroml",
# "moose"]
|
dilawar/moogli
|
moogli/extensions/__init__.py
|
Python
|
gpl-2.0
| 182
|
[
"MOOSE"
] |
166d73b6f5ecfc6437814a848a17ab350e3c059bc8ea484b0379e73864894ca9
|
# Copyright 2007-2009 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "genbank" and "embl" file formats.
You are expected to use this module via the Bio.SeqIO functions.
Note that internally this module calls Bio.GenBank to do the actual
parsing of both GenBank and EMBL files.
See also:
International Nucleotide Sequence Database Collaboration
http://www.insdc.org/
GenBank
http://www.ncbi.nlm.nih.gov/Genbank/
EMBL Nucleotide Sequence Database
http://www.ebi.ac.uk/embl/
DDBJ (DNA Data Bank of Japan)
http://www.ddbj.nig.ac.jp/
"""
from Bio.Seq import UnknownSeq
from Bio.GenBank.Scanner import GenBankScanner, EmblScanner
from Bio import Alphabet
from Interfaces import SequentialSequenceWriter
from Bio import SeqFeature
# NOTE
# ====
# The "brains" for parsing GenBank and EMBL files (and any
# other flat file variants from the INSDC in future) is in
# Bio.GenBank.Scanner (plus the _FeatureConsumer in Bio.GenBank)
# However, all the writing code is in this file.
def GenBankIterator(handle):
"""Breaks up a Genbank file into SeqRecord objects.
Every section from the LOCUS line to the terminating // becomes
a single SeqRecord with associated annotation and features.
Note that for genomes or chromosomes, there is typically only
one record."""
#This calls a generator function:
return GenBankScanner(debug=0).parse_records(handle)
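
#Hedged usage sketch (not part of this module): these iterators are normally
#reached indirectly through the Bio.SeqIO top-level API, e.g.
#
#    from Bio import SeqIO
#    for record in SeqIO.parse(open("example.gbk"), "genbank"):
#        print record.id, len(record)
#
#where "example.gbk" is an illustrative filename.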
def EmblIterator(handle):
"""Breaks up an EMBL file into SeqRecord objects.
    Every section from the ID line to the terminating // becomes
a single SeqRecord with associated annotation and features.
Note that for genomes or chromosomes, there is typically only
one record."""
#This calls a generator function:
return EmblScanner(debug=0).parse_records(handle)
def GenBankCdsFeatureIterator(handle, alphabet=Alphabet.generic_protein):
"""Breaks up a Genbank file into SeqRecord objects for each CDS feature.
Every section from the LOCUS line to the terminating // can contain
many CDS features. These are returned as with the stated amino acid
translation sequence (if given).
"""
#This calls a generator function:
return GenBankScanner(debug=0).parse_cds_features(handle, alphabet)
def EmblCdsFeatureIterator(handle, alphabet=Alphabet.generic_protein):
"""Breaks up a EMBL file into SeqRecord objects for each CDS feature.
Every section from the LOCUS line to the terminating // can contain
many CDS features. These are returned as with the stated amino acid
translation sequence (if given).
"""
#This calls a generator function:
return EmblScanner(debug=0).parse_cds_features(handle, alphabet)
def _insdc_feature_position_string(pos, offset=0):
"""Build a GenBank/EMBL position string (PRIVATE).
Use offset=1 to add one to convert a start position from python counting.
"""
if isinstance(pos, SeqFeature.ExactPosition):
return "%i" % (pos.position+offset)
elif isinstance(pos, SeqFeature.WithinPosition):
return "(%i.%i)" % (pos.position + offset,
pos.position + pos.extension + offset)
elif isinstance(pos, SeqFeature.BetweenPosition):
return "(%i^%i)" % (pos.position + offset,
pos.position + pos.extension + offset)
elif isinstance(pos, SeqFeature.BeforePosition):
return "<%i" % (pos.position + offset)
elif isinstance(pos, SeqFeature.AfterPosition):
return ">%i" % (pos.position + offset)
elif isinstance(pos, SeqFeature.OneOfPosition):
return "one-of(%s)" \
% ",".join([_insdc_feature_position_string(p,offset) \
for p in pos.position_choices])
elif isinstance(pos, SeqFeature.AbstractPosition):
raise NotImplementedError("Please report this as a bug in Biopython.")
else:
raise ValueError("Expected a SeqFeature position object.")
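
#Hedged examples (illustrative only), using the SeqFeature position classes
#handled above:
#    _insdc_feature_position_string(SeqFeature.ExactPosition(11), +1) -> "12"
#    _insdc_feature_position_string(SeqFeature.BeforePosition(5))     -> "<5"
#i.e. the +1 offset converts a Python-style start back to the one-based
#counting used in GenBank/EMBL location strings.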
def _insdc_location_string_ignoring_strand_and_subfeatures(feature):
if feature.ref:
ref = "%s:" % feature.ref
else:
ref = ""
assert not feature.ref_db
if feature.location.start==feature.location.end \
and isinstance(feature.location.end, SeqFeature.ExactPosition):
#Special case, 12^13 gets mapped to location 12:12
#(a zero length slice, meaning the point between two letters)
return "%s%i^%i" % (ref, feature.location.end.position,
feature.location.end.position+1)
else:
#Typical case, e.g. 12..15 gets mapped to 11:15
return ref \
+ _insdc_feature_position_string(feature.location.start, +1) \
+ ".." + \
_insdc_feature_position_string(feature.location.end)
def _insdc_feature_location_string(feature):
"""Build a GenBank/EMBL location string from a SeqFeature (PRIVATE)."""
# Have a choice of how to show joins on the reverse complement strand,
# complement(join(1,10),(20,100)) vs join(complement(20,100),complement(1,10))
# Notice that the order of the entries gets flipped!
#
# GenBank and EMBL would both use now complement(join(1,10),(20,100))
# which is shorter at least.
#
# In the above situations, we expect the parent feature and the two children
# to all be marked as strand==-1, and in the order 0:10 then 19:100.
#
# Also need to consider dual-strand examples like these from the Arabidopsis
# thaliana chloroplast NC_000932: join(complement(69611..69724),139856..140650)
# gene ArthCp047, GeneID:844801 or its CDS which is even better due to a splice:
# join(complement(69611..69724),139856..140087,140625..140650)
# protein NP_051038.1 GI:7525057
#
if not feature.sub_features:
#Non-recursive.
#assert feature.location_operator == "", \
# "%s has no subfeatures but location_operator %s" \
# % (repr(feature), feature.location_operator)
location = _insdc_location_string_ignoring_strand_and_subfeatures(feature)
if feature.strand == -1:
location = "complement(%s)" % location
return location
# As noted above, treat reverse complement strand features carefully:
if feature.strand == -1:
for f in feature.sub_features:
assert f.strand == -1
return "complement(%s(%s))" \
% (feature.location_operator,
",".join(_insdc_location_string_ignoring_strand_and_subfeatures(f) \
for f in feature.sub_features))
#if feature.strand == +1:
# for f in feature.sub_features:
# assert f.strand == +1
#This covers typical forward strand features, and also an evil mixed strand:
assert feature.location_operator != ""
return "%s(%s)" % (feature.location_operator,
",".join([_insdc_feature_location_string(f) \
for f in feature.sub_features]))
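
#Hedged example (illustrative only): a simple reverse-strand feature such as
#    SeqFeature.SeqFeature(SeqFeature.FeatureLocation(0, 10),
#                          type="gene", strand=-1)
#is rendered as "complement(1..10)", i.e. inclusive one-based coordinates
#with the complement() wrapper applied once at the outermost level.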
class GenBankWriter(SequentialSequenceWriter):
HEADER_WIDTH = 12
MAX_WIDTH = 80
QUALIFIER_INDENT = 21
def _write_single_line(self, tag, text):
"Used in the the 'header' of each GenBank record."""
assert len(tag) < self.HEADER_WIDTH
assert len(text) < self.MAX_WIDTH - self.HEADER_WIDTH, \
"Annotation %s too long for %s line" % (repr(text), tag)
self.handle.write("%s%s\n" % (tag.ljust(self.HEADER_WIDTH),
text.replace("\n"," ")))
def _write_multi_line(self, tag, text):
"Used in the the 'header' of each GenBank record."""
#TODO - Do the line spliting while preserving white space?
max_len = self.MAX_WIDTH - self.HEADER_WIDTH
assert len(tag) < self.HEADER_WIDTH
text = text.strip()
if len(text) < max_len:
self._write_single_line(tag, text)
return
words = text.split()
assert max([len(w) for w in words]) < max_len, \
"Your description cannot be broken into nice lines!"
text = ""
while words and len(text) + 1 + len(words[0]) < max_len:
text += " " + words.pop(0)
text = text.strip()
assert len(text) < max_len
self._write_single_line(tag, text)
while words:
text = ""
while words and len(text) + 1 + len(words[0]) < max_len:
text += " " + words.pop(0)
text = text.strip()
assert len(text) < max_len
self._write_single_line("", text)
assert not words
def _write_multi_entries(self, tag, text_list):
#used for DBLINK and any similar later line types.
#If the list of strings is empty, nothing is written.
for i, text in enumerate(text_list):
if i==0:
self._write_single_line(tag, text)
else:
self._write_single_line("", text)
def _get_date(self, record) :
default = "01-JAN-1980"
try :
date = record.annotations["date"]
except KeyError :
return default
#Cope with a list of one string:
if isinstance(date, list) and len(date)==1 :
date = date[0]
#TODO - allow a Python date object
if not isinstance(date, str) or len(date) != 11 \
or date[2] != "-" or date[6] != "-" \
or not date[:2].isdigit() or not date[7:].isdigit() \
or int(date[:2]) > 31 \
or date[3:6] not in ["JAN","FEB","MAR","APR","MAY","JUN",
"JUL","AUG","SEP","OCT","NOV","DEC"] :
#TODO - Check is a valid date (e.g. not 31 Feb)
return default
return date
def _write_the_first_line(self, record):
"""Write the LOCUS line."""
locus = record.name
if not locus or locus == "<unknown name>":
locus = record.id
if not locus or locus == "<unknown id>":
locus = self._get_annotation_str(record, "accession", just_first=True)
if len(locus) > 16:
raise ValueError("Locus identifier %s is too long" % repr(locus))
if len(record) > 99999999999:
            #Currently GenBank only officially supports up to 350000, but
#the length field can take eleven digits
raise ValueError("Sequence too long!")
#Get the base alphabet (underneath any Gapped or StopCodon encoding)
a = Alphabet._get_base_alphabet(record.seq.alphabet)
if not isinstance(a, Alphabet.Alphabet):
raise TypeError("Invalid alphabet")
elif isinstance(a, Alphabet.ProteinAlphabet):
units = "aa"
elif isinstance(a, Alphabet.NucleotideAlphabet):
units = "bp"
else:
#Must be something like NucleotideAlphabet or
#just the generic Alphabet (default for fasta files)
raise ValueError("Need a Nucleotide or Protein alphabet")
#Get the molecule type
#TODO - record this explicitly in the parser?
if isinstance(a, Alphabet.ProteinAlphabet):
mol_type = ""
elif isinstance(a, Alphabet.DNAAlphabet):
mol_type = "DNA"
elif isinstance(a, Alphabet.RNAAlphabet):
mol_type = "RNA"
else:
#Must be something like NucleotideAlphabet or
#just the generic Alphabet (default for fasta files)
raise ValueError("Need a DNA, RNA or Protein alphabet")
try:
division = record.annotations["data_file_division"]
except KeyError:
division = "UNK"
if division not in ["PRI","ROD","MAM","VRT","INV","PLN","BCT",
"VRL","PHG","SYN","UNA","EST","PAT","STS",
"GSS","HTG","HTC","ENV","CON"]:
division = "UNK"
assert len(units) == 2
assert len(division) == 3
#TODO - date
#TODO - mol_type
line = "LOCUS %s %s %s %s %s %s\n" \
% (locus.ljust(16),
str(len(record)).rjust(11),
units,
mol_type.ljust(6),
division,
self._get_date(record))
assert len(line) == 79+1, repr(line) #plus one for new line
assert line[12:28].rstrip() == locus, \
'LOCUS line does not contain the locus at the expected position:\n' + line
assert line[28:29] == " "
assert line[29:40].lstrip() == str(len(record)), \
'LOCUS line does not contain the length at the expected position:\n' + line
#Tests copied from Bio.GenBank.Scanner
assert line[40:44] in [' bp ', ' aa '] , \
'LOCUS line does not contain size units at expected position:\n' + line
assert line[44:47] in [' ', 'ss-', 'ds-', 'ms-'], \
'LOCUS line does not have valid strand type (Single stranded, ...):\n' + line
assert line[47:54].strip() == "" \
or line[47:54].strip().find('DNA') != -1 \
or line[47:54].strip().find('RNA') != -1, \
'LOCUS line does not contain valid sequence type (DNA, RNA, ...):\n' + line
assert line[54:55] == ' ', \
'LOCUS line does not contain space at position 55:\n' + line
assert line[55:63].strip() in ['','linear','circular'], \
'LOCUS line does not contain valid entry (linear, circular, ...):\n' + line
assert line[63:64] == ' ', \
'LOCUS line does not contain space at position 64:\n' + line
assert line[67:68] == ' ', \
'LOCUS line does not contain space at position 68:\n' + line
assert line[70:71] == '-', \
'LOCUS line does not contain - at position 71 in date:\n' + line
assert line[74:75] == '-', \
'LOCUS line does not contain - at position 75 in date:\n' + line
self.handle.write(line)
def _get_annotation_str(self, record, key, default=".", just_first=False):
"""Get an annotation dictionary entry (as a string).
Some entries are lists, in which case if just_first=True the first entry
is returned. If just_first=False (default) this verifies there is only
one entry before returning it."""
try:
answer = record.annotations[key]
except KeyError:
return default
if isinstance(answer, list):
if not just_first : assert len(answer) == 1
return str(answer[0])
else:
return str(answer)
def _write_comment(self, record):
#This is a bit complicated due to the range of possible
#ways people might have done their annotation...
#Currently the parser uses a single string with newlines.
#A list of lines is also reasonable.
#A single (long) string is perhaps the most natural of all.
#This means we may need to deal with line wrapping.
comment = record.annotations["comment"]
if isinstance(comment, basestring):
lines = comment.split("\n")
elif isinstance(comment, list) or isinstance(comment, tuple):
lines = comment
else:
raise ValueError("Could not understand comment annotation")
self._write_multi_line("COMMENT",lines[0])
for line in lines[1:]:
self._write_multi_line("",line)
def _write_contig(self, record):
#TODO - Merge this with _write_multi_line method?
#It would need the addition of the comma splitting logic...
#are there any other cases where that would be sensible?
max_len = self.MAX_WIDTH - self.HEADER_WIDTH
contig = record.annotations.get("contig","")
if isinstance(contig, list) or isinstance(contig, tuple):
contig = "".join(contig)
contig = self.clean(contig)
i=0
while contig:
if len(contig) > max_len:
#Split lines at the commas
pos = contig[:max_len-1].rfind(",")
if pos==-1:
raise ValueError("Could not break up CONTIG")
text, contig = contig[:pos+1], contig[pos+1:]
else:
text, contig = contig, ""
if i==0:
self._write_single_line("CONTIG",text)
else:
self._write_single_line("",text)
i+=1
def _write_sequence(self, record):
#Loosely based on code from Howard Salis
#TODO - Force lower case?
LETTERS_PER_LINE = 60
SEQUENCE_INDENT = 9
if isinstance(record.seq, UnknownSeq):
#We have already recorded the length, and there is no need
#to record a long sequence of NNNNNNN...NNN or whatever.
if "contig" in record.annotations:
self._write_contig(record)
else:
self.handle.write("ORIGIN\n")
return
data = self._get_seq_string(record) #Catches sequence being None
seq_len = len(data)
self.handle.write("ORIGIN\n")
for line_number in range(0,seq_len,LETTERS_PER_LINE):
self.handle.write(str(line_number+1).rjust(SEQUENCE_INDENT))
for words in range(line_number,min(line_number+LETTERS_PER_LINE,seq_len),10):
self.handle.write(" %s" % data[words:words+10])
self.handle.write("\n")
def write_record(self, record):
"""Write a single record to the output file."""
handle = self.handle
self._write_the_first_line(record)
accession = self._get_annotation_str(record, "accession",
record.id.split(".",1)[0],
just_first=True)
acc_with_version = accession
if record.id.startswith(accession+"."):
try:
acc_with_version = "%s.%i" \
% (accession, int(record.id.split(".",1)[1]))
except ValueError:
pass
gi = self._get_annotation_str(record, "gi", just_first=True)
descr = record.description
if descr == "<unknown description>" : descr = "."
self._write_multi_line("DEFINITION", descr)
self._write_single_line("ACCESSION", accession)
if gi != ".":
self._write_single_line("VERSION", "%s GI:%s" % (acc_with_version,gi))
else:
self._write_single_line("VERSION", "%s" % (acc_with_version))
#The NCBI only expect two types of link so far,
#e.g. "Project:28471" and "Trace Assembly Archive:123456"
#TODO - Filter the dbxrefs list to just these?
self._write_multi_entries("DBLINK", record.dbxrefs)
try:
#List of strings
keywords = "; ".join(record.annotations["keywords"])
except KeyError:
keywords = "."
self._write_multi_line("KEYWORDS", keywords)
if "segment" in record.annotations:
#Deal with SEGMENT line found only in segmented records,
#e.g. AH000819
segment = record.annotations["segment"]
if isinstance(segment, list):
assert len(segment)==1, segment
segment = segment[0]
self._write_single_line("SEGMENT", segment)
self._write_multi_line("SOURCE", \
self._get_annotation_str(record, "source"))
#The ORGANISM line MUST be a single line, as any continuation is the taxonomy
org = self._get_annotation_str(record, "organism")
if len(org) > self.MAX_WIDTH - self.HEADER_WIDTH:
org = org[:self.MAX_WIDTH - self.HEADER_WIDTH-4]+"..."
self._write_single_line(" ORGANISM", org)
try:
#List of strings
taxonomy = "; ".join(record.annotations["taxonomy"])
except KeyError:
taxonomy = "."
self._write_multi_line("", taxonomy)
#TODO - References...
if "comment" in record.annotations:
self._write_comment(record)
handle.write("FEATURES Location/Qualifiers\n")
for feature in record.features:
self._write_feature(feature)
self._write_sequence(record)
handle.write("//\n")
def _write_feature_qualifier(self, key, value=None, quote=None):
if not value:
self.handle.write("%s/%s\n" % (" "*self.QUALIFIER_INDENT, key))
return
#Quick hack with no line wrapping, may be useful for testing:
#self.handle.write('%s/%s="%s"\n' % (" "*self.QUALIFIER_INDENT, key, value))
if quote is None:
#Try to mimic unwritten rules about when quotes can be left out:
if isinstance(value, int) or isinstance(value, long):
quote = False
else:
quote = True
if quote:
line = '%s/%s="%s"' % (" "*self.QUALIFIER_INDENT, key, value)
else:
line = '%s/%s=%s' % (" "*self.QUALIFIER_INDENT, key, value)
if len(line) < self.MAX_WIDTH:
self.handle.write(line+"\n")
return
while line.lstrip():
if len(line) < self.MAX_WIDTH:
self.handle.write(line+"\n")
return
#Insert line break...
for index in range(min(len(line)-1,self.MAX_WIDTH),self.QUALIFIER_INDENT+1,-1):
if line[index]==" " : break
if line[index] != " ":
#No nice place to break...
index = self.MAX_WIDTH
self.handle.write(line[:index] + "\n")
line = " "*self.QUALIFIER_INDENT + line[index:].lstrip()
def _wrap_location(self, location):
"""Split a feature location into lines (break at commas)."""
#TODO - Rewrite this not to recurse!
length = self.MAX_WIDTH - self.QUALIFIER_INDENT
if len(location) <= length:
return location
index = location[:length].rfind(",")
if index == -1:
#No good place to split (!)
import warnings
warnings.warn("Couldn't split location:\n%s" % location)
return location
return location[:index+1] + "\n" + \
" "*self.QUALIFIER_INDENT + self._wrap_location(location[index+1:])
def _write_feature(self, feature):
"""Write a single SeqFeature object to features table."""
assert feature.type, feature
#TODO - Line wrapping for long locations!
location = _insdc_feature_location_string(feature)
line = (" %s " % feature.type)[:self.QUALIFIER_INDENT] \
+ self._wrap_location(location) + "\n"
self.handle.write(line)
#Now the qualifiers...
for key, values in feature.qualifiers.iteritems():
if isinstance(values, list) or isinstance(values, tuple):
for value in values:
self._write_feature_qualifier(key, value)
elif values:
#String, int, etc
self._write_feature_qualifier(key, values)
else:
                #e.g. a /pseudo entry
self._write_feature_qualifier(key)
if __name__ == "__main__":
print "Quick self test"
import os
from StringIO import StringIO
def compare_record(old, new):
if old.id != new.id and old.name != new.name:
raise ValueError("'%s' or '%s' vs '%s' or '%s' records" \
% (old.id, old.name, new.id, new.name))
if len(old.seq) != len(new.seq):
raise ValueError("%i vs %i" % (len(old.seq), len(new.seq)))
if str(old.seq).upper() != str(new.seq).upper():
if len(old.seq) < 200:
raise ValueError("'%s' vs '%s'" % (old.seq, new.seq))
else:
raise ValueError("'%s...' vs '%s...'" % (old.seq[:100], new.seq[:100]))
if old.features and new.features:
return compare_features(old.features, new.features)
#Just insist on at least one word in common:
if (old.description or new.description) \
and not set(old.description.split()).intersection(new.description.split()):
raise ValueError("%s versus %s" \
% (repr(old.description), repr(new.description)))
#TODO - check annotation
if "contig" in old.annotations:
assert old.annotations["contig"] == \
new.annotations["contig"]
return True
def compare_records(old_list, new_list):
"""Check two lists of SeqRecords agree, raises a ValueError if mismatch."""
if len(old_list) != len(new_list):
raise ValueError("%i vs %i records" % (len(old_list), len(new_list)))
for old, new in zip(old_list, new_list):
if not compare_record(old,new):
return False
return True
def compare_feature(old, new, ignore_sub_features=False):
"""Check two SeqFeatures agree."""
if old.type != new.type:
raise ValueError("Type %s versus %s" % (old.type, new.type))
if old.location.nofuzzy_start != new.location.nofuzzy_start \
or old.location.nofuzzy_end != new.location.nofuzzy_end:
raise ValueError("%s versus %s:\n%s\nvs:\n%s" \
% (old.location, new.location, str(old), str(new)))
if old.strand != new.strand:
raise ValueError("Different strand:\n%s\nvs:\n%s" % (str(old), str(new)))
if old.location.start != new.location.start:
raise ValueError("Start %s versus %s:\n%s\nvs:\n%s" \
% (old.location.start, new.location.start, str(old), str(new)))
if old.location.end != new.location.end:
raise ValueError("End %s versus %s:\n%s\nvs:\n%s" \
% (old.location.end, new.location.end, str(old), str(new)))
if not ignore_sub_features:
if len(old.sub_features) != len(new.sub_features):
raise ValueError("Different sub features")
for a,b in zip(old.sub_features, new.sub_features):
if not compare_feature(a,b):
return False
#This only checks key shared qualifiers
#Would a white list be easier?
#for key in ["name","gene","translation","codon_table","codon_start","locus_tag"]:
for key in set(old.qualifiers.keys()).intersection(new.qualifiers.keys()):
if key in ["db_xref","protein_id","product","note"]:
                #EMBL and GenBank files use different references/notes/etc
continue
if old.qualifiers[key] != new.qualifiers[key]:
raise ValueError("Qualifier mis-match for %s:\n%s\n%s" \
% (key, old.qualifiers[key], new.qualifiers[key]))
return True
def compare_features(old_list, new_list, ignore_sub_features=False):
"""Check two lists of SeqFeatures agree, raises a ValueError if mismatch."""
if len(old_list) != len(new_list):
raise ValueError("%i vs %i features" % (len(old_list), len(new_list)))
for old, new in zip(old_list, new_list):
#This assumes they are in the same order
if not compare_feature(old,new,ignore_sub_features):
return False
return True
def check_genbank_writer(records):
handle = StringIO()
GenBankWriter(handle).write_file(records)
handle.seek(0)
records2 = list(GenBankIterator(handle))
assert compare_records(records, records2)
for filename in os.listdir("../../Tests/GenBank"):
if not filename.endswith(".gbk") and not filename.endswith(".gb"):
continue
print filename
handle = open("../../Tests/GenBank/%s" % filename)
records = list(GenBankIterator(handle))
handle.close()
check_genbank_writer(records)
for filename in os.listdir("../../Tests/EMBL"):
if not filename.endswith(".embl"):
continue
print filename
handle = open("../../Tests/EMBL/%s" % filename)
records = list(EmblIterator(handle))
handle.close()
check_genbank_writer(records)
from Bio import SeqIO
for filename in os.listdir("../../Tests/SwissProt"):
if not filename.startswith("sp"):
continue
print filename
handle = open("../../Tests/SwissProt/%s" % filename)
records = list(SeqIO.parse(handle,"swiss"))
handle.close()
check_genbank_writer(records)
|
NirBenTalLab/proorigami-cde-package
|
cde-root/usr/lib64/python2.4/site-packages/Bio/SeqIO/InsdcIO.py
|
Python
|
mit
| 29,113
|
[
"Biopython"
] |
3e64aefe2f5c89030e9f495f6d80d55438a27b037e9bf5aefd77db5445c20f9d
|
"""Test case for autocomplete implementations."""
import os
import uuid
from django import VERSION
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
from django.utils import six
from splinter import Browser
GLOBAL_BROWSER = None
class AutocompleteTestCase(StaticLiveServerTestCase):
"""Provide a class-persistent selenium instance and assertions."""
@classmethod
    def setUpClass(cls):
        """Instantiate a browser for the whole test session."""
global GLOBAL_BROWSER
if GLOBAL_BROWSER is None:
GLOBAL_BROWSER = Browser(os.environ.get('BROWSER', 'firefox'))
cls.browser = GLOBAL_BROWSER
super(AutocompleteTestCase, cls).setUpClass()
def get(self, url):
"""Open a URL."""
self.browser.visit('%s%s' % (
self.live_server_url,
url
))
if '/admin/login/' in self.browser.url:
# Should be pre-filled by HTML template
# self.browser.fill('username', 'test')
# self.browser.fill('password', 'test')
self.browser.find_by_value('Log in').click()
def click(self, selector):
"""Click an element by css selector."""
self.browser.find_by_css(selector).click()
def enter_text(self, selector, text):
"""Enter text in an element by css selector."""
self.browser.find_by_css(selector).type(text)
def assert_not_visible(self, selector):
"""Assert an element is not visible by css selector."""
e = self.browser.find_by_css(selector)
assert not e or e.visible is False
def assert_visible(self, selector):
"""Assert an element is visible by css selector."""
assert self.browser.find_by_css(selector).visible is True
class AdminMixin(object):
"""Mixin for tests that should happen in ModelAdmin."""
def get_modeladmin_url(self, action, **kwargs):
"""Return a modeladmin url for a model and action."""
return reverse('admin:%s_%s_%s' % (
self.model._meta.app_label,
self.model._meta.model_name,
action
), kwargs=kwargs)
def fill_name(self):
"""Fill in the name input."""
        i = self.id()
        # Rotate the test id by half its length so the generated name differs from it.
        half = int(len(i) / 2)
        not_id = i[half:] + i[:half]
self.browser.fill('name', not_id)
class OptionMixin(object):
"""Mixin to make a unique option per test."""
def create_option(self):
"""Create a unique option from self.model into self.option."""
unique_name = six.text_type(uuid.uuid1())
if VERSION < (1, 10):
# Support for the name to be changed through a popup in the admin.
unique_name = unique_name.replace('-', '')
option, created = self.model.objects.get_or_create(
name=unique_name)
return option
class ContentTypeOptionMixin(OptionMixin):
"""Same as option mixin, with content type."""
def create_option(self):
"""Return option, content type."""
option = super(ContentTypeOptionMixin, self).create_option()
ctype = ContentType.objects.get_for_model(option)
return option, ctype
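# Editorial usage sketch (not part of the original module): a concrete test
# typically combines the pieces above, e.g.
#
#     class PersonAutocompleteTest(AdminMixin, OptionMixin, AutocompleteTestCase):
#         model = Person  # hypothetical model with a unique `name` field
#
#         def test_dropdown_opens(self):
#             self.get(self.get_modeladmin_url('add'))
#             self.click('.select2-selection')          # selector is an assumption
#             self.assert_visible('.select2-dropdown')  # about the rendered widget
#
# The Person model and the CSS selectors are placeholders for illustration only.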
|
luzfcb/django-autocomplete-light
|
src/dal/test/case.py
|
Python
|
mit
| 3,287
|
[
"VisIt"
] |
18d5b21079ec1ad0f8000cb248baa7c8b4589fc2548cedeafccba1d4b69a5e71
|
import img_scale
import pyfits as pyf
import pylab as pyl
from mpl_toolkits.axes_grid1 import axes_grid
import cPickle as pickle
import os
from scipy.stats import scoreatpercentile
def mk_image(galaxy):
base = './../../images_v5/GS_2.5as_matched/gs_all_'
i_img = pyf.getdata(base+str(galaxy)+'_I.fits')
j_img = pyf.getdata(base+str(galaxy)+'_J.fits')
h_img = pyf.getdata(base+str(galaxy)+'_H.fits')
    #clip each band at its 99th percentile (keeps ~99% of pixels)
x = pyl.hstack(i_img)
i_lim = scoreatpercentile(x,99)
x = pyl.hstack(j_img)
j_lim = scoreatpercentile(x,99)
x = pyl.hstack(h_img)
h_lim = scoreatpercentile(x,99)
print galaxy, i_lim, j_lim, h_lim
img = pyl.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
img[:,:,0] = img_scale.asinh(h_img, scale_min=-0.1*h_lim, scale_max=h_lim,
non_linear=0.5)
img[:,:,1] = img_scale.asinh(j_img, scale_min=-0.1*j_lim, scale_max=j_lim,
non_linear=0.5)
img[:,:,2] = img_scale.asinh(i_img, scale_min=-0.1*i_lim, scale_max=i_lim,
non_linear=0.5)
return img
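# Editorial note (an assumption about img_scale.asinh, not verified against that
# module): an asinh stretch of this kind is commonly computed as
#
#     y = arcsinh((x - scale_min) / non_linear) / arcsinh((scale_max - scale_min) / non_linear)
#
# clipped to [0, 1], which compresses the bright core of each galaxy while
# preserving faint outer structure in the RGB composite.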
F = pyl.figure(1, figsize=(2, 3))
grid1 = axes_grid.ImageGrid(F, 111, nrows_ncols=(3,1), axes_pad=0.05,
add_all=True, share_all=True, aspect=True, direction='column')
ids = [9601, 4181, 1841]
base = './../../images_v5/GS_2.5as/gs_all_'
for i, ID in enumerate(ids):
if not os.path.isfile(base+str(ID)+'_I.fits'):
print('choose again', ID)
else:
grid1[i].spines['bottom'].set_color('0.8')
grid1[i].spines['top'].set_color('0.8')
grid1[i].spines['right'].set_color('0.8')
grid1[i].spines['left'].set_color('0.8')
grid1[i].set_axis_bgcolor('None')
img = mk_image(ID)
grid1[i].text(0.5, 0.5, str(ID), color='white' )
grid1[i].set_xticks([])
grid1[i].set_yticks([])
grid1[i].imshow(img, origin='lower')
# Label everything
#grid1[4].set_xlabel('8.75', fontsize=16)
#grid1[9].set_xlabel('9.25', fontsize=16)
#grid1[14].set_xlabel('9.75', fontsize=16)
#grid1[19].set_xlabel('10.25\nLog Mass $(M_\odot)$', fontsize=16)
#grid1[24].set_xlabel('10.75', fontsize=16)
#grid1[29].set_xlabel('11.25', fontsize=16)
#grid1[34].set_xlabel('11.75', fontsize=16)
#grid1[0].set_ylabel('45%', fontsize=16)
#grid1[1].set_ylabel('35%', fontsize=16)
#grid1[2].set_ylabel(r'$\xi[i_{775}, H_{160}]$ (%)'+'\n25%', fontsize=16,
# multialignment='center')
#grid1[3].set_ylabel('15%', fontsize=16)
#grid1[4].set_ylabel('5%', fontsize=16)
pyl.show()
|
boada/ICD
|
sandbox/plot_icd_examples.py
|
Python
|
mit
| 2,513
|
[
"Galaxy"
] |
ba49c9b811cd5c3bbd79f462e9864efab00e55b1bf048f1ec4c0e3c112d5b5ab
|
""" Module for converting various mesh formats."""
# Copyright (C) 2006 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Garth N. Wells (gmsh function)
# Modified by Alexander H. Jarosch (gmsh fix)
# Modified by Angelo Simone (Gmsh and Medit fix)
# Modified by Andy R. Terrel (gmsh fix and triangle function)
# Modified by Magnus Vikstrom (metis and scotch function)
# Modified by Bartosz Sawicki (diffpack function)
# Modified by Gideon Simpson (Exodus II function)
# Modified by Kent-Andre Mardal (Star-CD function)
# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))
# Modified by Neilen Marais (add gmsh support for reading physical region)
# Modified by Evan Lezar (add support for reading gmsh physical regions on facets)
# Modified by Jan Blechta (add triangle support for marker on edges and attributes on triangles)
#
# Last changed: 2012-11-22
# NOTE: This module does not depend on (py)dolfin being installed.
# NOTE: If future additions need that please import dolfin in a try: except:
# NOTE: clause and tell the user to install dolfin if it is not installed.
import getopt
import sys
from instant import get_status_output
import re
import warnings
import os.path
import abaqus
import xml_writer
import numpy
def format_from_suffix(suffix):
"Return format for given suffix"
if suffix == "xml":
return "xml"
elif suffix == "mesh":
return "mesh"
elif suffix == "gmsh":
return "gmsh"
elif suffix == "msh":
return "gmsh"
elif suffix == "gra":
return "metis"
elif suffix == "grf":
return "scotch"
elif suffix == "grid":
return "diffpack"
elif suffix == "inp":
return "abaqus"
elif suffix == "ncdf":
return "NetCDF"
elif suffix =="exo":
return "ExodusII"
elif suffix =="e":
return "ExodusII"
elif suffix == "vrt" or suffix == "cel":
return "StarCD"
elif suffix == "ele" or suffix == "node":
return "Triangle"
else:
_error("Sorry, unknown suffix %s." % suffix)
def mesh2xml(ifilename, ofilename):
"""Convert between .mesh and .xml, parser implemented as a
state machine:
0 = read 'Dimension'
1 = read dimension
2 = read 'Vertices'
3 = read number of vertices
4 = read next vertex
5 = read 'Triangles' or 'Tetrahedra'
6 = read number of cells
7 = read next cell
8 = done
"""
print "Converting from Medit format (.mesh) to DOLFIN XML format"
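    # Illustrative sketch (editorial addition): the states above correspond to a
    # Medit file of roughly this shape (2D case, one reference value per entity):
    #
    #     MeshVersionFormatted 1
    #     Dimension
    #     2
    #     Vertices
    #     3
    #     0.0 0.0 1
    #     1.0 0.0 1
    #     0.0 1.0 1
    #     Triangles
    #     1
    #     1 2 3 0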
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Remove newline
if line[-1] == "\n":
line = line[:-1]
# Read dimension
if line == "Dimension" or line == " Dimension":
line = ifile.readline()
num_dims = int(line)
if num_dims == 2:
cell_type = "triangle"
dim = 2
elif num_dims == 3:
cell_type = "tetrahedron"
dim = 3
break
# Check that we got the cell type
if cell_type == None:
_error("Unable to find cell type.")
# Step to beginning of file
ifile.seek(0)
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
if line[-1] == "\n":
line = line[:-1]
if state == 0:
if line == "Dimension" or line == " Dimension":
state += 1
elif state == 1:
num_dims = int(line)
state +=1
elif state == 2:
if line == "Vertices" or line == " Vertices":
state += 1
elif state == 3:
num_vertices = int(line)
xml_writer.write_header_vertices(ofile, num_vertices)
state +=1
elif state == 4:
if num_dims == 2:
(x, y, tmp) = line.split()
x = float(x)
y = float(y)
z = 0.0
elif num_dims == 3:
(x, y, z, tmp) = line.split()
x = float(x)
y = float(y)
z = float(z)
xml_writer.write_vertex(ofile, num_vertices_read, x, y, z)
num_vertices_read +=1
if num_vertices == num_vertices_read:
xml_writer.write_footer_vertices(ofile)
state += 1
elif state == 5:
if (line == "Triangles" or line == " Triangles") and num_dims == 2:
state += 1
if line == "Tetrahedra" and num_dims == 3:
state += 1
elif state == 6:
num_cells = int(line)
xml_writer.write_header_cells(ofile, num_cells)
state +=1
elif state == 7:
if num_dims == 2:
(n0, n1, n2, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
(n0, n1, n2, n3, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
n3 = int(n3) - 1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
state += 1
elif state == 8:
break
# Check that we got all data
if state == 8:
print "Conversion done"
else:
_error("Missing data, unable to convert")
# Write footer
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def gmsh2xml(ifilename, handler):
"""Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
parser implemented as a state machine:
0 = read 'MeshFormat'
1 = read mesh format data
2 = read 'EndMeshFormat'
3 = read 'Nodes'
4 = read number of vertices
5 = read vertices
6 = read 'EndNodes'
7 = read 'Elements'
8 = read number of cells
9 = read cells
10 = done
Afterwards, extract physical region numbers if they are defined in
the mesh file as a mesh function.
"""
print "Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format"
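    # Illustrative sketch (editorial addition): a minimal Gmsh 2.x ASCII file that
    # walks through the states above looks like
    #
    #     $MeshFormat
    #     2.2 0 8
    #     $EndMeshFormat
    #     $Nodes
    #     3
    #     1 0.0 0.0 0.0
    #     2 1.0 0.0 0.0
    #     3 0.0 1.0 0.0
    #     $EndNodes
    #     $Elements
    #     1
    #     1 2 2 99 1 1 2 3
    #     $EndElements
    #
    # where "1 2 2 99 1 1 2 3" is element 1, type 2 (triangle), two tags (physical
    # region 99, elementary entity 1), connecting nodes 1, 2 and 3.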
# The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
# the gmsh element types supported for conversion
supported_gmsh_element_types = [1, 2, 4, 15]
# Open files
ifile = open(ifilename, "r")
# Scan file for cell type
cell_type = None
highest_dim = 0
line = ifile.readline()
while line:
# Remove newline
if line[-1] == "\n":
line = line[:-1]
# Read dimension
if line.find("$Elements") == 0:
line = ifile.readline()
num_elements = int(line)
if num_elements == 0:
_error("No elements found in gmsh file.")
line = ifile.readline()
# Now iterate through elements to find largest dimension. Gmsh
# format might include elements of lower dimensions in the element list.
# We also need to count number of elements of correct dimensions.
# Also determine which vertices are not used.
dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
# Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
tags_for_dim = {0: [], 1: [], 2: [], 3: []}
while line.find("$EndElements") == -1:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
if highest_dim < dim:
highest_dim = dim
node_num_list = [int(node) for node in element[3 + num_tags:]]
vertices_used_for_dim[dim].extend(node_num_list)
if num_tags > 0:
tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
dim_count[dim] += 1
else:
#TODO: output a warning here. "gmsh element type %d not supported" % elem_type
pass
line = ifile.readline()
else:
# Read next line
line = ifile.readline()
# Check that we got the cell type and set num_cells_counted
if highest_dim == 0:
_error("Unable to find cells of supported type.")
num_cells_counted = dim_count[highest_dim]
vertex_set = set(vertices_used_for_dim[highest_dim])
vertices_used_for_dim[highest_dim] = None
vertex_dict = {}
for n,v in enumerate(vertex_set):
vertex_dict[v] = n
# Step to beginning of file
ifile.seek(0)
# Set mesh type
handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)
# Initialise node list (gmsh does not export all vertexes in order)
nodelist = {}
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
# Only import the dolfin objects if facet markings exist
process_facets = False
if len(tags_for_dim[highest_dim-1]) > 0:
# first construct the mesh
try:
from dolfin import MeshEditor, Mesh
except ImportError:
_error("DOLFIN must be installed to handle Gmsh boundary regions")
mesh = Mesh()
mesh_editor = MeshEditor ()
mesh_editor.open( mesh, highest_dim, highest_dim )
process_facets = True
else:
# TODO: Output a warning or an error here
        mesh = None
while state != 10:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
if line[-1] == "\n":
line = line[:-1]
if state == 0:
if line == "$MeshFormat":
state = 1
elif state == 1:
(version, file_type, data_size) = line.split()
state = 2
elif state == 2:
if line == "$EndMeshFormat":
state = 3
elif state == 3:
if line == "$Nodes":
state = 4
elif state == 4:
num_vertices = len(vertex_dict)
handler.start_vertices(num_vertices)
if process_facets:
mesh_editor.init_vertices ( num_vertices )
state = 5
elif state == 5:
(node_no, x, y, z) = line.split()
node_no = int(node_no)
x,y,z = [float(xx) for xx in (x,y,z)]
if vertex_dict.has_key(node_no):
node_no = vertex_dict[node_no]
else:
continue
nodelist[int(node_no)] = num_vertices_read
handler.add_vertex(num_vertices_read, [x, y, z])
if process_facets:
if highest_dim == 1:
coords = numpy.array([x])
elif highest_dim == 2:
coords = numpy.array([x, y])
elif highest_dim == 3:
coords = numpy.array([x, y, z])
mesh_editor.add_vertex(num_vertices_read, coords)
num_vertices_read +=1
if num_vertices == num_vertices_read:
handler.end_vertices()
state = 6
elif state == 6:
if line == "$EndNodes":
state = 7
elif state == 7:
if line == "$Elements":
state = 8
elif state == 8:
handler.start_cells(num_cells_counted)
if process_facets:
mesh_editor.init_cells( num_cells_counted )
state = 9
elif state == 9:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
else:
dim = 0
if dim == highest_dim:
node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
for node in node_num_list:
if not node in nodelist:
_error("Vertex %d of %s %d not previously defined." %
(node, cell_type_for_dim[dim], num_cells_read))
cell_nodes = [nodelist[n] for n in node_num_list]
handler.add_cell(num_cells_read, cell_nodes)
if process_facets:
cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
mesh_editor.add_cell(num_cells_read, cell_nodes)
num_cells_read +=1
if num_cells_counted == num_cells_read:
handler.end_cells()
if process_facets:
mesh_editor.close()
state = 10
elif state == 10:
break
# Write mesh function based on the Physical Regions defined by
# gmsh, but only if they are not all zero. All zero physical
# regions indicate that no physical regions were defined.
if highest_dim not in [1,2,3]:
_error("Gmsh tags not supported for dimension %i. Probably a bug" % dim)
tags = tags_for_dim[highest_dim]
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
handler.start_meshfunction("physical_region", dim, num_cells_counted)
for i, physical_region in enumerate(physical_regions):
handler.add_entity_meshfunction(i, physical_region)
handler.end_meshfunction()
# Now process the facet markers
tags = tags_for_dim[highest_dim-1]
if (len(tags) > 0) and (mesh is not None):
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
mesh.init(highest_dim-1,0)
# Get the facet-node connectivity information (reshape as a row of node indices per facet)
if highest_dim==1:
# for 1d meshes the mesh topology returns the vertex to vertex map, which isn't what we want
# as facets are vertices
facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
else:
facets_as_nodes = mesh.topology()(highest_dim-1,0)().reshape ( mesh.num_facets(), highest_dim )
# Build the reverse map
nodes_as_facets = {}
for facet in range(mesh.num_facets()):
nodes_as_facets[tuple(facets_as_nodes[facet,:])] = facet
data = [int(0*k) for k in range(mesh.num_facets()) ]
for i, physical_region in enumerate(physical_regions):
nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
nodes.sort()
if physical_region != 0:
try:
index = nodes_as_facets[tuple(nodes)]
data[index] = physical_region
except IndexError:
raise Exception ( "The facet (%d) was not found to mark: %s" % (i, nodes) )
# Create and initialise the mesh function
handler.start_meshfunction("facet_region", highest_dim-1, mesh.num_facets() )
for index, physical_region in enumerate ( data ):
handler.add_entity_meshfunction(index, physical_region)
handler.end_meshfunction()
# Check that we got all data
if state == 10:
print "Conversion done"
else:
        _error("Missing data, unable to convert\nDid you use version 2.0 of the gmsh file format?")
# Close files
ifile.close()
def triangle2xml(ifilename, ofilename):
"""Convert between triangle format
(http://www.cs.cmu.edu/~quake/triangle.html) and .xml. The
given ifilename should be the prefix for the corresponding
.node, and .ele files.
"""
def get_next_line (fp):
"""Helper function for skipping comments and blank lines"""
line = fp.readline()
if line == '':
_error("Hit end of file prematurely.")
line = line.strip()
if not (line.startswith('#') or line == ''):
return line
return get_next_line(fp)
print "Converting from Triangle format {.node, .ele} to DOLFIN XML format"
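    # Illustrative sketch (editorial addition): the Triangle files read below use
    # the usual layout, e.g. for a single triangle
    #
    #     prefix.node            prefix.ele
    #       3 2 0 0                1 3 0
    #       1 0.0 0.0              1 1 2 3
    #       2 1.0 0.0
    #       3 0.0 1.0
    #
    # (.node header: #vertices, dimension, #attributes, #boundary markers;
    #  .ele header: #triangles, nodes per triangle, #attributes).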
# Open files
for suffix in [".node", ".ele"]:
if suffix in ifilename and ifilename[-len(suffix):] == suffix:
ifilename = ifilename.replace(suffix, "")
node_file = open(ifilename+".node", "r")
ele_file = open(ifilename+".ele", "r")
ofile = open(ofilename, "w")
try:
edge_file = open(ifilename+".edge", "r")
print "Found .edge file"
except IOError:
edge_file = None
# Read all the nodes
nodes = {}
num_nodes, dim, attr, bound = map(int, get_next_line(node_file).split())
while len(nodes) < num_nodes:
node, x, y = get_next_line(node_file).split()[:3]
nodes[int(node)] = (float(x), float(y))
# Read all the triangles
tris = {}
tri_attrs = {}
num_tris, n_per_tri, attrs = map(int, get_next_line(ele_file).split())
while len(tris) < num_tris:
line = get_next_line(ele_file).split()
tri, n1, n2, n3 = map(int, line[:4])
# vertices are ordered according to current UFC ordering scheme -
# - may change in future!
tris[tri] = tuple(sorted((n1, n2, n3)))
tri_attrs[tri] = tuple(map(float, line[4:4+attrs]))
# Read all the boundary markers from edges
edge_markers_global = {}
edge_markers_local = []
got_negative_edge_markers = False
if edge_file is not None:
num_edges, num_edge_markers = map(int, get_next_line(edge_file).split())
if num_edge_markers == 1:
while len(edge_markers_global) < num_edges:
edge, v1, v2, marker = map(int, get_next_line(edge_file).split())
if marker < 0: got_negative_edge_markers = True
edge_markers_global[tuple(sorted((v1, v2)))] = marker
if got_negative_edge_markers:
print "Some edge markers are negative! dolfin will increase "\
"them by probably 2**32 when loading xml. "\
"Consider using non-negative edge markers only."
for tri, vertices in tris.iteritems():
v0, v1, v2 = sorted((vertices[0:3]))
try:
edge_markers_local.append((tri, 0, \
edge_markers_global[(v1, v2)]))
edge_markers_local.append((tri, 1, \
edge_markers_global[(v0, v2)]))
edge_markers_local.append((tri, 2, \
edge_markers_global[(v0, v1)]))
except IndexError:
raise Exception("meshconvert.py: The facet was not found.")
elif num_edge_markers == 0:
print "...but no markers in it. Ignoring it"
else:
print "...but %d markers specified in it. It won't be processed."\
%num_edge_markers
# Write everything out
xml_writer.write_header_mesh(ofile, "triangle", 2)
xml_writer.write_header_vertices(ofile, num_nodes)
node_off = 0 if nodes.has_key(0) else -1
for node, node_t in nodes.iteritems():
xml_writer.write_vertex(ofile, node+node_off, node_t[0], node_t[1], 0.0)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_tris)
tri_off = 0 if tris.has_key(0) else -1
for tri, tri_t in tris.iteritems():
xml_writer.write_cell_triangle(ofile, tri+tri_off, tri_t[0] + node_off,
tri_t[1] + node_off, tri_t[2] + node_off)
xml_writer.write_footer_cells(ofile)
if len(edge_markers_local) > 0:
xml_writer.write_header_domains(ofile)
xml_writer.write_header_meshvaluecollection(ofile, \
"edge markers", 1, len(edge_markers_local), "uint")
for tri, local_edge, marker in edge_markers_local:
xml_writer.write_entity_meshvaluecollection(ofile, \
1, tri+tri_off, marker, local_edge)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
for i in range(attrs):
afilename = ofilename.replace(".xml", ".attr"+str(i)+".xml")
afile = open(afilename, "w")
xml_writer.write_header_meshfunction2(afile)
xml_writer.write_header_meshvaluecollection(afile, \
"triangle attribs "+str(i), 2, num_tris, "double")
for tri, tri_a in tri_attrs.iteritems():
xml_writer.write_entity_meshvaluecollection(afile, \
2, tri+tri_off, tri_a[i], 0)
xml_writer.write_footer_meshvaluecollection(afile)
xml_writer.write_footer_meshfunction(afile)
print "triangle attributes from .ele file written to "+afilename
afile.close()
# Close files
node_file.close()
ele_file.close()
if edge_file is not None:
edge_file.close()
ofile.close()
def xml_old2xml(ifilename, ofilename):
"Convert from old DOLFIN XML format to new."
print "Converting from old (pre DOLFIN 0.6.2) to new DOLFIN XML format..."
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type (assuming there is just one)
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Read dimension
if "<triangle" in line:
cell_type = "triangle"
dim = 2
break
elif "<tetrahedron" in line:
cell_type = "tetrahedron"
dim = 3
break
# Step to beginning of file
ifile.seek(0)
# Read lines and make changes
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Modify line
if "xmlns" in line:
line = "<dolfin xmlns:dolfin=\"http://fenicsproject.org\">\n"
if "<mesh>" in line:
line = " <mesh celltype=\"%s\" dim=\"%d\">\n" % (cell_type, dim)
if dim == 2 and " z=\"0.0\"" in line:
line = line.replace(" z=\"0.0\"", "")
if " name=" in line:
line = line.replace(" name=", " index=")
if " name =" in line:
line = line.replace(" name =", " index=")
if "n0" in line:
line = line.replace("n0", "v0")
if "n1" in line:
line = line.replace("n1", "v1")
if "n2" in line:
line = line.replace("n2", "v2")
if "n3" in line:
line = line.replace("n3", "v3")
# Write line
ofile.write(line)
# Close files
ifile.close();
ofile.close();
print "Conversion done"
def metis_graph2graph_xml(ifilename, ofilename):
"Convert from Metis graph format to DOLFIN Graph XML."
print "Converting from Metis graph format to DOLFIN Graph XML."
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
xml_writer.write_header_graph(ofile, "directed")
xml_writer.write_header_vertices(ofile, int(num_vertices))
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges))
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, 2*int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
for i in range(int(num_vertices)):
        print "vertex %d" % i
line = ifile.readline()
edges = line.split()
for e in edges:
xml_writer.write_graph_edge(ofile, i, int(e))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
ifile.close();
ofile.close();
def scotch_graph2graph_xml(ifilename, ofilename):
"Convert from Scotch graph format to DOLFIN Graph XML."
print "Converting from Scotch graph format to DOLFIN Graph XML."
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Skip graph file version number
ifile.readline()
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
# Read start index and numeric flag
# Start index is 0 or 1 (C/Fortran)
# Numeric flag is 3 bits where bit 1 enables vertex labels
# bit 2 enables edge weights and bit 3 enables vertex weights
line = ifile.readline()
(start_index, numeric_flag) = line.split()
    # Handling not implemented
if not numeric_flag == "000":
_error("Handling of scotch vertex labels, edge- and vertex weights not implemented")
xml_writer.write_header_graph(ofile, "undirected")
xml_writer.write_header_vertices(ofile, int(num_vertices))
# Read vertices and edges, first number gives number of edges from this vertex (not used)
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges)-1)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
ifile.readline()
ifile.readline()
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
for j in range(1, len(edges)):
xml_writer.write_graph_edge(ofile, i, int(edges[j]))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
ifile.close();
ofile.close();
def diffpack2xml(ifilename, ofilename):
"Convert from Diffpack tetrahedral grid format to DOLFIN XML."
print diffpack2xml.__doc__
# Format strings for MeshFunction XML files
meshfunction_header = """\
<?xml version="1.0" encoding="UTF-8"?>\n
<dolfin xmlns:dolfin="http://www.fenics.org/dolfin/">
<mesh_function type="uint" dim="%d" size="%d">\n"""
meshfunction_entity = " <entity index=\"%d\" value=\"%d\"/>\n"
meshfunction_footer = " </mesh_function>\n</dolfin>"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read and analyze header
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
if re.search(r"Number of elements", line):
num_cells = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of nodes", line):
num_vertices = int(re.match(r".*\s(\d+).*", line).group(1))
xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
xml_writer.write_header_vertices(ofile, num_vertices)
# Read & write vertices and collect markers for vertices
vertex_markers = []
unique_vertex_markers = set()
for i in range(num_vertices):
line = ifile.readline()
m = re.match(r"^.*\(\s*(.*)\s*\).*\](.*)$", line)
x = map(float, re.split("[\s,]+", m.group(1)))
xml_writer.write_vertex(ofile, i, *x)
markers = map(int, m.group(2).split())
vertex_markers.append(markers)
unique_vertex_markers.update(markers)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_cells)
# Output unique vertex markers as individual VertexFunctions
unique_vertex_markers.difference_update([0])
for unique_marker in unique_vertex_markers:
ofile_marker = open(ofilename.replace(".xml", "") + \
"_marker_" + str(unique_marker)+".xml", "w")
xml_writer.write_header_meshfunction(ofile_marker, 0, num_vertices)
for ind, markers in enumerate(vertex_markers):
if unique_marker in markers:
xml_writer.write_entity_meshfunction(ofile_marker, ind, unique_marker)
else:
xml_writer.write_entity_meshfunction(ofile_marker, ind, 0)
xml_writer.write_footer_meshfunction(ofile_marker)
# Ignore comment lines
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
# Read & write cells and collect cell and face markers
cell_markers = []
facet_markers = []
facet_to_vert = [[1,2,3], [0,2,3], [0,1,3], [0,1,2]]
vert_to_facet = facet_to_vert # The same!
cell_ind = 0
while cell_ind < num_cells:
line = ifile.readline()
v = line.split()
if not v:
continue
if v[1] != "ElmT4n3D":
_error("Only tetrahedral elements (ElmT4n3D) are implemented.")
# Store Cell markers
cell_markers.append(int(v[2]))
# Sort vertex indices
cell_indices = sorted(map(lambda x: int(x)-1, v[3:]))
xml_writer.write_cell_tetrahedron(ofile, cell_ind, *cell_indices)
# Check Facet info
process_facet = set(range(4))
for local_vert_ind, global_vert_ind in enumerate(cell_indices):
# If no marker is included for vertex skip corresponding facet
if not vertex_markers[global_vert_ind]:
process_facet.difference_update(facet_to_vert[local_vert_ind])
# Process facets
for local_facet in process_facet:
# Start with markers from first vertex
global_first_vertex = cell_indices[facet_to_vert[local_facet][0]]
marker_intersection = set(vertex_markers[global_first_vertex])
# Process the other vertices
for local_vert in facet_to_vert[local_facet][1:]:
marker_intersection.intersection_update(\
vertex_markers[cell_indices[local_vert]])
if not marker_intersection:
break
# If not break we have a marker on local_facet
else:
assert(len(marker_intersection)==1)
facet_markers.append((cell_ind, local_facet, \
marker_intersection.pop()))
# Bump cell_ind
cell_ind += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_domains(ofile)
# Write facet markers if any
if facet_markers:
xml_writer.write_header_meshvaluecollection(ofile, "m", 2, \
len(facet_markers), "uint")
for cell, local_facet, marker in facet_markers:
xml_writer.write_entity_meshvaluecollection(ofile, 2, cell, \
marker, local_facet)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_header_meshvaluecollection(ofile, "m", 3, \
len(cell_markers), "uint")
for cell, marker in enumerate(cell_markers):
        xml_writer.write_entity_meshvaluecollection(ofile, 3, cell, \
                                                    marker)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
class ParseError(Exception):
""" Error encountered in source file.
"""
class DataHandler(object):
""" Baseclass for handlers of mesh data.
The actual handling of mesh data encountered in the source file is
delegated to a polymorfic object. Typically, the delegate will write the
data to XML.
@ivar _state: the state which the handler is in, one of State_*.
@ivar _cell_type: cell type in mesh. One of CellType_*.
@ivar _dim: mesh dimensions.
"""
State_Invalid, State_Init, State_Vertices, State_Cells, \
State_MeshFunction, State_MeshValueCollection = range(6)
CellType_Tetrahedron, CellType_Triangle, CellType_Interval = range(3)
def __init__(self):
self._state = self.State_Invalid
def set_mesh_type(self, cell_type, dim):
assert self._state == self.State_Invalid
self._state = self.State_Init
if cell_type == "tetrahedron":
self._cell_type = self.CellType_Tetrahedron
elif cell_type == "triangle":
self._cell_type = self.CellType_Triangle
elif cell_type == "interval":
self._cell_type = self.CellType_Interval
self._dim = dim
def start_vertices(self, num_vertices):
assert self._state == self.State_Init
self._state = self.State_Vertices
def add_vertex(self, vertex, coords):
assert self._state == self.State_Vertices
def end_vertices(self):
assert self._state == self.State_Vertices
self._state = self.State_Init
def start_cells(self, num_cells):
assert self._state == self.State_Init
self._state = self.State_Cells
def add_cell(self, cell, nodes):
assert self._state == self.State_Cells
def end_cells(self):
assert self._state == self.State_Cells
self._state = self.State_Init
def start_domains(self):
assert self._state == self.State_Init
def end_domains(self):
self._state = self.State_Init
def start_meshfunction(self, name, dim, size):
assert self._state == self.State_Init
self._state = self.State_MeshFunction
def add_entity_meshfunction(self, index, value):
assert self._state == self.State_MeshFunction
def end_meshfunction(self):
assert self._state == self.State_MeshFunction
self._state = self.State_Init
def start_mesh_value_collection(self, name, dim, size, etype):
assert self._state == self.State_Init
self._state = self.State_MeshValueCollection
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
assert self._state == self.State_MeshValueCollection
def end_mesh_value_collection(self):
assert self._state == self.State_MeshValueCollection
self._state = self.State_Init
def warn(self, msg):
""" Issue warning during parse.
"""
warnings.warn(msg)
def error(self, msg):
""" Raise error during parse.
This method is expected to raise ParseError.
"""
raise ParseError(msg)
def close(self):
self._state = self.State_Invalid
class XmlHandler(DataHandler):
""" Data handler class which writes to Dolfin XML.
"""
def __init__(self, ofilename):
DataHandler.__init__(self)
self._ofilename = ofilename
self.__ofile = file(ofilename, "wb")
self.__ofile_meshfunc = None
def ofile(self):
return self.__ofile
def set_mesh_type(self, cell_type, dim):
DataHandler.set_mesh_type(self, cell_type, dim)
xml_writer.write_header_mesh(self.__ofile, cell_type, dim)
def start_vertices(self, num_vertices):
DataHandler.start_vertices(self, num_vertices)
xml_writer.write_header_vertices(self.__ofile, num_vertices)
def add_vertex(self, vertex, coords):
DataHandler.add_vertex(self, vertex, coords)
xml_writer.write_vertex(self.__ofile, vertex, *coords)
def end_vertices(self):
DataHandler.end_vertices(self)
xml_writer.write_footer_vertices(self.__ofile)
def start_cells(self, num_cells):
DataHandler.start_cells(self, num_cells)
xml_writer.write_header_cells(self.__ofile, num_cells)
def add_cell(self, cell, nodes):
DataHandler.add_cell(self, cell, nodes)
if self._cell_type == self.CellType_Tetrahedron:
func = xml_writer.write_cell_tetrahedron
elif self._cell_type == self.CellType_Triangle:
func = xml_writer.write_cell_triangle
elif self._cell_type == self.CellType_Interval:
func = xml_writer.write_cell_interval
func(self.__ofile, cell, *nodes)
def end_cells(self):
DataHandler.end_cells(self)
xml_writer.write_footer_cells(self.__ofile)
def start_meshfunction(self, name, dim, size):
DataHandler.start_meshfunction(self, name, dim, size)
fname = os.path.splitext(self.__ofile.name)[0]
self.__ofile_meshfunc = file("%s_%s.xml" % (fname, name), "wb")
xml_writer.write_header_meshfunction(self.__ofile_meshfunc, dim, size)
def add_entity_meshfunction(self, index, value):
DataHandler.add_entity_meshfunction(self, index, value)
xml_writer.write_entity_meshfunction(self.__ofile_meshfunc, index, value)
def end_meshfunction(self):
DataHandler.end_meshfunction(self)
xml_writer.write_footer_meshfunction(self.__ofile_meshfunc)
self.__ofile_meshfunc.close()
self.__ofile_meshfunc = None
def start_domains(self):
#DataHandler.start_domains(self)
xml_writer.write_header_domains(self.__ofile)
def end_domains(self):
#DataHandler.end_domains(self)
xml_writer.write_footer_domains(self.__ofile)
def start_mesh_value_collection(self, name, dim, size, etype):
DataHandler.start_mesh_value_collection(self, name, dim, size, etype)
xml_writer.write_header_meshvaluecollection(self.__ofile, name, dim, size, etype)
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
DataHandler.add_entity_mesh_value_collection(self, dim, index, value)
xml_writer.write_entity_meshvaluecollection(self.__ofile, dim, index, value, local_entity=local_entity)
def end_mesh_value_collection(self):
DataHandler.end_mesh_value_collection(self)
xml_writer.write_footer_meshvaluecollection(self.__ofile)
def close(self):
DataHandler.close(self)
if self.__ofile.closed:
return
xml_writer.write_footer_mesh(self.__ofile)
self.__ofile.close()
if self.__ofile_meshfunc is not None:
self.__ofile_meshfunc.close()
def netcdf2xml(ifilename,ofilename):
"Convert from NetCDF format to DOLFIN XML."
print "Converting from NetCDF format (.ncdf) to DOLFIN XML format"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
cell_type = None
dim = 0
# Scan file for dimension, number of nodes, number of elements
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if re.search(r"num_dim.*=", line):
dim = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_nodes.*=", line):
num_vertices = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_elem.*=", line):
num_cells = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"connect1 =",line):
break
num_dims=dim
# Set cell type
if dim == 2:
cell_type ="triangle"
if dim == 3:
cell_type ="tetrahedron"
# Check that we got the cell type
if cell_type == None:
_error("Unable to find cell type.")
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
xml_writer.write_header_cells(ofile, num_cells)
num_cells_read = 0
# Read and write cells
while 1:
# Read next line
line = ifile.readline()
if not line:
break
connect=re.split("[,;]",line)
if num_dims == 2:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
n3 = int(connect[3])-1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_vertices(ofile, num_vertices)
break
num_vertices_read = 0
coords = [[],[],[]]
coord = -1
while 1:
line = ifile.readline()
if not line:
_error("Missing data")
if re.search(r"coord =",line):
break
# Read vertices
while 1:
line = ifile.readline()
if not line:
break
if re.search(r"\A\s\s\S+,",line):
coord+=1
print "Found x_"+str(coord)+" coordinates"
coords[coord] += line.split()
if re.search(r";",line):
break
# Write vertices
for i in range(num_vertices):
if num_dims == 2:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = 0
if num_dims == 3:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = float(re.split(",",coords[2].pop(0))[0])
xml_writer.write_vertex(ofile, i, x, y, z)
# Write footer
xml_writer.write_footer_vertices(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def exodus2xml(ifilename,ofilename):
"Convert from Exodus II format to DOLFIN XML."
print "Converting from Exodus II format to NetCDF format"
name = ifilename.split(".")[0]
netcdffilename = name +".ncdf"
status, output = get_status_output('ncdump '+ifilename + ' > '+netcdffilename)
if status != 0:
        raise IOError, "Something went wrong while executing ncdump. Is ncdump "\
"installed on the system?"
netcdf2xml(netcdffilename, ofilename)
def _error(message):
"Write an error message"
for line in message.split("\n"):
print "*** %s" % line
sys.exit(2)
def convert2xml(ifilename, ofilename, iformat=None):
""" Convert a file to the DOLFIN XML format.
"""
convert(ifilename, XmlHandler(ofilename), iformat=iformat)
def convert(ifilename, handler, iformat=None):
""" Convert a file using a provided data handler.
Note that handler.close is called when this function finishes.
@param ifilename: Name of input file.
@param handler: The data handler (instance of L{DataHandler}).
@param iformat: Format of input file.
"""
if iformat is None:
iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])
# XXX: Backwards-compat
if hasattr(handler, "_ofilename"):
ofilename = handler._ofilename
# Choose conversion
if iformat == "mesh":
# Convert from mesh to xml format
mesh2xml(ifilename, ofilename)
elif iformat == "gmsh":
# Convert from gmsh to xml format
gmsh2xml(ifilename, handler)
elif iformat == "Triangle":
# Convert from Triangle to xml format
triangle2xml(ifilename, ofilename)
elif iformat == "xml-old":
# Convert from old to new xml format
xml_old2xml(ifilename, ofilename)
elif iformat == "metis":
# Convert from metis graph to dolfin graph xml format
metis_graph2graph_xml(ifilename, ofilename)
elif iformat == "scotch":
# Convert from scotch graph to dolfin graph xml format
scotch_graph2graph_xml(ifilename, ofilename)
elif iformat == "diffpack":
# Convert from Diffpack tetrahedral grid format to xml format
diffpack2xml(ifilename, ofilename)
elif iformat == "abaqus":
# Convert from abaqus to xml format
abaqus.convert(ifilename, handler)
elif iformat == "NetCDF":
# Convert from NetCDF generated from ExodusII format to xml format
netcdf2xml(ifilename, ofilename)
elif iformat =="ExodusII":
# Convert from ExodusII format to xml format via NetCDF
exodus2xml(ifilename, ofilename)
elif iformat == "StarCD":
# Convert from Star-CD tetrahedral grid format to xml format
starcd2xml(ifilename, ofilename)
else:
_error("Sorry, cannot convert between %s and DOLFIN xml file formats." % iformat)
# XXX: handler.close messes things for other input formats than abaqus or gmsh
if iformat in ("abaqus", "gmsh"):
handler.close()
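# Editorial usage sketch (file names are placeholders): driven from Python rather
# than the dolfin-convert script, a conversion boils down to
#
#     from dolfin_utils.meshconvert import meshconvert
#     meshconvert.convert2xml("channel.msh", "channel.xml")
#
# where the input format is inferred from the suffix via format_from_suffix().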
def starcd2xml(ifilename, ofilename):
"Convert from Star-CD tetrahedral grid format to DOLFIN XML."
print starcd2xml.__doc__
if not os.path.isfile(ifilename[:-3] + "vrt") or not os.path.isfile(ifilename[:-3] + "cel"):
print "StarCD format requires one .vrt file and one .cel file"
sys.exit(2)
# open output file
ofile = open(ofilename, "w")
# Open file, the vertices are in a .vrt file
ifile = open(ifilename[:-3] + "vrt", "r")
    xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
# Read & write vertices
    # first, read all lines (need to sweep through the file two times)
lines = ifile.readlines()
# second, find the number of vertices
num_vertices = -1
counter = 0
    # nodenr_map is needed because StarCD supports node numbering like 1,2,4 (i.e. 3 is missing)
nodenr_map = {}
for line in lines:
nodenr = int(line[0:15])
nodenr_map[nodenr] = counter
counter += 1
num_vertices = counter
# third, run over all vertices
xml_writer.write_header_vertices(ofile, num_vertices)
for line in lines:
nodenr = int(line[0:15])
vertex0 = float(line[15:31])
vertex1 = float(line[31:47])
vertex2 = float(line[47:63])
xml_writer.write_vertex(ofile, nodenr_map[nodenr], float(vertex0), float(vertex1), float(vertex2))
xml_writer.write_footer_vertices(ofile)
# Open file, the cells are in a .cel file
ifile = open(ifilename[:-3] + "cel", "r")
# Read & write cells
    # first, read all lines (need to sweep through the file two times)
lines = ifile.readlines()
# second, find the number of cells
num_cells = -1
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if node4 > 0:
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
counter += 1
else:
                print "The file contains cells that are not tetrahedra. The cell number is ", cellnr, " the line read was ", line
else:
# triangles on the surface
# print "The file does contain cells that are not tetraheders node4==0. The cell number is ", cellnr, " the line read was ", line
#sys.exit(2)
pass
num_cells = counter
# third, run over all cells
xml_writer.write_header_cells(ofile, num_cells)
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if (node4 > 0):
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
xml_writer.write_cell_tetrahedron(ofile, counter, nodenr_map[node0], nodenr_map[node1], nodenr_map[node2], nodenr_map[node4])
counter += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
|
maciekswat/dolfin_1.3.0
|
site-packages/dolfin_utils/meshconvert/meshconvert.py
|
Python
|
gpl-3.0
| 49,645
|
[
"NetCDF"
] |
b89a8480d7b46c1f25c47f63f7faebd7dbd99554fc625553656d832b82925eb4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rdkit
from rdkit.Chem import AllChem
from rdkit import DataStructs
__license__ = "X11"
METADATA = {
"id": "method_rdkit_fcfp2_2048_tanimoto",
"representation": "fcfp2_2048",
"similarity": "tanimoto"
}
def _compute_fingerprint(molecule):
return AllChem.GetMorganFingerprintAsBitVect(
molecule, 1, nBits=2048, useFeatures=True)
def _compute_similarity(left, right):
return DataStructs.TanimotoSimilarity(left, right)
def create_model(train_ligands, train_decoys):
model = []
for molecule in train_ligands:
model.append({
"name": molecule.GetProp("_Name"),
"fingerprint": _compute_fingerprint(molecule)
})
model_information = {}
return model, model_information
def compute_score(model, molecule):
fingerprint = _compute_fingerprint(molecule)
similarities = [_compute_similarity(fingerprint, item["fingerprint"])
for item in model]
max_score = max(similarities)
index_of_max_score = similarities.index(max_score)
closest_molecule = model[index_of_max_score]
return {
"value": max_score,
"info": {
"closest": closest_molecule["name"]
}
}
def compute_similarity(left, right):
return _compute_similarity(_compute_fingerprint(left),
_compute_fingerprint(right))
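# Editorial usage sketch (not part of the original method module; the SMILES and
# names are made up, and RDKit must be installed):
#
#     from rdkit import Chem
#     ligand = Chem.MolFromSmiles("CCO")
#     ligand.SetProp("_Name", "ethanol")
#     model, info = create_model([ligand], train_decoys=[])
#     query = Chem.MolFromSmiles("CCN")
#     compute_score(model, query)   # -> {'value': ..., 'info': {'closest': 'ethanol'}}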
|
skodapetr/lbvs-environment
|
methods/fcfp/fcfp2_2048_tanimoto.py
|
Python
|
mit
| 1,423
|
[
"RDKit"
] |
a4e9a026f8623f8d9620fa3abbb9cdf55bbc489dd5d18f3601e8554cdbfec8dd
|
#!/usr/bin/env python
#
# This code was copied from the data generation program of Tencent Alchemy
# project (https://github.com/tencent-alchemy).
#
#
# #
# # Copyright 2019 Tencent America LLC. All Rights Reserved.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# #
# # Author: Qiming Sun <osirpt.sun@gmail.com>
# #
'''
Non-relativistic UHF analytical Hessian with density-fitting approximation
'''
import time
import numpy
import numpy as np
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.hessian import rhf as rhf_hess
from pyscf.hessian import uhf as uhf_hess
from pyscf.df.hessian.rhf import _int3c_wrapper, _load_dim0
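# Editorial usage sketch (an assumption about the surrounding PySCF API, not taken
# from this file): these routines are normally reached through a density-fitted
# UHF object, roughly
#
#     from pyscf import gto, scf
#     mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='def2-svp')
#     mf = scf.UHF(mol).density_fit().run()
#     hess = mf.Hessian().kernel()   # (natm, natm, 3, 3) array of nuclear second derivatives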
def partial_hess_elec(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,
atmlst=None, max_memory=4000, verbose=None):
e1, ej, ek = _partial_hess_ejk(hessobj, mo_energy, mo_coeff, mo_occ,
atmlst, max_memory, verbose, True)
return e1 + ej - ek
def _partial_hess_ejk(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,
atmlst=None, max_memory=4000, verbose=None, with_k=True):
log = logger.new_logger(hessobj, verbose)
time0 = t1 = (time.clock(), time.time())
mol = hessobj.mol
mf = hessobj.base
if mo_energy is None: mo_energy = mf.mo_energy
if mo_occ is None: mo_occ = mf.mo_occ
if mo_coeff is None: mo_coeff = mf.mo_coeff
if atmlst is None: atmlst = range(mol.natm)
nao, nmo = mo_coeff[0].shape
mocca = mo_coeff[0][:,mo_occ[0]>0]
moccb = mo_coeff[1][:,mo_occ[1]>0]
nocca = mocca.shape[1]
noccb = moccb.shape[1]
dm0a = numpy.dot(mocca, mocca.T)
dm0b = numpy.dot(moccb, moccb.T)
dm0 = dm0a + dm0b
# Energy weighted density matrix
mo_ea = mo_energy[0][mo_occ[0]>0]
mo_eb = mo_energy[1][mo_occ[1]>0]
dme0 = numpy.einsum('pi,qi,i->pq', mocca, mocca, mo_ea)
dme0+= numpy.einsum('pi,qi,i->pq', moccb, moccb, mo_eb)
auxmol = hessobj.base.with_df.auxmol
naux = auxmol.nao
nbas = mol.nbas
auxslices = auxmol.aoslice_by_atom()
aoslices = mol.aoslice_by_atom()
aux_loc = auxmol.ao_loc
blksize = min(480, hessobj.max_memory*.3e6/8/nao**2)
aux_ranges = ao2mo.outcore.balance_partition(auxmol.ao_loc, blksize)
hcore_deriv = hessobj.hcore_generator(mol)
s1aa, s1ab, s1a = rhf_hess.get_ovlp(mol)
ftmp = lib.H5TmpFile()
get_int3c = _int3c_wrapper(mol, auxmol, 'int3c2e', 's1')
# Without RI basis response
# (20|0)(0|00)
# (11|0)(0|00)
# (10|0)(0|10)
int2c = auxmol.intor('int2c2e', aosym='s1')
int2c_low = scipy.linalg.cho_factor(int2c, lower=True)
int2c_ip1 = auxmol.intor('int2c2e_ip1', aosym='s1')
rhoj0_P = 0
if with_k:
if hessobj.max_memory*.4e6/8 < naux*nocca*(nocca+nao):
raise RuntimeError('Memory not enough. You need to increase mol.max_memory')
rhok0a_Pl_ = np.empty((naux,nao,nocca))
rhok0b_Pl_ = np.empty((naux,nao,noccb))
for i, (shl0, shl1, p0, p1) in enumerate(aoslices):
int3c = get_int3c((shl0, shl1, 0, nbas, 0, auxmol.nbas))
rhoj0_P += np.einsum('klp,kl->p', int3c, dm0[p0:p1])
if with_k:
tmp = lib.einsum('ijp,jk->pik', int3c, mocca)
tmp = scipy.linalg.cho_solve(int2c_low, tmp.reshape(naux,-1), overwrite_b=True)
rhok0a_Pl_[:,p0:p1] = tmp.reshape(naux,p1-p0,nocca)
tmp = lib.einsum('ijp,jk->pik', int3c, moccb)
tmp = scipy.linalg.cho_solve(int2c_low, tmp.reshape(naux,-1), overwrite_b=True)
rhok0b_Pl_[:,p0:p1] = tmp.reshape(naux,p1-p0,noccb)
int3c = tmp = None
rhoj0_P = scipy.linalg.cho_solve(int2c_low, rhoj0_P)
get_int3c_ipip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ipip1', 's1')
vj1_diag = 0
vk1a_diag = 0
vk1b_diag = 0
for shl0, shl1, nL in aux_ranges:
shls_slice = (0, nbas, 0, nbas, shl0, shl1)
p0, p1 = aux_loc[shl0], aux_loc[shl1]
int3c_ipip1 = get_int3c_ipip1(shls_slice)
vj1_diag += np.einsum('xijp,p->xij', int3c_ipip1, rhoj0_P[p0:p1]).reshape(3,3,nao,nao)
if with_k:
tmp = lib.einsum('Plj,Jj->PlJ', rhok0a_Pl_[p0:p1], mocca)
vk1a_diag += lib.einsum('xijp,plj->xil', int3c_ipip1, tmp).reshape(3,3,nao,nao)
tmp = lib.einsum('Plj,Jj->PlJ', rhok0b_Pl_[p0:p1], moccb)
vk1b_diag += lib.einsum('xijp,plj->xil', int3c_ipip1, tmp).reshape(3,3,nao,nao)
vhfa_diag = vj1_diag-vk1a_diag
vhfb_diag = vj1_diag-vk1b_diag
t1 = log.timer_debug1('contracting int2e_ipip1', *t1)
int3c_ipip1 = get_int3c_ipip1 = tmp = None
get_int3c_ip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip1', 's1')
rho_ip1 = ftmp.create_dataset('rho_ip1', (nao,nao,naux,3), 'f8')
rhoka_ip1_IkP = ftmp.create_group('rhoka_ip1_IkP')
rhokb_ip1_IkP = ftmp.create_group('rhokb_ip1_IkP')
rhoka_ip1_PkI = ftmp.create_group('rhoka_ip1_PkI')
rhokb_ip1_PkI = ftmp.create_group('rhokb_ip1_PkI')
rhoj1 = np.empty((mol.natm,naux,3))
wj1 = np.empty((mol.natm,naux,3))
for i0, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
shls_slice = (shl0, shl1, 0, nbas, 0, auxmol.nbas)
int3c_ip1 = get_int3c_ip1(shls_slice)
tmp_ip1 = scipy.linalg.cho_solve(int2c_low, int3c_ip1.reshape(-1,naux).T,
overwrite_b=True).reshape(naux,3,p1-p0,nao)
rhoj1[i0] = np.einsum('pxij,ji->px', tmp_ip1, dm0[:,p0:p1])
wj1[i0] = np.einsum('xijp,ji->px', int3c_ip1, dm0[:,p0:p1])
rho_ip1[p0:p1] = tmp_ip1.transpose(2,3,0,1)
if with_k:
tmp = lib.einsum('pykl,li->ikpy', tmp_ip1, dm0a)
rhoka_ip1_IkP['%.4d'%ia] = tmp
rhoka_ip1_PkI['%.4d'%ia] = tmp.transpose(2,1,0,3)
tmp = None
tmp = lib.einsum('pykl,li->ikpy', tmp_ip1, dm0b)
rhokb_ip1_IkP['%.4d'%ia] = tmp
rhokb_ip1_PkI['%.4d'%ia] = tmp.transpose(2,1,0,3)
tmp = None
ej = lib.einsum('ipx,jpy->ijxy', rhoj1, wj1) * 4
ek = np.zeros_like(ej)
e1 = np.zeros_like(ej)
rhoj1 = wj1 = None
if with_k:
vk2a_buf = 0
vk2b_buf = 0
for shl0, shl1, nL in aux_ranges:
shls_slice = (0, nbas, 0, nbas, shl0, shl1)
p0, p1 = aux_loc[shl0], aux_loc[shl1]
int3c_ip1 = get_int3c_ip1(shls_slice)
vk2a_buf += lib.einsum('xijp,pkjy->xyki', int3c_ip1, _load_dim0(rhoka_ip1_PkI, p0, p1))
vk2b_buf += lib.einsum('xijp,pkjy->xyki', int3c_ip1, _load_dim0(rhokb_ip1_PkI, p0, p1))
get_int3c_ip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip2', 's1')
wj_ip2 = np.empty((naux,3))
wka_ip2_Ipk = ftmp.create_dataset('wka_ip2', (nao,naux,3,nao), 'f8')
wkb_ip2_Ipk = ftmp.create_dataset('wkb_ip2', (nao,naux,3,nao), 'f8')
if hessobj.auxbasis_response > 1:
wka_ip2_P__ = np.empty((naux,3,nocca,nocca))
wkb_ip2_P__ = np.empty((naux,3,noccb,noccb))
for shl0, shl1, nL in aux_ranges:
shls_slice = (0, nbas, 0, nbas, shl0, shl1)
p0, p1 = aux_loc[shl0], aux_loc[shl1]
int3c_ip2 = get_int3c_ip2(shls_slice)
wj_ip2[p0:p1] = np.einsum('yklp,lk->py', int3c_ip2, dm0)
if with_k:
wka_ip2_Ipk[:,p0:p1] = lib.einsum('yklp,il->ipyk', int3c_ip2, dm0a)
wkb_ip2_Ipk[:,p0:p1] = lib.einsum('yklp,il->ipyk', int3c_ip2, dm0b)
if hessobj.auxbasis_response > 1:
wka_ip2_P__[p0:p1] = lib.einsum('xuvp,ui,vj->pxij', int3c_ip2, mocca, mocca)
wkb_ip2_P__[p0:p1] = lib.einsum('xuvp,ui,vj->pxij', int3c_ip2, moccb, moccb)
int3c_ip2 = None
if hessobj.auxbasis_response > 1:
get_int3c_ipip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ipip2', 's1')
rhok0a_P__ = lib.einsum('plj,li->pij', rhok0a_Pl_, mocca)
rhok0b_P__ = lib.einsum('plj,li->pij', rhok0b_Pl_, moccb)
rho2c_0 = lib.einsum('pij,qij->pq', rhok0a_P__, rhok0a_P__)
rho2c_0 += lib.einsum('pij,qij->pq', rhok0b_P__, rhok0b_P__)
int2c_inv = np.linalg.inv(int2c)
int2c_ipip1 = auxmol.intor('int2c2e_ipip1', aosym='s1')
int2c_ip_ip = lib.einsum('xpq,qr,ysr->xyps', int2c_ip1, int2c_inv, int2c_ip1)
int2c_ip_ip -= auxmol.intor('int2c2e_ip1ip2', aosym='s1').reshape(3,3,naux,naux)
int2c = int2c_low = None
get_int3c_ipvip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ipvip1', 's1')
get_int3c_ip1ip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip1ip2', 's1')
for i0, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
shls_slice = (shl0, shl1, 0, nbas, 0, auxmol.nbas)
# (10|0)(0|10) without response of RI basis
if with_k:
int3c_ip1 = get_int3c_ip1(shls_slice)
vk1a = lib.einsum('xijp,ikpy->xykj', int3c_ip1, _load_dim0(rhoka_ip1_IkP, p0, p1))
vk1b = lib.einsum('xijp,ikpy->xykj', int3c_ip1, _load_dim0(rhokb_ip1_IkP, p0, p1))
vk1a[:,:,:,p0:p1] += vk2a_buf[:,:,:,p0:p1]
vk1b[:,:,:,p0:p1] += vk2b_buf[:,:,:,p0:p1]
t1 = log.timer_debug1('contracting int2e_ip1ip2 for atom %d'%ia, *t1)
int3c_ip1 = None
# (11|0)(0|00) without response of RI basis
int3c_ipvip1 = get_int3c_ipvip1(shls_slice)
vj1 = np.einsum('xijp,p->xji', int3c_ipvip1, rhoj0_P).reshape(3,3,nao,p1-p0)
if with_k:
tmp = lib.einsum('pki,ji->pkj', rhok0a_Pl_, mocca[p0:p1])
vk1a += lib.einsum('xijp,pki->xjk', int3c_ipvip1, tmp).reshape(3,3,nao,nao)
tmp = lib.einsum('pki,ji->pkj', rhok0b_Pl_, moccb[p0:p1])
vk1b += lib.einsum('xijp,pki->xjk', int3c_ipvip1, tmp).reshape(3,3,nao,nao)
t1 = log.timer_debug1('contracting int2e_ipvip1 for atom %d'%ia, *t1)
int3c_ipvip1 = tmp = None
s1ao = numpy.zeros((3,nao,nao))
s1ao[:,p0:p1] += s1a[:,p0:p1]
s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)
s1ooa = numpy.einsum('xpq,pi,qj->xij', s1ao, mocca, mocca)
s1oob = numpy.einsum('xpq,pi,qj->xij', s1ao, moccb, moccb)
e1[i0,i0] -= numpy.einsum('xypq,pq->xy', s1aa[:,:,p0:p1], dme0[p0:p1])*2
ej[i0,i0] += numpy.einsum('xypq,pq->xy', vj1_diag[:,:,p0:p1], dm0[p0:p1])*2
if with_k:
ek[i0,i0] += numpy.einsum('xypq,pq->xy', vk1a_diag[:,:,p0:p1], dm0a[p0:p1])*2
ek[i0,i0] += numpy.einsum('xypq,pq->xy', vk1b_diag[:,:,p0:p1], dm0b[p0:p1])*2
for j0, ja in enumerate(atmlst[:i0+1]):
q0, q1 = aoslices[ja][2:]
ej[i0,j0] += numpy.einsum('xypq,pq->xy', vj1[:,:,q0:q1], dm0[q0:q1,p0:p1])*2
e1[i0,j0] -= numpy.einsum('xypq,pq->xy', s1ab[:,:,p0:p1,q0:q1], dme0[p0:p1,q0:q1])*2
if with_k:
ek[i0,j0] += numpy.einsum('xypq,pq->xy', vk1a[:,:,q0:q1], dm0a[q0:q1])*2
ek[i0,j0] += numpy.einsum('xypq,pq->xy', vk1b[:,:,q0:q1], dm0b[q0:q1])*2
h1ao = hcore_deriv(ia, ja)
e1[i0,j0] += numpy.einsum('xypq,pq->xy', h1ao, dm0)
# The first order RI basis response
# (10|1)(0|00)
# (10|0)(1|0)(0|00)
# (10|0)(0|1)(0|00)
# (10|0)(1|00)
if hessobj.auxbasis_response:
wk1_Pij = rho_ip1[p0:p1].transpose(2,3,0,1)
rhoj1_P = np.einsum('pxij,ji->px', wk1_Pij, dm0[:,p0:p1])
# (10|1)(0|0)(0|00)
int3c_ip1ip2 = get_int3c_ip1ip2(shls_slice)
wj11_p = np.einsum('xijp,ji->xp', int3c_ip1ip2, dm0[:,p0:p1])
# (10|0)(1|0)(0|00)
wj0_01 = np.einsum('ypq,q->yp', int2c_ip1, rhoj0_P)
if with_k:
rhok0_P_I = lib.einsum('plj,il->pji', rhok0a_Pl_, dm0a[p0:p1])
rhok0_PJI = lib.einsum('pji,Jj->pJi', rhok0_P_I, mocca)
rhok0_P_I = lib.einsum('plj,il->pji', rhok0b_Pl_, dm0b[p0:p1])
rhok0_PJI+= lib.einsum('pji,Jj->pJi', rhok0_P_I, moccb)
wk1_pJI = lib.einsum('ypq,qji->ypji', int2c_ip1, rhok0_PJI)
wk1_IpJ = lib.einsum('ipyk,kj->ipyj', wka_ip2_Ipk[p0:p1], dm0a)
wk1_IpJ+= lib.einsum('ipyk,kj->ipyj', wkb_ip2_Ipk[p0:p1], dm0b)
rho2c_PQ = lib.einsum('pxij,qji->xqp', wk1_Pij, rhok0_PJI)
for j0, (q0, q1) in enumerate(auxslices[:,2:]):
# (10|1)(0|00)
_ej = np.einsum('xp,p->x', wj11_p[:,q0:q1], rhoj0_P[q0:q1]).reshape(3,3)
# (10|0)(0|1)(0|00)
_ej -= lib.einsum('yqp,q,px->xy', int2c_ip1[:,q0:q1], rhoj0_P[q0:q1], rhoj1_P)
# (10|0)(1|0)(0|00)
_ej -= lib.einsum('px,yp->xy', rhoj1_P[q0:q1], wj0_01[:,q0:q1])
# (10|0)(1|00)
_ej += lib.einsum('px,py->xy', rhoj1_P[q0:q1], wj_ip2[q0:q1])
if hessobj.auxbasis_response > 1:
ej[i0,j0] += _ej * 2
ej[j0,i0] += _ej.T * 2
else:
ej[i0,j0] += _ej
ej[j0,i0] += _ej.T
if with_k:
_ek = lib.einsum('xijp,pji->x', int3c_ip1ip2[:,:,:,q0:q1],
rhok0_PJI[q0:q1]).reshape(3,3)
_ek -= lib.einsum('pxij,ypji->xy', wk1_Pij[q0:q1], wk1_pJI[:,q0:q1])
_ek -= lib.einsum('xqp,yqp->xy', rho2c_PQ[:,q0:q1], int2c_ip1[:,q0:q1])
_ek += lib.einsum('pxij,ipyj->xy', wk1_Pij[q0:q1], wk1_IpJ[:,q0:q1])
if hessobj.auxbasis_response > 1:
ek[i0,j0] += _ek * 2
ek[j0,i0] += _ek.T * 2
else:
ek[i0,j0] += _ek
ek[j0,i0] += _ek.T
int3c_ip1ip2 = None
# The second order RI basis response
if hessobj.auxbasis_response > 1:
# (00|2)(0|00)
# (00|0)(2|0)(0|00)
shl0, shl1, p0, p1 = auxslices[ia]
shls_slice = (0, nbas, 0, nbas, shl0, shl1)
int3c_ipip2 = get_int3c_ipip2(shls_slice)
ej[i0,i0] += np.einsum('xijp,ji,p->x', int3c_ipip2, dm0, rhoj0_P[p0:p1]).reshape(3,3)
ej[i0,i0] -= np.einsum('p,xpq,q->x', rhoj0_P[p0:p1], int2c_ipip1[:,p0:p1], rhoj0_P).reshape(3,3)
if with_k:
rhok0_PJI = lib.einsum('Pij,Jj,Ii->PJI', rhok0a_P__[p0:p1], mocca, mocca)
rhok0_PJI += lib.einsum('Pij,Jj,Ii->PJI', rhok0b_P__[p0:p1], moccb, moccb)
ek[i0,i0] += np.einsum('xijp,pij->x', int3c_ipip2, rhok0_PJI).reshape(3,3)
ek[i0,i0] -= np.einsum('pq,xpq->x', rho2c_0[p0:p1], int2c_ipip1[:,p0:p1]).reshape(3,3)
rhok0_PJI = None
# (00|0)(1|1)(0|00)
# (00|1)(1|0)(0|00)
# (00|1)(0|1)(0|00)
# (00|1)(1|00)
rhoj1 = lib.einsum('px,pq->xq', wj_ip2[p0:p1], int2c_inv[p0:p1])
# (00|0)(0|1)(1|0)(0|00)
rhoj0_01 = lib.einsum('xp,pq->xq', wj0_01[:,p0:p1], int2c_inv[p0:p1])
# (00|0)(1|0)(1|0)(0|00)
ip1_2c_2c = lib.einsum('xpq,qr->xpr', int2c_ip1[:,p0:p1], int2c_inv)
rhoj0_10 = lib.einsum('p,xpq->xq', rhoj0_P[p0:p1], ip1_2c_2c)
if with_k:
# (00|0)(0|1)(1|0)(0|00)
ip1_rho2c = .5 * lib.einsum('xpq,qr->xpr', int2c_ip1[:,p0:p1], rho2c_0)
rho2c_1 = lib.einsum('xrq,rp->xpq', ip1_rho2c, int2c_inv[p0:p1])
# (00|0)(1|0)(1|0)(0|00)
rho2c_1 += lib.einsum('xrp,rq->xpq', ip1_2c_2c, rho2c_0[p0:p1])
# (00|1)(0|1)(0|00)
# (00|1)(1|0)(0|00)
int3c_ip2 = get_int3c_ip2(shls_slice)
tmp = lib.einsum('xuvr,vj,ui,qij,rp->xpq', int3c_ip2,
mocca, mocca, rhok0a_P__, int2c_inv[p0:p1])
tmp+= lib.einsum('xuvr,vj,ui,qij,rp->xpq', int3c_ip2,
moccb, moccb, rhok0b_P__, int2c_inv[p0:p1])
rho2c_1 -= tmp
rho2c_1 -= tmp.transpose(0,2,1)
int3c_ip2 = tmp = None
for j0, (q0, q1) in enumerate(auxslices[:,2:]):
_ej = 0
# (00|0)(1|1)(0|00)
# (00|0)(1|0)(0|1)(0|00)
_ej += .5 * np.einsum('p,xypq,q->xy', rhoj0_P[p0:p1], int2c_ip_ip[:,:,p0:p1,q0:q1], rhoj0_P[q0:q1])
# (00|1)(1|0)(0|00)
_ej -= lib.einsum('xp,yp->xy', rhoj1[:,q0:q1], wj0_01[:,q0:q1])
# (00|1)(1|00)
_ej += .5 * lib.einsum('xp,py->xy', rhoj1[:,q0:q1], wj_ip2[q0:q1])
# (00|0)(0|1)(1|0)(0|00)
_ej += .5 * np.einsum('xp,yp->xy', rhoj0_01[:,q0:q1], wj0_01[:,q0:q1])
# (00|1)(0|1)(0|00)
_ej -= lib.einsum('yqp,q,xp->xy', int2c_ip1[:,q0:q1], rhoj0_P[q0:q1], rhoj1)
# (00|0)(1|0)(1|0)(0|00)
_ej += np.einsum('xp,yp->xy', rhoj0_10[:,q0:q1], wj0_01[:,q0:q1])
ej[i0,j0] += _ej
ej[j0,i0] += _ej.T
if with_k:
# (00|0)(1|1)(0|00)
# (00|0)(1|0)(0|1)(0|00)
_ek = .5 * np.einsum('pq,xypq->xy', rho2c_0[p0:p1,q0:q1], int2c_ip_ip[:,:,p0:p1,q0:q1])
# (00|1)(0|1)(0|00)
# (00|1)(1|0)(0|00)
# (00|0)(0|1)(1|0)(0|00)
# (00|0)(1|0)(1|0)(0|00)
_ek += np.einsum('xpq,ypq->xy', rho2c_1[:,q0:q1], int2c_ip1[:,q0:q1])
# (00|1)(1|00)
_ek += .5 * lib.einsum('pxij,pq,qyij->xy', wka_ip2_P__[p0:p1],
int2c_inv[p0:p1,q0:q1], wka_ip2_P__[q0:q1])
_ek += .5 * lib.einsum('pxij,pq,qyij->xy', wkb_ip2_P__[p0:p1],
int2c_inv[p0:p1,q0:q1], wkb_ip2_P__[q0:q1])
ek[i0,j0] += _ek
ek[j0,i0] += _ek.T
for i0, ia in enumerate(atmlst):
for j0 in range(i0):
e1[j0,i0] = e1[i0,j0].T
ej[j0,i0] = ej[i0,j0].T
ek[j0,i0] = ek[i0,j0].T
log.timer('UHF partial hessian', *time0)
return e1, ej, ek
def make_h1(hessobj, mo_coeff, mo_occ, chkfile=None, atmlst=None, verbose=None):
time0 = t1 = (time.clock(), time.time())
mol = hessobj.mol
if atmlst is None:
atmlst = range(mol.natm)
nao, nmo = mo_coeff[0].shape
mocca = mo_coeff[0][:,mo_occ[0]>0]
moccb = mo_coeff[1][:,mo_occ[1]>0]
dm0a = numpy.dot(mocca, mocca.T)
dm0b = numpy.dot(moccb, moccb.T)
hcore_deriv = hessobj.base.nuc_grad_method().hcore_generator(mol)
aoslices = mol.aoslice_by_atom()
h1aoa = [None] * mol.natm
h1aob = [None] * mol.natm
for ia, h1, vj1, vk1a, vk1b in _gen_jk(hessobj, mo_coeff, mo_occ, chkfile,
atmlst, verbose, True):
h1a = h1 + vj1 - vk1a
h1b = h1 + vj1 - vk1b
if chkfile is None:
h1aoa[ia] = h1a
h1aob[ia] = h1b
else:
lib.chkfile.save(chkfile, 'scf_f1ao/0/%d' % ia, h1a)
lib.chkfile.save(chkfile, 'scf_f1ao/1/%d' % ia, h1b)
if chkfile is None:
return (h1aoa,h1aob)
else:
return chkfile
def _gen_jk(hessobj, mo_coeff, mo_occ, chkfile=None, atmlst=None,
verbose=None, with_k=True):
time0 = t1 = (time.clock(), time.time())
mol = hessobj.mol
if atmlst is None:
atmlst = range(mol.natm)
auxmol = hessobj.base.with_df.auxmol
nbas = mol.nbas
auxslices = auxmol.aoslice_by_atom()
aux_loc = auxmol.ao_loc
nao, nmo = mo_coeff[0].shape
mocca = mo_coeff[0][:,mo_occ[0]>0]
moccb = mo_coeff[1][:,mo_occ[1]>0]
nocca = mocca.shape[1]
noccb = moccb.shape[1]
dm0a = numpy.dot(mocca, mocca.T)
dm0b = numpy.dot(moccb, moccb.T)
dm0 = dm0a + dm0b
hcore_deriv = hessobj.base.nuc_grad_method().hcore_generator(mol)
get_int3c = _int3c_wrapper(mol, auxmol, 'int3c2e', 's1')
aoslices = mol.aoslice_by_atom()
naux = auxmol.nao
ftmp = lib.H5TmpFile()
rho0_Pij = ftmp.create_group('rho0_Pij')
wj_Pij = ftmp.create_group('wj_Pij')
int2c = auxmol.intor('int2c2e', aosym='s1')
int2c_low = scipy.linalg.cho_factor(int2c, lower=True)
int2c_ip1 = auxmol.intor('int2c2e_ip1', aosym='s1')
rhoj0_P = 0
if with_k:
rhok0a_Pl_ = np.empty((naux,nao,nocca))
rhok0b_Pl_ = np.empty((naux,nao,noccb))
for i, (shl0, shl1, p0, p1) in enumerate(aoslices):
int3c = get_int3c((shl0, shl1, 0, nbas, 0, auxmol.nbas))
coef3c = scipy.linalg.cho_solve(int2c_low, int3c.reshape(-1,naux).T, overwrite_b=True)
rho0_Pij['%.4d'%i] = coef3c = coef3c.reshape(naux,p1-p0,nao)
rhoj0_P += np.einsum('pkl,kl->p', coef3c, dm0[p0:p1])
if with_k:
rhok0a_Pl_[:,p0:p1] = lib.einsum('pij,jk->pik', coef3c, mocca)
rhok0b_Pl_[:,p0:p1] = lib.einsum('pij,jk->pik', coef3c, moccb)
if hessobj.auxbasis_response:
wj_Pij['%.4d'%i] = lib.einsum('xqp,pij->qixj', int2c_ip1, coef3c)
int3c = coef3c = None
get_int3c_ip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip1', 's1')
get_int3c_ip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip2', 's1')
aux_ranges = ao2mo.outcore.balance_partition(auxmol.ao_loc, 480)
vk1a_buf = np.zeros((3,nao,nao))
vk1b_buf = np.zeros((3,nao,nao))
vj1_buf = np.zeros((mol.natm,3,nao,nao))
for shl0, shl1, nL in aux_ranges:
shls_slice = (0, nbas, 0, nbas, shl0, shl1)
p0, p1 = aux_loc[shl0], aux_loc[shl1]
int3c_ip1 = get_int3c_ip1(shls_slice)
coef3c = _load_dim0(rho0_Pij, p0, p1)
for i, (shl0, shl1, q0, q1) in enumerate(aoslices):
wj1 = np.einsum('xijp,ji->xp', int3c_ip1[:,q0:q1], dm0[:,q0:q1])
vj1_buf[i] += np.einsum('xp,pij->xij', wj1, coef3c)
rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0a_Pl_[p0:p1], mocca)
vk1a_buf += lib.einsum('xijp,plj->xil', int3c_ip1, rhok0_PlJ[p0:p1])
rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0b_Pl_[p0:p1], moccb)
vk1b_buf += lib.einsum('xijp,plj->xil', int3c_ip1, rhok0_PlJ[p0:p1])
int3c_ip1 = None
vj1_buf = ftmp['vj1_buf'] = vj1_buf
for i0, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
shls_slice = (shl0, shl1, 0, nbas, 0, auxmol.nbas)
int3c_ip1 = get_int3c_ip1(shls_slice)
vj1 = -np.asarray(vj1_buf[ia])
rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0a_Pl_, mocca[p0:p1])
vk1a = -lib.einsum('xijp,pki->xkj', int3c_ip1, rhok0_PlJ)
rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0b_Pl_, moccb[p0:p1])
vk1b = -lib.einsum('xijp,pki->xkj', int3c_ip1, rhok0_PlJ)
vj1[:,p0:p1] -= np.einsum('xijp,p->xij', int3c_ip1, rhoj0_P)
vk1a[:,p0:p1] -= vk1a_buf[:,p0:p1]
vk1b[:,p0:p1] -= vk1b_buf[:,p0:p1]
if hessobj.auxbasis_response:
shl0, shl1, q0, q1 = auxslices[ia]
shls_slice = (0, nbas, 0, nbas, shl0, shl1)
int3c_ip2 = get_int3c_ip2(shls_slice)
rhoj1 = np.einsum('xijp,ji->xp', int3c_ip2, dm0)
coef3c = _load_dim0(rho0_Pij, q0, q1)
Pij = _load_dim0(wj_Pij, q0, q1)
vj1 += .5 * np.einsum('pij,xp->xij', coef3c, -rhoj1)
vj1 += .5 * np.einsum('xijp,p->xij', int3c_ip2, -rhoj0_P[q0:q1])
vj1 -= .5 * lib.einsum('xpq,q,pij->xij', int2c_ip1[:,q0:q1], -rhoj0_P, coef3c)
vj1 -= .5 * lib.einsum('pixj,p->xij', Pij, -rhoj0_P[q0:q1])
rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0a_Pl_[q0:q1], mocca)
vk1a -= lib.einsum('plj,xijp->xil', rhok0_PlJ, int3c_ip2)
vk1a += lib.einsum('pjxi,plj->xil', Pij, rhok0_PlJ)
rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0b_Pl_[q0:q1], moccb)
vk1b -= lib.einsum('plj,xijp->xil', rhok0_PlJ, int3c_ip2)
vk1b += lib.einsum('pjxi,plj->xil', Pij, rhok0_PlJ)
vj1 = vj1 + vj1.transpose(0,2,1)
vk1a = vk1a + vk1a.transpose(0,2,1)
vk1b = vk1b + vk1b.transpose(0,2,1)
h1 = hcore_deriv(ia)
yield ia, h1, vj1, vk1a, vk1b
class Hessian(uhf_hess.Hessian):
    '''Non-relativistic UHF Hessian with density fitting'''
def __init__(self, mf):
self.auxbasis_response = 1
uhf_hess.Hessian.__init__(self, mf)
partial_hess_elec = partial_hess_elec
make_h1 = make_h1
#TODO: Insert into DF class
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
[1 , (1. , 0. , 0.000)],
[1 , (0. , 1. , 0.000)],
[1 , (0. , -1.517 , 1.177)],
[1 , (0. , 1.517 , 1.177)] ]
mol.basis = '631g'
mol.spin = 2
mol.unit = 'B'
mol.build()
mf = scf.UHF(mol).density_fit()
mf.conv_tol = 1e-14
mf.scf()
n3 = mol.natm * 3
hobj = Hessian(mf)
e2 = hobj.kernel()
ref = scf.UHF(mol).run().Hessian().kernel()
print(abs(e2-ref).max())
print(lib.finger(e2) - -0.23856667321975722)
e2 = hobj.set(auxbasis_response=2).kernel()
print(abs(e2-ref).max())
    print(lib.finger(e2) - -0.72321237584876141)
|
gkc1000/pyscf
|
pyscf/df/hessian/uhf.py
|
Python
|
apache-2.0
| 25,334
|
[
"PySCF"
] |
5cab8ab8908106463f93cf3a25caee6a5d58d26d411a2fa329226e02608e8fa1
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageMedian3D(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageMedian3D(), 'Processing.',
('vtkImageData',), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
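# The class above only routes the stock VTK median filter through DeVIDE; used
# directly, the wrapped filter looks roughly like this (sketch; the exact
# input-setter name depends on the VTK version):
#
#     f = vtk.vtkImageMedian3D()
#     f.SetKernelSize(3, 3, 3)
#     f.SetInputData(image)   # a vtkImageData; older VTK releases use SetInput
#     f.Update()
#     result = f.GetOutput()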
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageMedian3D.py
|
Python
|
bsd-3-clause
| 489
|
[
"VTK"
] |
3a0a6ada2f74f638045148dd425d5bf4c127ff8347a0b602b10bf39cf8281c2f
|
#!/usr/bin/env python
from data.electron_configuration import configurations, core_states
from ase.units import Ha
import copy
from data.ONCV_PBEsol_conf import ONCV_PBEsol_conf
class econf():
def __init__(self, symbol):
"""
reference electronic configuration of elements.
"""
self.symbol = symbol
self.whole_conf = [[None] + list(Ha2eV(x))[:-1] + [None]
for x in configurations[symbol][1]]
def whole_configuration(self):
return self.whole_conf
def valence_configuration(self, ncore=None, valence=0):
if ncore is None:
ncore = core_states(self.symbol)
# electronic configuration of 0 valence atom.
neutral_conf = self.whole_conf[ncore:]
v_conf = copy.copy(neutral_conf)
v_conf = atomconf_to_ionconf(v_conf, valence)
return v_conf
def wannier_conf(self, ncore=None, valence=0):
vconf = self.valence_configuration(ncore=ncore, valence=valence)
c = []
for s in vconf:
m, n, l, occ, spin = s
c.append([None, l, n, occ, None])
return c
def get_oncv_econf(elem, fname):
with open(fname) as myfile:
lines = myfile.readlines()
for line in lines[-100:]:
if line.strip().startswith(elem):
try:
e = line.strip().split()[0]
except:
pass
if elem == e:
w = line.strip().split()
z = float(w[1])
nc = int(w[2])
nv = int(w[3])
return elem, z, nc, nv
def atomconf_to_ionconf(neutral_conf, valence):
v_conf = copy.copy(neutral_conf)
if valence < 0:
m, l, n, occ, spin = v_conf[-1]
occ -= valence
if occ < 0 or occ > (2 * l + 1) * 2:
raise ValueError("occupation cannot be <0 or > 2l+1")
v_conf[-1] = (m, l, n, occ, spin)
elif valence > 0:
ind = -1
nel = valence
while nel > 0:
m, l, n, occ, spin = v_conf[ind]
if occ < nel:
nel -= occ
occ = 0
v_conf[ind] = m, l, n, occ, spin
else:
occ -= nel
nel = 0
v_conf[ind] = m, l, n, occ, spin
break
ind -= 1
return v_conf
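# Worked example (hypothetical shells, read as (m, l, n, occ, spin); only the
# occupation entry is modified):
#     conf = [(None, 0, 3, 2, None),   # 3s2
#             (None, 1, 3, 4, None)]   # 3p4
#     atomconf_to_ionconf(conf, 2)     # 3p occupation 4 -> 2 (cation)
#     atomconf_to_ionconf(conf, -2)    # 3p occupation 4 -> 6 (anion)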
def gen_conf_dict():
evlist = [('Be', 2), ('Mg', 2), ('Ca', 2), ('Sr', 2), ('Ba', 2), ('Pb', 2),
('Bi', 3), ('Li', 1), ('Na', 1)]
conf_dict = {}
for ev in evlist:
elem, val = ev
conf = econf(elem).wannier_conf(valence=val)
conf_dict[ev] = conf
    print(conf_dict[('Li', 1)])
return conf_dict
def gen_ion_conf_dict(evlist, atom_conf_dict):
conf_dict = {}
# in case a dict is "wrongly" used, which is often the case.
if isinstance(evlist,dict):
evlist=zip(evlist.keys(),evlist.values())
for ev in evlist:
elem, val = ev
conf = atomconf_to_ionconf(atom_conf_dict[elem], val)
conf_dict[ev] = conf
return conf_dict
def Ha2eV(x):
"""
(n,l,occ,energy) energy Ha->eV.
"""
n, l, occ, energy = x
return (n, l, occ, energy * Ha)
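# Quick check (Ha comes from ase.units, roughly 27.2114 eV per Hartree):
#     Ha2eV((2, 1, 4, -0.5))  ->  (2, 1, 4, -13.6057...)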
def test():
ec = econf('O')
print(ec.whole_configuration())
print(ec.valence_configuration(valence=-2))
print(econf('P').valence_configuration(valence=-3))
print(econf('Ba').valence_configuration(valence=4))
print(econf('Pb').valence_configuration(valence=4))
print(econf('Ca').valence_configuration(valence=4))
    print('Be', ONCV_PBEsol_conf['Be'])
print(gen_ion_conf_dict([('Be', 1)], ONCV_PBEsol_conf))
#test()
#print gen_conf_dict()
|
mailhexu/pyDFTutils
|
resource/phonon_ana/pyFA/psp.py
|
Python
|
lgpl-3.0
| 3,737
|
[
"ASE"
] |
d3e106494cb9e55ceeafd177ba521419b99ca7902926fda4d238fea513a9052c
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".graphicstyle")
#-------------------------------------------------------------------------
#
# Line style
#
#-------------------------------------------------------------------------
SOLID = 0
DASHED = 1
DOTTED = 2
# Notes about adding new line styles:
# 1) the first style is used when an invalid style is specified by the report
# 2) the style names are used by the ODF generator and should be unique
# 3) the line style constants above need to be imported in the
# gen.plug.docgen.__init__ file so they can be used in a report add-on
line_style_names = ('solid', 'dashed', 'dotted')
_DASH_ARRAY = [ [1, 0], [2, 4], [1, 2] ]
def get_line_style_by_name(style_name):
which = 0
for (idx, sn) in enumerate(line_style_names):
if sn == style_name:
which = idx
break
return _DASH_ARRAY[which]
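# Usage sketch: names map onto _DASH_ARRAY above and unknown names fall back to
# the first (solid) entry.
#     get_line_style_by_name('dashed')    # -> [2, 4]
#     get_line_style_by_name('unknown')   # -> [1, 0]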
#------------------------------------------------------------------------
#
# GraphicsStyle
#
#------------------------------------------------------------------------
class GraphicsStyle:
"""
Defines the properties of graphics objects, such as line width,
    color, fill, etc.
"""
def __init__(self, obj=None):
"""
Initialize the object with default values, unless a source
object is specified. In that case, make a copy of the source
object.
"""
if obj:
self.para_name = obj.para_name
self.shadow = obj.shadow
self.shadow_space = obj.shadow_space
self.color = obj.color
self.fill_color = obj.fill_color
self.lwidth = obj.lwidth
self.lstyle = obj.lstyle
else:
self.para_name = ""
self.shadow = 0
self.shadow_space = 0.2
self.lwidth = 0.5
self.color = (0, 0, 0)
self.fill_color = (255, 255, 255)
self.lstyle = SOLID
def set_line_width(self, val):
"""
sets the line width
"""
self.lwidth = val
def get_line_width(self):
"""
        Return the line width.
"""
return self.lwidth
def get_line_style(self):
return self.lstyle
def set_line_style(self, val):
self.lstyle = val
def get_dash_style(self, val = None):
if val is None:
val = self.lstyle
if val >= 0 and val < len(_DASH_ARRAY):
return _DASH_ARRAY[val]
else:
return _DASH_ARRAY[0]
def get_dash_style_name(self, val=None):
if val is None:
val = self.lstyle
if val >= 0 and val < len(line_style_names):
return line_style_names[val]
else:
return line_style_names[0]
def set_paragraph_style(self, val):
self.para_name = val
def set_shadow(self, val, space=0.2):
self.shadow = val
self.shadow_space = space
def get_shadow_space(self):
return self.shadow_space
def set_color(self, val):
self.color = val
def set_fill_color(self, val):
self.fill_color = val
def get_paragraph_style(self):
return self.para_name
def get_shadow(self):
return self.shadow
def get_color(self):
return self.color
def get_fill_color(self):
return self.fill_color
|
sam-m888/gprime
|
gprime/plug/docgen/graphicstyle.py
|
Python
|
gpl-2.0
| 4,906
|
[
"Brian"
] |
eff23ff405ddf31b1f60b0d448a14d700e106791a9ee137a966b60bc90be91ba
|
# -*- coding: utf-8 -*-
import os
import csv
import unittest
import pytest
from sqlalchemy.orm import Session, session
from skosprovider_sqlalchemy.models import Base, Initialiser
from skosprovider_sqlalchemy.utils import (
import_provider,
VisitationCalculator
)
from tests import DBTestCase
def _get_menu():
from skosprovider.providers import (
SimpleCsvProvider
)
ifile = open(
os.path.join(os.path.dirname(__file__), 'data', 'menu.csv'),
"r"
)
reader = csv.reader(ifile)
csvprovider = SimpleCsvProvider(
{'id': 'MENU'},
reader
)
ifile.close()
return csvprovider
def _get_geo():
from skosprovider.providers import DictionaryProvider
geo = DictionaryProvider(
{'id': 'GEOGRAPHY'},
[
{
'id': '1',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'World'}
],
'narrower': [2, 3]
}, {
'id': 2,
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'Europe'}
],
'narrower': [4, 5, 10], 'broader': [1]
}, {
'id': 3,
'labels': [
{
'type': 'prefLabel', 'language': 'en',
'label': 'North-America'
}
],
'narrower': [6], 'broader': [1]
}, {
'id': 4,
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'Belgium'}
],
'narrower': [7, 8, 9], 'broader': [2], 'related': [10]
}, {
'id': 5,
'labels': [
{
'type': 'prefLabel', 'language': 'en',
'label': 'United Kingdom'
}
],
'broader': [2]
}, {
'id': 6,
'labels': [
{
'type': 'prefLabel', 'language': 'en',
'label': 'United States of America'
}
],
'broader': [3]
}, {
'id': 7,
'labels': [
{
'type': 'prefLabel',
'language': 'en',
'label': 'Flanders'
}, {
'type': 'prefLabel',
'language': 'nl-BE',
'label': 'Vlaanderen'
}
],
'broader': [4]
}, {
'id': 8,
'labels': [
{
'type': 'prefLabel',
'language': 'en',
'label': 'Brussels'
}
],
'broader': [4]
}, {
'id': 9,
'labels': [
{
'type': 'prefLabel',
'language': 'en',
'label': 'Wallonie'
}
],
'broader': [4]
}, {
'id': 10,
'labels': [
{
'type': 'prefLabel',
'language': 'nl',
'label': 'Nederland'
}
],
'related': [4]
}, {
'id': '333',
'type': 'collection',
'labels': [
{
'type': 'prefLabel', 'language': 'en',
'label': 'Places where dutch is spoken'
}
],
'members': ['4', '7', 8, 10]
}
]
)
return geo
def _get_buildings():
from skosprovider.providers import DictionaryProvider
buildings = DictionaryProvider(
{'id': 'BUILDINGS'},
[
{
'id': '1',
'labels': [
{
'type': 'prefLabel',
'language': 'en',
'label': 'Fortifications'
}
],
'narrower': [2],
'matches': {
'exact': ['http://vocab.getty.edu/aat/300006888']
}
}, {
'id': 2,
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'Castle'}
],
'broader': [1, 3],
'matches': {
'broad': ['http://vocab.getty.edu/aat/300006888']
}
}, {
'id': 3,
'labels': [
{
'type': 'prefLabel',
'language': 'en',
'label': 'Habitations'
}
],
'narrower': [2, 4],
'matches': {
'close': ['http://vocab.getty.edu/aat/300005425']
}
}, {
'id': 4,
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'Huts'},
{'type': 'prefLabel', 'language': None, 'label': 'Hutten'}
],
'broader': [3],
'matches': {
'exact': ['http://vocab.getty.edu/aat/300004824']
}
}
]
)
return buildings
def _get_materials():
from skosprovider.providers import DictionaryProvider
materials = DictionaryProvider(
{'id': 'MATERIALS'},
[
{
'id': '1',
'labels': [
{
'type': 'prefLabel',
'language': 'en',
'label': 'Cardboard'
}
],
'narrower': [2],
'related': [3],
'subordinate_arrays': [56]
}, {
'id': '789',
'type': 'collection',
'labels': [
{
'type': 'prefLabel',
'language': 'en',
'label': 'Wood by Tree'
}
],
'members': [654]
}
]
)
return materials
def _get_heritage_types():
import json
typology_data = json.load(
open(os.path.join(os.path.dirname(__file__), 'data', 'typologie.js')),
)['typologie']
from skosprovider.providers import DictionaryProvider
from skosprovider.uri import UriPatternGenerator
from skosprovider.skos import ConceptScheme
heritage_types = DictionaryProvider(
{'id': 'HERITAGE_TYPES'},
typology_data,
uri_generator=UriPatternGenerator('https://id.erfgoed.net/thesauri/erfgoedtypes/%s'),
concept_scheme=ConceptScheme(
uri='https://id.erfgoed.net/thesauri/erfgoedtypes',
labels=[
{'label': 'Erfgoedtypes', 'type': 'prefLabel', 'language': 'nl-BE'},
{'label': 'Heritagetypes', 'type': 'prefLabel', 'language': 'en'}
],
notes=[
{
'note': 'Different types of heritage.',
'type': 'definition',
'language': 'en'
}, {
'note': 'Verschillende types van erfgoed.',
'type': 'definition',
'language': 'nl'
}
],
languages=['nl', 'en']
)
)
return heritage_types
def _get_event_types():
import json
event_data = json.load(
open(os.path.join(os.path.dirname(__file__), 'data', 'gebeurtenis.js')),
)['gebeurtenis']
from skosprovider.providers import DictionaryProvider
from skosprovider.uri import UriPatternGenerator
heritage_types = DictionaryProvider(
{'id': 'EVENT_TYPES'},
event_data,
uri_generator=UriPatternGenerator('https://id.erfgoed.net/thesauri/gebeurtenistypes/%s')
)
return heritage_types
class TestImportProviderTests(DBTestCase):
def setUp(self):
Base.metadata.create_all(self.engine)
self.session = self.session_maker()
Initialiser(self.session).init_all()
def tearDown(self):
self.session.rollback()
session.close_all_sessions()
Base.metadata.drop_all(self.engine)
def _get_cs(self):
from skosprovider_sqlalchemy.models import (
ConceptScheme as ConceptSchemeModel
)
return ConceptSchemeModel(
id=68,
uri='urn:x-skosprovider:cs:68'
)
def test_empty_provider(self):
from skosprovider_sqlalchemy.models import (
ConceptScheme as ConceptSchemeModel
)
from skosprovider.providers import DictionaryProvider
p = DictionaryProvider({'id': 'EMPTY'}, [])
cs = self._get_cs()
self.session.add(cs)
import_provider(p, cs, self.session)
scheme = self.session.query(ConceptSchemeModel).get(68)
assert scheme == cs
def test_menu(self):
from skosprovider_sqlalchemy.models import (
Concept as ConceptModel
)
csvprovider = _get_menu()
cs = self._get_cs()
self.session.add(cs)
import_provider(csvprovider, cs, self.session)
lobster = self.session.query(ConceptModel) \
.filter(ConceptModel.conceptscheme == cs) \
.filter(ConceptModel.concept_id == 11) \
.one()
assert 11 == lobster.concept_id
assert 'urn:x-skosprovider:menu:11' == lobster.uri
assert 'Lobster Thermidor' == str(lobster.label())
assert 1 == len(lobster.notes)
def test_geo(self):
from skosprovider_sqlalchemy.models import (
Concept as ConceptModel,
Collection as CollectionModel
)
geoprovider = _get_geo()
cs = self._get_cs()
self.session.add(cs)
import_provider(geoprovider, cs, self.session)
world = self.session.query(ConceptModel) \
.filter(ConceptModel.conceptscheme == cs) \
.filter(ConceptModel.concept_id == 1) \
.one()
assert world.concept_id == 1
assert 'urn:x-skosprovider:geography:1' == world.uri
assert 'World' == str(world.label('en'))
assert 1 == len(world.labels)
assert 2 == len(world.narrower_concepts)
dutch = self.session.query(CollectionModel) \
.filter(CollectionModel.conceptscheme == cs) \
.filter(CollectionModel.concept_id == 333) \
.one()
assert 333 == dutch.concept_id
assert 'urn:x-skosprovider:geography:333' == dutch.uri
assert 'collection' == dutch.type
assert 1 == len(dutch.labels)
assert 4 == len(dutch.members)
netherlands = self.session.query(ConceptModel) \
.filter(ConceptModel.conceptscheme == cs) \
.filter(ConceptModel.concept_id == 10) \
.one()
assert 10 == netherlands.concept_id
assert 'concept' == netherlands.type
assert 1 == len(netherlands.labels)
assert 2 == netherlands.broader_concepts.pop().concept_id
assert 1 == len(netherlands.related_concepts)
def test_buildings(self):
from skosprovider_sqlalchemy.models import (
Concept as ConceptModel
)
buildingprovider = _get_buildings()
cs = self._get_cs()
self.session.add(cs)
import_provider(buildingprovider, cs, self.session)
castle = self.session.query(ConceptModel) \
.filter(ConceptModel.conceptscheme == cs) \
.filter(ConceptModel.concept_id == 2) \
.one()
assert 2 == len(castle.broader_concepts)
hut = self.session.query(ConceptModel) \
.filter(ConceptModel.conceptscheme == cs) \
.filter(ConceptModel.concept_id == 4) \
.one()
assert 1 == len(hut.broader_concepts)
assert 1 == len(hut.matches)
assert 'exactMatch' == hut.matches[0].matchtype_id
assert 'http://vocab.getty.edu/aat/300004824' == hut.matches[0].uri
def test_heritage_types(self):
from skosprovider_sqlalchemy.models import (
Concept as ConceptModel,
)
heritagetypesprovider = _get_heritage_types()
cs = self._get_cs()
self.session.add(cs)
import_provider(heritagetypesprovider, cs, self.session)
bomen = self.session.query(ConceptModel) \
.filter(ConceptModel.conceptscheme == cs) \
.filter(ConceptModel.concept_id == 72) \
.one()
assert 2 == len(bomen.narrower_collections)
assert 2 == len(cs.labels)
assert 'Erfgoedtypes' == cs.label('nl').label
assert 2 == len(cs.notes)
assert 2 == len(cs.languages)
def test_event_types(self):
from skosprovider_sqlalchemy.models import (
Concept as ConceptModel,
)
eventtypesprovider = _get_event_types()
cs = self._get_cs()
self.session.add(cs)
import_provider(eventtypesprovider, cs, self.session)
archeologische_opgravingen = self.session.query(ConceptModel) \
.filter(ConceptModel.conceptscheme == cs) \
.filter(ConceptModel.concept_id == 38) \
.one()
assert 3 == len(archeologische_opgravingen.narrower_collections)
def test_materials(self):
from skosprovider_sqlalchemy.models import (
Thing as ThingModel,
)
materialsprovider = _get_materials()
cs = self._get_cs()
self.session.add(cs)
import_provider(materialsprovider, cs, self.session)
materials = self.session.query(ThingModel) \
.filter(ThingModel.conceptscheme == cs) \
.all()
assert 2 == len(materials)
class TestVisitationCalculator(DBTestCase):
def setUp(self):
Base.metadata.create_all(self.engine)
self.session = self.session_maker()
Initialiser(self.session).init_all()
def tearDown(self):
self.session.rollback()
session.close_all_sessions()
Base.metadata.drop_all(self.engine)
def _get_cs(self):
from skosprovider_sqlalchemy.models import (
ConceptScheme as ConceptSchemeModel
)
return ConceptSchemeModel(
id=1,
uri='urn:x-skosprovider:cs:1'
)
def test_empty_provider(self):
from skosprovider.providers import DictionaryProvider
p = DictionaryProvider({'id': 'EMPTY'}, [])
cs = self._get_cs()
self.session.add(cs)
import_provider(p, cs, self.session)
vc = VisitationCalculator(self.session)
v = vc.visit(cs)
assert 0 == len(v)
def test_provider_invalid_language(self):
from skosprovider.providers import DictionaryProvider
with self.assertRaises(ValueError):
p = DictionaryProvider({'id': 'EMPTY'}, [
{
'id': '1',
'labels': [
{
'type': 'prefLabel',
'language': 'nederlands',
'label': 'Versterkingen'
}
]
}
])
cs = self._get_cs()
self.session.add(cs)
import_provider(p, cs, self.session)
def test_menu(self):
csvprovider = _get_menu()
cs = self._get_cs()
self.session.add(cs)
import_provider(csvprovider, cs, self.session)
vc = VisitationCalculator(self.session)
visit = vc.visit(cs)
assert 11 == len(visit)
for v in visit:
assert v['lft'] + 1 == v['rght']
assert 1 == v['depth']
def test_menu_sorted(self):
csvprovider = _get_menu()
cs = self._get_cs()
self.session.add(cs)
import_provider(csvprovider, cs, self.session)
vc = VisitationCalculator(self.session)
visit = vc.visit(cs)
assert 11 == len(visit)
left = 1
for v in visit:
assert v['lft'] == left
left += 2
def test_geo(self):
from skosprovider_sqlalchemy.models import (
Concept as ConceptModel
)
geoprovider = _get_geo()
cs = self._get_cs()
self.session.add(cs)
import_provider(geoprovider, cs, self.session)
vc = VisitationCalculator(self.session)
visit = vc.visit(cs)
assert 10 == len(visit)
world = visit[0]
assert self.session.query(ConceptModel).get(world['id']).concept_id == 1
assert 1 == world['lft']
assert 20 == world['rght']
assert 1 == world['depth']
for v in visit:
if v['id'] == 3:
assert v['lft'] + 3 == v['rght']
assert 2 == v['depth']
if v['id'] == 6:
assert v['lft'] + 1 == v['rght']
assert 3 == v['depth']
def test_buildings(self):
from skosprovider_sqlalchemy.models import (
Concept as ConceptModel
)
buildingprovider = _get_buildings()
cs = self._get_cs()
self.session.add(cs)
import_provider(buildingprovider, cs, self.session)
vc = VisitationCalculator(self.session)
visit = vc.visit(cs)
assert len(visit) == 5
# Check that castle is present twice
ids = [self.session.query(ConceptModel).get(v['id']).concept_id for v in visit]
assert ids.count(2) == 2
for v in visit:
# Check that fortification has one child
if v['id'] == 1:
assert v['lft'] + 3 == v['rght']
assert 1 == v['depth']
# Check that habitations has two children
if v['id'] == 3:
assert v['lft'] + 5 == v['rght']
assert 1 == v['depth']
# Check that castle has no children
if v['id'] == 2:
assert v['lft'] + 1 == v['rght']
assert 2 == v['depth']
|
koenedaele/skosprovider_sqlalchemy
|
tests/test_utils.py
|
Python
|
mit
| 18,760
|
[
"VisIt"
] |
a63446ac7dd90f554cc87707757b835f59d09eb91346929526f5259c71935566
|
from collections import namedtuple, defaultdict
import copy
from functools import total_ordering
from itertools import izip_longest
import os
from os.path import commonprefix
import re
import urllib
from eulxml.xmlmap import StringField, XmlObject, IntegerField, NodeListField, NodeField, load_xmlobject_from_string
from lxml import etree
from xml.sax.saxutils import escape, unescape
from django.core.urlresolvers import reverse
from .exceptions import (
MediaResourceError,
ParentModuleReferenceError,
SuiteError,
SuiteValidationError,
)
from corehq.feature_previews import MODULE_FILTER
from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.const import CAREPLAN_GOAL, CAREPLAN_TASK, SCHEDULE_LAST_VISIT, SCHEDULE_PHASE, \
CASE_ID, RETURN_TO, USERCASE_ID, USERCASE_TYPE
from corehq.apps.app_manager.exceptions import UnknownInstanceError, ScheduleError, FormNotFoundException
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from corehq.apps.app_manager.util import split_path, create_temp_sort_column, languages_mapping, \
actions_use_usercase
from corehq.apps.app_manager.xform import SESSION_CASE_ID, autoset_owner_id_for_open_case, \
autoset_owner_id_for_subcase
from corehq.apps.app_manager.xpath import interpolate_xpath, CaseIDXPath, session_var, \
CaseTypeXpath, ItemListFixtureXpath, ScheduleFixtureInstance, XPath, ProductInstanceXpath, UserCaseXPath
from corehq.apps.hqmedia.models import HQMediaMapItem
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import get_url_base
FIELD_TYPE_ATTACHMENT = 'attachment'
FIELD_TYPE_INDICATOR = 'indicator'
FIELD_TYPE_LOCATION = 'location'
FIELD_TYPE_PROPERTY = 'property'
FIELD_TYPE_LEDGER = 'ledger'
FIELD_TYPE_SCHEDULE = 'schedule'
class XPathField(StringField):
"""
A string field that is supposed to contain an arbitrary xpath expression
"""
pass
class OrderedXmlObject(XmlObject):
ORDER = ()
def __init__(self, *args, **kwargs):
ordered_pairs = []
for attr in self.ORDER:
value = kwargs.pop(attr, None)
if value:
ordered_pairs.append((attr, value))
super(OrderedXmlObject, self).__init__(*args, **kwargs)
for attr, value in ordered_pairs:
setattr(self, attr, value)
class IdNode(XmlObject):
id = StringField('@id')
class XpathVariable(XmlObject):
ROOT_NAME = 'variable'
name = StringField('@name')
locale_id = StringField('locale/@id')
class Xpath(XmlObject):
ROOT_NAME = 'xpath'
function = XPathField('@function')
variables = NodeListField('variable', XpathVariable)
class LocaleArgument(XmlObject):
ROOT_NAME = 'argument'
key = StringField('@key')
value = StringField('.')
class Locale(XmlObject):
ROOT_NAME = 'locale'
id = StringField('@id')
arguments = NodeListField('argument', LocaleArgument)
class Text(XmlObject):
"""
<text> <!----------- Exactly one. Will be present wherever text can be defined. Contains a sequential list of string elements to be concatenated to form the text body.-->
<xpath function=""> <!------------ 0 or More. An xpath function whose result is a string. References a data model if used in a context where one exists. -->
<variable name=""/> <!------------ 0 or More. Variable for the localized string. Variable elements can support any child elements that <body> can. -->
</xpath>
<locale id=""> <!------------ 0 or More. A localized string. id can be referenced here or as a child-->
<id/> <!------------ At Most One. The id of the localized string (if not provided as an attribute -->
<argument key=""/> <!------------ 0 or More. Arguments for the localized string. Key is optional. Arguments can support any child elements that <body> can. -->
</locale>
</text>
"""
ROOT_NAME = 'text'
xpath = NodeField('xpath', Xpath)
xpath_function = XPathField('xpath/@function')
locale = NodeField('locale', Locale)
locale_id = StringField('locale/@id')
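# Rough sketch of how Text is populated in practice (compare DisplayNode.set_display
# below; the serialized output shown is approximate):
#     Text(locale_id='forms.m0f0')             -> <text><locale id="forms.m0f0"/></text>
#     Text(xpath_function="concat('a', 'b')")  -> <text><xpath function="concat('a', 'b')"/></text>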
class ConfigurationItem(Text):
ROOT_NAME = "text"
id = StringField("@id")
class ConfigurationGroup(XmlObject):
ROOT_NAME = 'configuration'
configs = NodeListField('text', ConfigurationItem)
class Series(OrderedXmlObject):
ORDER = (
"configuration",
"x_function",
"y_function",
"radius_function",
)
ROOT_NAME = 'series'
nodeset = StringField('@nodeset')
configuration = NodeField('configuration', ConfigurationGroup)
x_function = StringField('x/@function')
y_function = StringField('y/@function')
radius_function = StringField("radius/@function")
class Annotation(OrderedXmlObject):
ORDER = ("x", "y", "text")
ROOT_NAME = 'annotation'
# TODO: Specify the xpath without specifying "text" for the child (we want the Text class to specify the tag)
x = NodeField('x/text', Text)
y = NodeField('y/text', Text)
text = NodeField('text', Text)
class Graph(XmlObject):
ROOT_NAME = 'graph'
type = StringField("@type", choices=["xy", "bubble"])
series = NodeListField('series', Series)
configuration = NodeField('configuration', ConfigurationGroup)
annotations = NodeListField('annotation', Annotation)
class AbstractResource(OrderedXmlObject):
ORDER = ('id', 'version', 'local', 'remote')
LOCATION_TEMPLATE = 'resource/location[@authority="%s"]'
local = StringField(LOCATION_TEMPLATE % 'local', required=True)
remote = StringField(LOCATION_TEMPLATE % 'remote', required=True)
version = IntegerField('resource/@version')
id = StringField('resource/@id')
descriptor = StringField('resource/@descriptor')
class XFormResource(AbstractResource):
ROOT_NAME = 'xform'
class LocaleResource(AbstractResource):
ROOT_NAME = 'locale'
language = StringField('@language')
class MediaResource(AbstractResource):
ROOT_NAME = 'media'
path = StringField('@path')
class Display(OrderedXmlObject):
ROOT_NAME = 'display'
ORDER = ('text', 'media_image', 'media_audio')
text = NodeField('text', Text)
media_image = StringField('media/@image')
media_audio = StringField('media/@audio')
class DisplayNode(XmlObject):
"""
Mixin for any node that has the awkward text-or-display subnode,
like Command or Menu
"""
text = NodeField('text', Text)
display = NodeField('display', Display)
def __init__(self, node=None, context=None,
locale_id=None, media_image=None, media_audio=None, **kwargs):
super(DisplayNode, self).__init__(node, context, **kwargs)
self.set_display(
locale_id=locale_id,
media_image=media_image,
media_audio=media_audio,
)
def set_display(self, locale_id=None, media_image=None, media_audio=None):
text = Text(locale_id=locale_id) if locale_id else None
if media_image or media_audio:
self.display = Display(
text=text,
media_image=media_image,
media_audio=media_audio,
)
elif text:
self.text = text
class Command(DisplayNode, IdNode):
ROOT_NAME = 'command'
relevant = StringField('@relevant')
class Instance(IdNode, OrderedXmlObject):
ROOT_NAME = 'instance'
ORDER = ('id', 'src')
src = StringField('@src')
class SessionDatum(IdNode, OrderedXmlObject):
ROOT_NAME = 'datum'
ORDER = ('id', 'nodeset', 'value', 'function', 'detail_select', 'detail_confirm', 'detail_persistent', 'detail_inline')
nodeset = XPathField('@nodeset')
value = StringField('@value')
function = XPathField('@function')
detail_select = StringField('@detail-select')
detail_confirm = StringField('@detail-confirm')
detail_persistent = StringField('@detail-persistent')
detail_inline = StringField('@detail-inline')
class StackDatum(IdNode):
ROOT_NAME = 'datum'
value = XPathField('@value')
class StackCommand(XmlObject):
ROOT_NAME = 'command'
value = XPathField('@value')
command = StringField('.')
class BaseFrame(XmlObject):
if_clause = XPathField('@if')
class CreatePushBase(IdNode, BaseFrame):
datums = NodeListField('datum', StackDatum)
commands = NodeListField('command', StackCommand)
def add_command(self, command):
node = etree.SubElement(self.node, 'command')
node.attrib['value'] = command
def add_datum(self, datum):
self.node.append(datum.node)
class CreateFrame(CreatePushBase):
ROOT_NAME = 'create'
class PushFrame(CreatePushBase):
ROOT_NAME = 'push'
class ClearFrame(BaseFrame):
ROOT_NAME = 'clear'
frame = StringField('@frame')
FRAME_CLASSES = (CreateFrame, PushFrame, ClearFrame)
FRAME_CLASSES_BY_ROOT = {frame_type.ROOT_NAME: frame_type
for frame_type in FRAME_CLASSES}
def _wrap_frame(frame):
return FRAME_CLASSES_BY_ROOT[frame.tag](frame)
class Stack(XmlObject):
ROOT_NAME = 'stack'
frames = NodeListField('*', _wrap_frame)
def add_frame(self, frame):
self.node.append(frame.node)
class Assertion(XmlObject):
ROOT_NAME = 'assert'
test = XPathField('@test')
text = NodeListField('text', Text)
class Entry(OrderedXmlObject, XmlObject):
ROOT_NAME = 'entry'
ORDER = ('form', 'command', 'instance', 'datums')
form = StringField('form')
command = NodeField('command', Command)
instances = NodeListField('instance', Instance)
datums = NodeListField('session/datum', SessionDatum)
stack = NodeField('stack', Stack)
assertions = NodeListField('assertions/assert', Assertion)
def require_instance(self, *instances):
used = {(instance.id, instance.src) for instance in self.instances}
for instance in instances:
if (instance.id, instance.src) not in used:
self.instances.append(
# it's important to make a copy,
# since these can't be reused
Instance(id=instance.id, src=instance.src)
)
# make sure the first instance gets inserted
# right after the command
# once you "suggest" a placement to eulxml,
# it'll follow your lead and place the rest of them there too
if len(self.instances) == 1:
instance_node = self.node.find('instance')
command_node = self.node.find('command')
self.node.remove(instance_node)
self.node.insert(self.node.index(command_node) + 1,
instance_node)
sorted_instances = sorted(self.instances,
key=lambda instance: instance.id)
if sorted_instances != self.instances:
self.instances = sorted_instances
class Menu(DisplayNode, IdNode):
ROOT_NAME = 'menu'
root = StringField('@root')
relevant = XPathField('@relevant')
commands = NodeListField('command', Command)
class AbstractTemplate(XmlObject):
form = StringField('@form', choices=['image', 'phone', 'address'])
width = IntegerField('@width')
text = NodeField('text', Text)
class Template(AbstractTemplate):
ROOT_NAME = 'template'
class GraphTemplate(Template):
# TODO: Is there a way to specify a default/static value for form?
form = StringField('@form', choices=['graph'])
graph = NodeField('graph', Graph)
class Header(AbstractTemplate):
ROOT_NAME = 'header'
class Sort(AbstractTemplate):
ROOT_NAME = 'sort'
type = StringField('@type')
order = StringField('@order')
direction = StringField('@direction')
class Style(XmlObject):
ROOT_NAME = 'style'
horz_align = StringField("@horz-align")
vert_align = StringField("@vert-align")
font_size = StringField("@font-size")
css_id = StringField("@css-id")
grid_height = StringField("grid/@grid-height")
grid_width = StringField("grid/@grid-width")
grid_x = StringField("grid/@grid-x")
grid_y = StringField("grid/@grid-y")
class Extra(XmlObject):
ROOT_NAME = 'extra'
key = StringField("@key")
value = StringField("@value")
class Response(XmlObject):
ROOT_NAME = 'response'
key = StringField("@key")
class Lookup(XmlObject):
ROOT_NAME = 'lookup'
name = StringField("@name")
action = StringField("@action", required=True)
image = StringField("@image")
extras = NodeListField('extra', Extra)
responses = NodeListField('response', Response)
class Field(OrderedXmlObject):
ROOT_NAME = 'field'
ORDER = ('header', 'template', 'sort_node')
sort = StringField('@sort')
style = NodeField('style', Style)
header = NodeField('header', Header)
template = NodeField('template', Template)
sort_node = NodeField('sort', Sort)
background = NodeField('background/text', Text)
class Action(OrderedXmlObject):
ROOT_NAME = 'action'
ORDER = ('display', 'stack')
stack = NodeField('stack', Stack)
display = NodeField('display', Display)
class DetailVariable(XmlObject):
ROOT_NAME = '_'
function = XPathField('@function')
def get_name(self):
return self.node.tag
def set_name(self, value):
self.node.tag = value
name = property(get_name, set_name)
class DetailVariableList(XmlObject):
ROOT_NAME = 'variables'
variables = NodeListField('_', DetailVariable)
class Detail(OrderedXmlObject, IdNode):
"""
<detail id="">
<title><text/></title>
<lookup action="" image="" name="">
<extra key="" value = "" />
<response key ="" />
</lookup>
<variables>
<__ function=""/>
</variables>
<field sort="">
<header form="" width=""><text/></header>
<template form="" width=""><text/></template>
</field>
</detail>
"""
ROOT_NAME = 'detail'
ORDER = ('title', 'lookup', 'fields')
title = NodeField('title/text', Text)
lookup = NodeField('lookup', Lookup)
fields = NodeListField('field', Field)
action = NodeField('action', Action)
details = NodeListField('detail', "self")
_variables = NodeField('variables', DetailVariableList)
def get_all_fields(self):
'''
Return all fields under this Detail instance and all fields under
any details that may be under this instance.
:return:
'''
all_fields = []
for detail in [self] + list(self.details):
all_fields.extend(list(detail.fields))
return all_fields
def _init_variables(self):
if self._variables is None:
self._variables = DetailVariableList()
def get_variables(self):
self._init_variables()
return self._variables.variables
def set_variables(self, value):
self._init_variables()
self._variables.variables = value
variables = property(get_variables, set_variables)
def get_all_xpaths(self):
result = set()
if self._variables:
for variable in self.variables:
result.add(variable.function)
for field in self.get_all_fields():
try:
result.add(field.header.text.xpath_function)
result.add(field.template.text.xpath_function)
except AttributeError:
# Its a Graph detail
# convert Template to GraphTemplate
s = etree.tostring(field.template.node)
template = load_xmlobject_from_string(s, xmlclass=GraphTemplate)
for series in template.graph.series:
result.add(series.nodeset)
result.discard(None)
return result
class Fixture(IdNode):
ROOT_NAME = 'fixture'
user_id = StringField('@user_id')
def set_content(self, xml):
for child in self.node:
self.node.remove(child)
self.node.append(xml)
class ScheduleVisit(IdNode):
ROOT_NAME = 'visit'
due = StringField('@due')
late_window = StringField('@late_window')
class Schedule(XmlObject):
ROOT_NAME = 'schedule'
expires = StringField('@expires')
post_schedule_increment = StringField('@post_schedule_increment')
visits = NodeListField('visit', ScheduleVisit)
class ScheduleFixture(Fixture):
schedule = NodeField('schedule', Schedule)
class Suite(OrderedXmlObject):
ROOT_NAME = 'suite'
ORDER = ('version', 'descriptor')
version = IntegerField('@version')
xform_resources = NodeListField('xform', XFormResource)
locale_resources = NodeListField('locale', LocaleResource)
media_resources = NodeListField('locale', MediaResource)
details = NodeListField('detail', Detail)
entries = NodeListField('entry', Entry)
menus = NodeListField('menu', Menu)
fixtures = NodeListField('fixture', Fixture)
descriptor = StringField('@descriptor')
@total_ordering
class DatumMeta(object):
"""
Class used in computing the form workflow. Allows comparison by SessionDatum.id and reference
to SessionDatum.nodeset and SessionDatum.function attributes.
"""
type_regex = re.compile("\[@case_type='([\w_]+)'\]")
def __init__(self, session_datum):
self.id = session_datum.id
self.nodeset = session_datum.nodeset
self.function = session_datum.function
self.source_id = self.id
@property
@memoized
def case_type(self):
if not self.nodeset:
return None
match = self.type_regex.search(self.nodeset)
return match.group(1)
def __lt__(self, other):
return self.id < other.id
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return not self == other
def __repr__(self):
return 'DatumMeta(id={}, case_type={}, source_id={})'.format(self.id, self.case_type, self.source_id)
def get_default_sort_elements(detail):
from corehq.apps.app_manager.models import SortElement
if not detail.columns:
return []
def get_sort_params(column):
if column.field_type == FIELD_TYPE_LEDGER:
return dict(type='int', direction='descending')
else:
return dict(type='string', direction='ascending')
col_0 = detail.get_column(0)
sort_elements = [SortElement(
field=col_0.field,
**get_sort_params(col_0)
)]
for column in detail.columns[1:]:
if column.field_type == FIELD_TYPE_LEDGER:
sort_elements.append(SortElement(
field=column.field,
**get_sort_params(column)
))
return sort_elements
def get_detail_column_infos(detail, include_sort):
"""
    This is not intended to be a widely used format,
    just a packaging of column info into a form most convenient for rendering.
"""
DetailColumnInfo = namedtuple('DetailColumnInfo',
'column sort_element order')
if not include_sort:
return [DetailColumnInfo(column, None, None) for column in detail.get_columns()]
if detail.sort_elements:
sort_elements = detail.sort_elements
else:
sort_elements = get_default_sort_elements(detail)
# order is 1-indexed
sort_elements = {s.field: (s, i + 1)
for i, s in enumerate(sort_elements)}
columns = []
for column in detail.get_columns():
sort_element, order = sort_elements.pop(column.field, (None, None))
columns.append(DetailColumnInfo(column, sort_element, order))
# sort elements is now populated with only what's not in any column
# add invisible columns for these
sort_only = sorted(sort_elements.items(),
key=lambda (field, (sort_element, order)): order)
for field, (sort_element, order) in sort_only:
column = create_temp_sort_column(field, len(columns))
columns.append(DetailColumnInfo(column, sort_element, order))
return columns
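# Illustrative return value for a detail whose first column is also its sort
# column (hypothetical field name):
#     [DetailColumnInfo(column=<column 'name'>, sort_element=<SortElement 'name'>, order=1)]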
class SuiteGeneratorBase(object):
descriptor = None
sections = ()
def __init__(self, app):
self.app = app
# this is actually so slow it's worth caching
self.modules = list(self.app.get_modules())
self.id_strings = id_strings
def generate_suite(self):
suite = Suite(
version=self.app.version,
descriptor=self.descriptor,
)
def add_to_suite(attr):
getattr(suite, attr).extend(getattr(self, attr))
map(add_to_suite, self.sections)
self.post_process(suite)
return suite.serializeDocument(pretty=True)
def post_process(self, suite):
pass
GROUP_INSTANCE = Instance(id='groups', src='jr://fixture/user-groups')
REPORT_INSTANCE = Instance(id='reports', src='jr://fixture/commcare:reports')
LEDGER_INSTANCE = Instance(id='ledgerdb', src='jr://instance/ledgerdb')
CASE_INSTANCE = Instance(id='casedb', src='jr://instance/casedb')
SESSION_INSTANCE = Instance(id='commcaresession', src='jr://instance/session')
INSTANCE_BY_ID = {
instance.id: instance
for instance in (
GROUP_INSTANCE,
REPORT_INSTANCE,
LEDGER_INSTANCE,
CASE_INSTANCE,
SESSION_INSTANCE,
)
}
def get_instance_factory(scheme):
return get_instance_factory._factory_map.get(scheme, preset_instances)
get_instance_factory._factory_map = {}
class register_factory(object):
def __init__(self, *schemes):
self.schemes = schemes
def __call__(self, fn):
for scheme in self.schemes:
get_instance_factory._factory_map[scheme] = fn
return fn
@register_factory(*INSTANCE_BY_ID.keys())
def preset_instances(instance_name):
return INSTANCE_BY_ID.get(instance_name, None)
@register_factory('item-list', 'schedule', 'indicators', 'commtrack')
@memoized
def generic_fixture_instances(instance_name):
return Instance(id=instance_name, src='jr://fixture/{}'.format(instance_name))
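# Illustrative note (comments only, nothing here is executed): the registry
# above dispatches on the scheme portion of an instance name, i.e. whatever
# precedes the first ':'. Unknown schemes fall back to preset_instances, which
# only knows the fixed ids in INSTANCE_BY_ID. The instance name below is
# hypothetical.
#
#   get_instance_factory('item-list')('item-list:my-table')
#   # -> Instance(id='item-list:my-table', src='jr://fixture/item-list:my-table')
#
#   get_instance_factory(None)('casedb')
#   # -> CASE_INSTANCE (preset fallback)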
class SuiteGenerator(SuiteGeneratorBase):
descriptor = u"Suite File"
sections = (
'xform_resources',
'locale_resources',
'details',
'entries',
'menus',
'fixtures',
)
def __init__(self, app, is_usercase_enabled=None):
super(SuiteGenerator, self).__init__(app)
self.is_usercase_enabled = is_usercase_enabled
def post_process(self, suite):
if self.app.enable_post_form_workflow:
self.add_form_workflow(suite)
details_by_id = self.get_detail_mapping()
relevance_by_menu, menu_by_command = self.get_menu_relevance_mapping()
for e in suite.entries:
self.add_referenced_instances(e, details_by_id, relevance_by_menu, menu_by_command)
def add_form_workflow(self, suite):
"""
post_form_workflow = 'module':
* Add stack frame and a command with value = "module command"
post_form_workflow = 'previous_screen':
* Add stack frame and a command with value = "module command"
* Find longest list of common datums between form entries for the module and add datums
to the stack frame for each.
* Add a command to the frame with value = "form command"
* Add datums to the frame for any remaining datums for that form.
* Remove any autoselect items from the end of the stack frame.
* Finally remove the last item from the stack frame.
"""
from corehq.apps.app_manager.models import (
WORKFLOW_DEFAULT, WORKFLOW_PREVIOUS, WORKFLOW_MODULE, WORKFLOW_ROOT, WORKFLOW_FORM
)
@memoized
def get_entry(suite, form_command):
entry = self.get_form_entry(suite, form_command)
if not entry.stack:
entry.stack = Stack()
return entry, True
else:
return entry, False
def create_workflow_stack(suite, form_command, frame_children,
allow_empty_stack=False, if_clause=None):
if not frame_children and not allow_empty_stack:
return
entry, is_new = get_entry(suite, form_command)
entry = self.get_form_entry(suite, form_command)
if not is_new:
# TODO: find a more general way of handling multiple contributions to the workflow
if_prefix = '{} = 0'.format(session_var(RETURN_TO).count())
template = '({{}}) and ({})'.format(if_clause) if if_clause else '{}'
if_clause = template.format(if_prefix)
if_clause = unescape(if_clause) if if_clause else None
frame = CreateFrame(if_clause=if_clause)
entry.stack.add_frame(frame)
for child in frame_children:
if isinstance(child, basestring):
frame.add_command(XPath.string(child))
else:
value = session_var(child.source_id) if child.nodeset else child.function
frame.add_datum(StackDatum(id=child.id, value=value))
return frame
root_modules = [module for module in self.modules if getattr(module, 'put_in_root', False)]
root_module_datums = [datum for module in root_modules
for datum in self.get_module_datums(suite, u'm{}'.format(module.id)).values()]
def get_frame_children(target_form, module_only=False):
"""
For a form return the list of stack frame children that are required
to navigate to that form.
This is based on the following algorithm:
* Add the module the form is in to the stack (we'll call this `m`)
            * Walk through all forms in the module, determine what datum selections are present in all of the forms
(this may be an empty set)
* Basically if there are three forms that respectively load
* f1: v1, v2, v3, v4
* f2: v1, v2, v4
* f3: v1, v2
* The longest common chain is v1, v2
* Add a datum for each of those values to the stack
* Add the form "command id" for the <entry> to the stack
* Add the remainder of the datums for the current form to the stack
* For the three forms above, the stack entries for "last element" would be
* m, v1, v2, f1, v3, v4
* m, v1, v2, f2, v4
* m, v1, v2, f3
            :returns: list of strings and DatumMeta objects. Strings represent stack commands
and DatumMeta's represent stack datums.
"""
target_form_command = self.id_strings.form_command(target_form)
target_module_id, target_form_id = target_form_command.split('-')
module_command = self.id_strings.menu_id(target_form.get_module())
module_datums = self.get_module_datums(suite, target_module_id)
form_datums = module_datums[target_form_id]
if module_command == self.id_strings.ROOT:
datums_list = root_module_datums
else:
datums_list = module_datums.values() # [ [datums for f0], [datums for f1], ...]
common_datums = commonprefix(datums_list)
remaining_datums = form_datums[len(common_datums):]
frame_children = [module_command] if module_command != self.id_strings.ROOT else []
frame_children.extend(common_datums)
if not module_only:
frame_children.append(target_form_command)
frame_children.extend(remaining_datums)
return frame_children
def get_datums_matched_to_source(target_frame_elements, source_datums):
"""
Attempt to match the target session variables with ones in the source session.
            This makes some large assumptions about how people will actually use this feature.
"""
datum_index = -1
for child in target_frame_elements:
if not isinstance(child, DatumMeta) or child.function:
yield child
else:
datum_index += 1
try:
source_datum = source_datums[datum_index]
except IndexError:
yield child
else:
if child.id != source_datum.id and not source_datum.case_type or \
source_datum.case_type == child.case_type:
target_datum = copy.copy(child)
target_datum.source_id = source_datum.id
yield target_datum
else:
yield child
for module in self.modules:
for form in module.get_forms():
if form.post_form_workflow == WORKFLOW_DEFAULT:
continue
form_command = self.id_strings.form_command(form)
if form.post_form_workflow == WORKFLOW_ROOT:
create_workflow_stack(suite, form_command, [], True)
elif form.post_form_workflow == WORKFLOW_MODULE:
module_command = self.id_strings.menu_id(module)
frame_children = [module_command] if module_command != self.id_strings.ROOT else []
create_workflow_stack(suite, form_command, frame_children)
elif form.post_form_workflow == WORKFLOW_PREVIOUS:
frame_children = get_frame_children(form)
# since we want to go the 'previous' screen we need to drop the last
# datum
last = frame_children.pop()
while isinstance(last, DatumMeta) and last.function:
# keep removing last element until we hit a command
# or a non-autoselect datum
last = frame_children.pop()
create_workflow_stack(suite, form_command, frame_children)
elif form.post_form_workflow == WORKFLOW_FORM:
module_id, form_id = form_command.split('-')
source_form_datums = self.get_form_datums(suite, module_id, form_id)
for link in form.form_links:
target_form = self.app.get_form(link.form_id)
target_module = target_form.get_module()
frame_children = get_frame_children(target_form)
frame_children = get_datums_matched_to_source(frame_children, source_form_datums)
if target_module in module.get_child_modules():
parent_frame_children = get_frame_children(module.get_form(0), module_only=True)
# exclude frame children from the child module if they are already
# supplied by the parent module
child_ids_in_parent = {getattr(child, "id", child) for child in parent_frame_children}
frame_children = parent_frame_children + [
child for child in frame_children
if getattr(child, "id", child) not in child_ids_in_parent
]
create_workflow_stack(suite, form_command, frame_children, if_clause=link.xpath)
def get_form_datums(self, suite, module_id, form_id):
return self.get_module_datums(suite, module_id)[form_id]
def get_module_datums(self, suite, module_id):
_, datums = self._get_entries_datums(suite)
return datums[module_id]
def get_form_entry(self, suite, form_command):
entries, _ = self._get_entries_datums(suite)
return entries[form_command]
@memoized
def _get_entries_datums(self, suite):
datums = defaultdict(lambda: defaultdict(list))
entries = {}
def _include_datums(entry):
# might want to make this smarter in the future, but for now just hard-code
# formats that we know we don't need or don't work
return not entry.command.id.startswith('reports') and not entry.command.id.endswith('case-list')
for e in filter(_include_datums, suite.entries):
command = e.command.id
module_id, form_id = command.split('-', 1)
entries[command] = e
if not e.datums:
datums[module_id][form_id] = []
else:
for d in e.datums:
datums[module_id][form_id].append(DatumMeta(d))
return entries, datums
@property
def xform_resources(self):
first = []
last = []
for form_stuff in self.app.get_forms(bare=False):
form = form_stuff["form"]
if form_stuff['type'] == 'module_form':
path = './modules-{module.id}/forms-{form.id}.xml'.format(**form_stuff)
this_list = first
else:
path = './user_registration.xml'
this_list = last
resource = XFormResource(
id=self.id_strings.xform_resource(form),
version=form.get_version(),
local=path,
remote=path,
)
if form_stuff['type'] == 'module_form' and self.app.build_version >= '2.9':
resource.descriptor = u"Form: (Module {module_name}) - {form_name}".format(
module_name=trans(form_stuff["module"]["name"], langs=[self.app.default_language]),
form_name=trans(form["name"], langs=[self.app.default_language])
)
elif path == './user_registration.xml':
                resource.descriptor = u"User Registration Form"
this_list.append(resource)
for x in first:
yield x
for x in last:
yield x
@property
def locale_resources(self):
for lang in ["default"] + self.app.build_langs:
path = './{lang}/app_strings.txt'.format(lang=lang)
resource = LocaleResource(
language=lang,
id=self.id_strings.locale_resource(lang),
version=self.app.version,
local=path,
remote=path,
)
if self.app.build_version >= '2.9':
unknown_lang_txt = u"Unknown Language (%s)" % lang
resource.descriptor = u"Translations: %s" % languages_mapping().get(lang, [unknown_lang_txt])[0]
yield resource
def build_detail(self, module, detail_type, detail, detail_column_infos,
tabs, id, title, start, end):
"""
Recursively builds the Detail object.
(Details can contain other details for each of their tabs)
"""
from corehq.apps.app_manager.detail_screen import get_column_generator
d = Detail(id=id, title=title)
if tabs:
tab_spans = detail.get_tab_spans()
for tab in tabs:
sub_detail = self.build_detail(
module,
detail_type,
detail,
detail_column_infos,
[],
None,
Text(locale_id=self.id_strings.detail_tab_title_locale(
module, detail_type, tab
)),
tab_spans[tab.id][0],
tab_spans[tab.id][1]
)
if sub_detail:
d.details.append(sub_detail)
if len(d.details):
return d
else:
return None
# Base case (has no tabs)
else:
# Add lookup
if detail.lookup_enabled and detail.lookup_action:
d.lookup = Lookup(
name=detail.lookup_name or None,
action=detail.lookup_action,
image=detail.lookup_image or None,
)
d.lookup.extras = [Extra(**e) for e in detail.lookup_extras]
d.lookup.responses = [Response(**r) for r in detail.lookup_responses]
# Add variables
variables = list(
self.detail_variables(module, detail, detail_column_infos[start:end])
)
if variables:
d.variables.extend(variables)
# Add fields
for column_info in detail_column_infos[start:end]:
fields = get_column_generator(
self.app, module, detail,
detail_type=detail_type, *column_info
).fields
d.fields.extend(fields)
# Add actions
if module.case_list_form.form_id and detail_type.endswith('short') and \
not (hasattr(module, 'parent_select') and module.parent_select.active):
# add form action to detail
form = self.app.get_form(module.case_list_form.form_id)
if form.form_type == 'module_form':
case_session_var = form.session_var_for_action('open_case')
elif form.form_type == 'advanced_form':
# match case session variable
reg_action = form.get_registration_actions(module.case_type)[0]
case_session_var = reg_action.case_session_var
d.action = Action(
display=Display(
text=Text(locale_id=self.id_strings.case_list_form_locale(module)),
media_image=module.case_list_form.media_image,
media_audio=module.case_list_form.media_audio,
),
stack=Stack()
)
frame = PushFrame()
frame.add_command(XPath.string(self.id_strings.form_command(form)))
frame.add_datum(StackDatum(id=case_session_var, value='uuid()'))
frame.add_datum(StackDatum(id=RETURN_TO, value=XPath.string(self.id_strings.menu_id(module))))
d.action.stack.add_frame(frame)
try:
if not self.app.enable_multi_sort:
d.fields[0].sort = 'default'
except IndexError:
pass
else:
                # only return the Detail if it has Fields
return d
@property
@memoized
def details(self):
r = []
if not self.app.use_custom_suite:
for module in self.modules:
for detail_type, detail, enabled in module.get_details():
if enabled:
if detail.custom_xml:
d = load_xmlobject_from_string(
detail.custom_xml,
xmlclass=Detail
)
r.append(d)
else:
detail_column_infos = get_detail_column_infos(
detail,
include_sort=detail_type.endswith('short'),
)
if detail_column_infos:
if detail.use_case_tiles:
r.append(self.build_case_tile_detail(
module, detail, detail_type
))
else:
d = self.build_detail(
module,
detail_type,
detail,
detail_column_infos,
list(detail.get_tabs()),
self.id_strings.detail(module, detail_type),
Text(locale_id=self.id_strings.detail_title_locale(
module, detail_type
)),
0,
len(detail_column_infos)
)
if d:
r.append(d)
return r
def detail_variables(self, module, detail, detail_column_infos):
has_schedule_columns = any(ci.column.field_type == FIELD_TYPE_SCHEDULE for ci in detail_column_infos)
if hasattr(module, 'has_schedule') and \
module.has_schedule and \
module.all_forms_require_a_case and \
has_schedule_columns:
forms_due = []
for form in module.get_forms():
if not (form.schedule and form.schedule.anchor):
raise ScheduleError('Form in schedule module is missing schedule: %s' % form.default_name())
fixture_id = self.id_strings.schedule_fixture(form)
anchor = form.schedule.anchor
                # @late_window = '' or today() <= (date(anchor) + int(@due) + int(@late_window))
within_window = XPath.or_(
XPath('@late_window').eq(XPath.string('')),
XPath('today() <= ({} + {} + {})'.format(
XPath.date(anchor),
XPath.int('@due'),
XPath.int('@late_window'))
)
)
due_first = ScheduleFixtureInstance(fixture_id).visit().\
select_raw(within_window).\
select_raw("1").slash('@due')
# current_schedule_phase = 1 and anchor != '' and (
# instance(...)/schedule/@expires = ''
# or
# today() < (date(anchor) + instance(...)/schedule/@expires)
# )
expires = ScheduleFixtureInstance(fixture_id).expires()
valid_not_expired = XPath.and_(
XPath(SCHEDULE_PHASE).eq(form.id + 1),
XPath(anchor).neq(XPath.string('')),
XPath.or_(
XPath(expires).eq(XPath.string('')),
"today() < ({} + {})".format(XPath.date(anchor), expires)
))
visit_num_valid = XPath('@id > {}'.format(
SCHEDULE_LAST_VISIT.format(form.schedule_form_id)
))
due_not_first = ScheduleFixtureInstance(fixture_id).visit().\
select_raw(visit_num_valid).\
select_raw(within_window).\
select_raw("1").slash('@due')
name = 'next_{}'.format(form.schedule_form_id)
forms_due.append(name)
def due_date(due_days):
return '{} + {}'.format(XPath.date(anchor), XPath.int(due_days))
xpath_phase_set = XPath.if_(valid_not_expired, due_date(due_not_first), 0)
if form.id == 0: # first form must cater for empty phase
yield DetailVariable(
name=name,
function=XPath.if_(
XPath(SCHEDULE_PHASE).eq(XPath.string('')),
due_date(due_first),
xpath_phase_set
)
)
else:
yield DetailVariable(name=name, function=xpath_phase_set)
yield DetailVariable(
name='next_due',
function='min({})'.format(','.join(forms_due))
)
yield DetailVariable(
name='is_late',
function='next_due < today()'
)
def build_case_tile_detail(self, module, detail, detail_type):
"""
Return a Detail node from an apps.app_manager.models.Detail that is
configured to use case tiles.
This method does so by injecting the appropriate strings into a template
string.
"""
from corehq.apps.app_manager.detail_screen import get_column_xpath_generator
template_args = {
"detail_id": self.id_strings.detail(module, detail_type),
"title_text_id": self.id_strings.detail_title_locale(
module, detail_type
)
}
# Get field/case property mappings
cols_by_tile = {col.case_tile_field: col for col in detail.columns}
for template_field in ["header", "top_left", "sex", "bottom_left", "date"]:
column = cols_by_tile.get(template_field, None)
if column is None:
raise SuiteError(
'No column was mapped to the "{}" case tile field'.format(
template_field
)
)
template_args[template_field] = {
"prop_name": get_column_xpath_generator(
self.app, module, detail, column
).xpath,
"locale_id": self.id_strings.detail_column_header_locale(
module, detail_type, column,
),
# Just using default language for now
# The right thing to do would be to reference the app_strings.txt I think
"prefix": escape(
column.header.get(self.app.default_language, "")
)
}
if column.format == "enum":
template_args[template_field]["enum_keys"] = {}
for mapping in column.enum:
template_args[template_field]["enum_keys"][mapping.key] = \
self.id_strings.detail_column_enum_variable(
module, detail_type, column, mapping.key_as_variable
)
# Populate the template
detail_as_string = self._case_tile_template_string.format(**template_args)
return load_xmlobject_from_string(detail_as_string, xmlclass=Detail)
@property
@memoized
def _case_tile_template_string(self):
"""
Return a string suitable for building a case tile detail node
through `String.format`.
"""
with open(os.path.join(
os.path.dirname(__file__), "case_tile_templates", "tdh.txt"
)) as f:
return f.read().decode('utf-8')
def get_filter_xpath(self, module, delegation=False):
filter = module.case_details.short.filter
if filter:
xpath = '[%s]' % filter
else:
xpath = ''
if delegation:
xpath += "[index/parent/@case_type = '%s']" % module.case_type
xpath += "[start_date = '' or double(date(start_date)) <= double(now())]"
return xpath
def get_nodeset_xpath(self, case_type, module, use_filter):
return "instance('casedb')/casedb/case[@case_type='{case_type}'][@status='open']{filter_xpath}".format(
case_type=case_type,
filter_xpath=self.get_filter_xpath(module) if use_filter else '',
)
def get_parent_filter(self, relationship, parent_id):
return "[index/{relationship}=instance('commcaresession')/session/data/{parent_id}]".format(
relationship=relationship,
parent_id=parent_id,
)
def get_module_by_id(self, module_id):
try:
[parent_module] = (
module for module in self.app.get_modules()
if module.unique_id == module_id
)
except ValueError:
raise ParentModuleReferenceError(
"Module %s in app %s not found" % (module_id, self.app)
)
else:
return parent_module
def get_select_chain(self, module, include_self=True):
select_chain = [module] if include_self else []
current_module = module
while hasattr(current_module, 'parent_select') and current_module.parent_select.active:
current_module = self.get_module_by_id(
current_module.parent_select.module_id
)
select_chain.append(current_module)
return select_chain
@memoized
def get_detail_mapping(self):
return {detail.id: detail for detail in self.details}
@memoized
def get_menu_relevance_mapping(self):
relevance_by_menu = defaultdict(list)
menu_by_command = {}
for menu in self.menus:
for command in menu.commands:
menu_by_command[command.id] = menu.id
if command.relevant:
relevance_by_menu[menu.id].append(command.relevant)
if menu.relevant:
relevance_by_menu[menu.id].append(menu.relevant)
return relevance_by_menu, menu_by_command
def get_detail_id_safe(self, module, detail_type):
detail_id = self.id_strings.detail(
module=module,
detail_type=detail_type,
)
return detail_id if detail_id in self.get_detail_mapping() else None
def get_instances_for_module(self, module, additional_xpaths=None):
"""
This method is used by CloudCare when filtering cases.
"""
details_by_id = self.get_detail_mapping()
detail_ids = [self.get_detail_id_safe(module, detail_type)
for detail_type, detail, enabled in module.get_details()
if enabled]
detail_ids = filter(None, detail_ids)
xpaths = set()
if additional_xpaths:
xpaths.update(additional_xpaths)
for detail_id in detail_ids:
xpaths.update(details_by_id[detail_id].get_all_xpaths())
return SuiteGenerator.get_required_instances(xpaths)
@staticmethod
def get_required_instances(xpaths):
instance_re = r"""instance\(['"]([\w\-:]+)['"]\)"""
instances = set()
for xpath in xpaths:
instance_names = re.findall(instance_re, xpath)
for instance_name in instance_names:
try:
scheme, _ = instance_name.split(':', 1)
except ValueError:
scheme = None
factory = get_instance_factory(scheme)
instance = factory(instance_name)
if instance:
instances.add(instance)
else:
raise UnknownInstanceError("Instance reference not recognized: {}".format(instance_name))
return instances
@staticmethod
def add_referenced_instances(entry, details_by_id, relevance_by_menu, menu_by_command):
detail_ids = set()
xpaths = set()
for datum in entry.datums:
detail_ids.add(datum.detail_confirm)
detail_ids.add(datum.detail_select)
xpaths.add(datum.nodeset)
xpaths.add(datum.function)
details = [details_by_id[detail_id] for detail_id in detail_ids
if detail_id]
entry_id = entry.command.id
menu_id = menu_by_command[entry_id]
relevances = relevance_by_menu[menu_id]
xpaths.update(relevances)
for detail in details:
xpaths.update(detail.get_all_xpaths())
for assertion in entry.assertions:
xpaths.add(assertion.test)
if entry.stack:
for frame in entry.stack.frames:
xpaths.add(frame.if_clause)
if hasattr(frame, 'datums'):
for datum in frame.datums:
xpaths.add(datum.value)
xpaths.discard(None)
instances = SuiteGenerator.get_required_instances(xpaths)
entry.require_instance(*instances)
def get_userdata_autoselect(self, key, session_id, mode):
base_xpath = session_var('data', path='user')
xpath = session_var(key, path='user/data')
protected_xpath = XPath.if_(
XPath.and_(base_xpath.count().eq(1), xpath.count().eq(1)),
xpath,
XPath.empty_string(),
)
datum = SessionDatum(id=session_id, function=protected_xpath)
assertions = [
self.get_assertion(
XPath.and_(base_xpath.count().eq(1),
xpath.count().eq(1)),
'case_autoload.{0}.property_missing'.format(mode),
[key],
),
self.get_assertion(
CaseIDXPath(xpath).case().count().eq(1),
'case_autoload.{0}.case_missing'.format(mode),
)
]
return datum, assertions
@property
def entries(self):
# avoid circular dependency
from corehq.apps.app_manager.models import Module, AdvancedModule
results = []
for module in self.modules:
for form in module.get_forms():
e = Entry()
e.form = form.xmlns
e.command = Command(
id=self.id_strings.form_command(form),
locale_id=self.id_strings.form_locale(form),
media_image=form.media_image,
media_audio=form.media_audio,
)
config_entry = {
'module_form': self.configure_entry_module_form,
'advanced_form': self.configure_entry_advanced_form,
'careplan_form': self.configure_entry_careplan_form,
}[form.form_type]
config_entry(module, e, form)
if (
self.app.commtrack_enabled and
session_var('supply_point_id') in getattr(form, 'source', "")
):
from .models import AUTO_SELECT_LOCATION
datum, assertions = self.get_userdata_autoselect(
'commtrack-supply-point',
'supply_point_id',
AUTO_SELECT_LOCATION,
)
e.datums.append(datum)
e.assertions.extend(assertions)
results.append(e)
if hasattr(module, 'case_list') and module.case_list.show:
e = Entry(
command=Command(
id=self.id_strings.case_list_command(module),
locale_id=self.id_strings.case_list_locale(module),
media_image=module.case_list.media_image,
media_audio=module.case_list.media_audio,
)
)
if isinstance(module, Module):
for datum_meta in self.get_datum_meta_module(module, use_filter=False):
e.datums.append(datum_meta['datum'])
elif isinstance(module, AdvancedModule):
e.datums.append(SessionDatum(
id='case_id_case_%s' % module.case_type,
nodeset=(self.get_nodeset_xpath(module.case_type, module, False)),
value="./@case_id",
detail_select=self.get_detail_id_safe(module, 'case_short'),
detail_confirm=self.get_detail_id_safe(module, 'case_long')
))
if self.app.commtrack_enabled:
e.datums.append(SessionDatum(
id='product_id',
nodeset=ProductInstanceXpath().instance(),
value="./@id",
detail_select=self.get_detail_id_safe(module, 'product_short')
))
results.append(e)
for entry in module.get_custom_entries():
results.append(entry)
return results
def get_assertion(self, test, locale_id, locale_arguments=None):
assertion = Assertion(test=test)
text = Text(locale_id=locale_id)
if locale_arguments:
locale = text.locale
for arg in locale_arguments:
locale.arguments.append(LocaleArgument(value=arg))
assertion.text.append(text)
return assertion
def add_case_sharing_assertion(self, entry):
assertion = self.get_assertion("count(instance('groups')/groups/group) = 1",
'case_sharing.exactly_one_group')
entry.assertions.append(assertion)
def get_auto_select_assertions(self, case_id_xpath, mode, locale_arguments=None):
case_count = CaseIDXPath(case_id_xpath).case().count()
return [
self.get_assertion(
"{0} = 1".format(case_id_xpath.count()),
'case_autoload.{0}.property_missing'.format(mode),
locale_arguments
),
self.get_assertion(
"{0} = 1".format(case_count),
'case_autoload.{0}.case_missing'.format(mode),
)
]
def get_extra_case_id_datums(self, form):
datums = []
actions = form.active_actions()
if form.form_type == 'module_form' and actions_use_usercase(actions):
if not self.is_usercase_enabled:
raise SuiteError('Form uses usercase, but usercase not enabled')
case = UserCaseXPath().case()
datums.append({
'datum': SessionDatum(id=USERCASE_ID, function=('%s/@case_id' % case)),
'case_type': USERCASE_TYPE,
'requires_selection': False,
'action': None # Unused (and could be actions['usercase_update'] or actions['usercase_preload'])
})
return datums
@staticmethod
def any_usercase_datums(datums):
return any(d['case_type'] == USERCASE_TYPE for d in datums)
def get_new_case_id_datums_meta(self, form):
if not form:
return []
datums = []
if form.form_type == 'module_form':
actions = form.active_actions()
if 'open_case' in actions:
datums.append({
'datum': SessionDatum(id=form.session_var_for_action('open_case'), function='uuid()'),
'case_type': form.get_module().case_type,
'requires_selection': False,
'action': actions['open_case']
})
if 'subcases' in actions:
for i, subcase in enumerate(actions['subcases']):
# don't put this in the loop to be consistent with the form's indexing
# see XForm.create_casexml_2
if not subcase.repeat_context:
datums.append({
'datum': SessionDatum(id=form.session_var_for_action('subcase', i), function='uuid()'),
'case_type': subcase.case_type,
'requires_selection': False,
'action': subcase
})
elif form.form_type == 'advanced_form':
for action in form.actions.get_open_actions():
if not action.repeat_context:
datums.append({
'datum': SessionDatum(id=action.case_session_var, function='uuid()'),
'case_type': action.case_type,
'requires_selection': False,
'action': action
})
return datums
def configure_entry_as_case_list_form(self, form, entry):
target_module = form.case_list_module
if form.form_type == 'module_form':
source_session_var = form.session_var_for_action('open_case')
if form.form_type == 'advanced_form':
# match case session variable
reg_action = form.get_registration_actions(target_module.case_type)[0]
source_session_var = reg_action.case_session_var
target_session_var = 'case_id'
if target_module.module_type == 'advanced':
# match case session variable for target module
form = target_module.forms[0]
target_session_var = form.actions.load_update_cases[0].case_session_var
entry.stack = Stack()
source_case_id = session_var(source_session_var)
case_count = CaseIDXPath(source_case_id).case().count()
return_to = session_var(RETURN_TO)
frame_case_created = CreateFrame(if_clause='{} = 1 and {} > 0'.format(return_to.count(), case_count))
frame_case_created.add_command(return_to)
frame_case_created.add_datum(StackDatum(id=target_session_var, value=source_case_id))
entry.stack.add_frame(frame_case_created)
frame_case_not_created = CreateFrame(if_clause='{} = 1 and {} = 0'.format(return_to.count(), case_count))
frame_case_not_created.add_command(return_to)
entry.stack.add_frame(frame_case_not_created)
def configure_entry_module_form(self, module, e, form=None, use_filter=True, **kwargs):
def case_sharing_requires_assertion(form):
actions = form.active_actions()
if 'open_case' in actions and autoset_owner_id_for_open_case(actions):
return True
if 'subcases' in actions:
for subcase in actions['subcases']:
if autoset_owner_id_for_subcase(subcase):
return True
return False
datums = []
if not form or form.requires_case():
datums.extend(self.get_datum_meta_module(module, use_filter=True))
datums.extend(self.get_new_case_id_datums_meta(form))
datums.extend(self.get_extra_case_id_datums(form))
for datum in datums:
e.datums.append(datum['datum'])
if form and 'open_case' in form.active_actions() and form.is_case_list_form:
self.configure_entry_as_case_list_form(form, e)
if form and self.app.case_sharing and case_sharing_requires_assertion(form):
self.add_case_sharing_assertion(e)
def _get_datums_meta(self, module):
"""
return list of dicts containing datum IDs and case types
[
{'session_var': 'parent_parent_id', ... },
{'session_var': 'parent_id', ...}
{'session_var': 'child_id', ...},
]
"""
if not (module and module.module_type == 'basic'):
return []
select_chain = self.get_select_chain(module)
return [
{
'session_var': ('parent_' * i or 'case_') + 'id',
'case_type': mod.case_type,
'module': mod,
'index': i
}
for i, mod in reversed(list(enumerate(select_chain)))
]
def get_datum_meta_module(self, module, use_filter=False):
datums = []
datums_meta = self._get_datums_meta(module)
for i, datum in enumerate(datums_meta):
# get the session var for the previous datum if there is one
parent_id = datums_meta[i - 1]['session_var'] if i >= 1 else ''
if parent_id:
parent_filter = self.get_parent_filter(datum['module'].parent_select.relationship, parent_id)
else:
parent_filter = ''
detail_persistent = None
detail_inline = False
for detail_type, detail, enabled in datum['module'].get_details():
if (
detail.persist_tile_on_forms
and (detail.use_case_tiles or detail.custom_xml)
and enabled
):
detail_persistent = self.id_strings.detail(datum['module'], detail_type)
detail_inline = bool(detail.pull_down_tile)
break
datums.append({
'datum': SessionDatum(
id=datum['session_var'],
nodeset=(self.get_nodeset_xpath(datum['case_type'], datum['module'], use_filter)
+ parent_filter),
value="./@case_id",
detail_select=self.get_detail_id_safe(datum['module'], 'case_short'),
detail_confirm=(
self.get_detail_id_safe(datum['module'], 'case_long')
if datum['index'] == 0 and not detail_inline else None
),
detail_persistent=detail_persistent,
detail_inline=self.get_detail_id_safe(datum['module'], 'case_long') if detail_inline else None
),
'case_type': datum['case_type'],
'requires_selection': True,
'action': 'update_case'
})
return datums
def get_auto_select_datums_and_assertions(self, action, auto_select, form):
from corehq.apps.app_manager.models import AUTO_SELECT_USER, AUTO_SELECT_CASE, \
AUTO_SELECT_FIXTURE, AUTO_SELECT_RAW, AUTO_SELECT_USERCASE
if auto_select.mode == AUTO_SELECT_USER:
return self.get_userdata_autoselect(
auto_select.value_key,
action.case_session_var,
auto_select.mode,
)
elif auto_select.mode == AUTO_SELECT_CASE:
try:
ref = form.actions.actions_meta_by_tag[auto_select.value_source]['action']
sess_var = ref.case_session_var
except KeyError:
raise ValueError("Case tag not found: %s" % auto_select.value_source)
xpath = CaseIDXPath(session_var(sess_var)).case().index_id(auto_select.value_key)
assertions = self.get_auto_select_assertions(xpath, auto_select.mode, [auto_select.value_key])
return SessionDatum(
id=action.case_session_var,
function=xpath
), assertions
elif auto_select.mode == AUTO_SELECT_FIXTURE:
xpath_base = ItemListFixtureXpath(auto_select.value_source).instance()
xpath = xpath_base.slash(auto_select.value_key)
fixture_assertion = self.get_assertion(
"{0} = 1".format(xpath_base.count()),
'case_autoload.{0}.exactly_one_fixture'.format(auto_select.mode),
[auto_select.value_source]
)
assertions = self.get_auto_select_assertions(xpath, auto_select.mode, [auto_select.value_key])
return SessionDatum(
id=action.case_session_var,
function=xpath
), [fixture_assertion] + assertions
elif auto_select.mode == AUTO_SELECT_RAW:
case_id_xpath = auto_select.value_key
case_count = CaseIDXPath(case_id_xpath).case().count()
return SessionDatum(
id=action.case_session_var,
function=case_id_xpath
), [
self.get_assertion(
"{0} = 1".format(case_count),
'case_autoload.{0}.case_missing'.format(auto_select.mode)
)
]
elif auto_select.mode == AUTO_SELECT_USERCASE:
case = UserCaseXPath().case()
return SessionDatum(
id=action.case_session_var,
function=case.slash('@case_id')
), [
self.get_assertion(
"{0} = 1".format(case.count()),
'case_autoload.{0}.case_missing'.format(auto_select.mode)
)
]
def configure_entry_advanced_form(self, module, e, form, **kwargs):
def case_sharing_requires_assertion(form):
actions = form.actions.open_cases
for action in actions:
if 'owner_id' in action.case_properties:
return True
return False
datums, assertions = self.get_datum_meta_assertions_advanced(module, form)
datums.extend(self.get_new_case_id_datums_meta(form))
for datum_meta in datums:
e.datums.append(datum_meta['datum'])
# assertions come after session
e.assertions.extend(assertions)
if form.is_registration_form() and form.is_case_list_form:
self.configure_entry_as_case_list_form(form, e)
if self.app.case_sharing and case_sharing_requires_assertion(form):
self.add_case_sharing_assertion(e)
def get_datum_meta_assertions_advanced(self, module, form):
def get_target_module(case_type, module_id, with_product_details=False):
if module_id:
if module_id == module.unique_id:
return module
from corehq.apps.app_manager.models import ModuleNotFoundException
try:
target = module.get_app().get_module_by_unique_id(module_id)
if target.case_type != case_type:
raise ParentModuleReferenceError(
"Module with ID %s has incorrect case type" % module_id
)
if with_product_details and not hasattr(target, 'product_details'):
raise ParentModuleReferenceError(
"Module with ID %s has no product details configuration" % module_id
)
return target
except ModuleNotFoundException as ex:
raise ParentModuleReferenceError(ex.message)
else:
if case_type == module.case_type:
return module
target_modules = [mod for mod in module.get_app().modules
if mod.case_type == case_type and
(not with_product_details or hasattr(mod, 'product_details'))]
try:
return target_modules[0]
except IndexError:
raise ParentModuleReferenceError(
"Module with case type %s in app %s not found" % (case_type, self.app)
)
datums = []
assertions = []
for action in form.actions.get_load_update_actions():
auto_select = action.auto_select
if auto_select and auto_select.mode:
datum, assertions = self.get_auto_select_datums_and_assertions(action, auto_select, form)
datums.append({
'datum': datum,
'case_type': None,
'requires_selection': False,
'action': action
})
else:
if action.parent_tag:
parent_action = form.actions.actions_meta_by_tag[action.parent_tag]['action']
parent_filter = self.get_parent_filter(
action.parent_reference_id,
parent_action.case_session_var
)
else:
parent_filter = ''
target_module = get_target_module(action.case_type, action.details_module)
referenced_by = form.actions.actions_meta_by_parent_tag.get(action.case_tag)
datum = SessionDatum(
id=action.case_session_var,
nodeset=(self.get_nodeset_xpath(action.case_type, target_module, True) + parent_filter),
value="./@case_id",
detail_select=self.get_detail_id_safe(target_module, 'case_short'),
detail_confirm=(
self.get_detail_id_safe(target_module, 'case_long')
if not referenced_by or referenced_by['type'] != 'load' else None
)
)
datums.append({
'datum': datum,
'case_type': action.case_type,
'requires_selection': True,
'action': action
})
if module.get_app().commtrack_enabled:
try:
last_action = list(form.actions.get_load_update_actions())[-1]
if last_action.show_product_stock:
nodeset = ProductInstanceXpath().instance()
if last_action.product_program:
nodeset = nodeset.select('program_id', last_action.product_program)
target_module = get_target_module(last_action.case_type, last_action.details_module, True)
datums.append({
'datum': SessionDatum(
id='product_id',
nodeset=nodeset,
value="./@id",
detail_select=self.get_detail_id_safe(target_module, 'product_short')
),
'case_type': None,
'requires_selection': True,
'action': None
})
except IndexError:
pass
root_module = module.root_module
root_datums = []
if root_module and root_module.module_type == 'basic':
# For advanced modules the onus is on the user to make things work by loading the correct cases and
# using the correct case tags.
try:
# assume that all forms in the root module have the same case management
root_module_form = root_module.get_form(0)
except FormNotFoundException:
pass
else:
if root_module_form.requires_case():
root_datums.extend(self.get_datum_meta_module(root_module))
root_datums.extend(self.get_new_case_id_datums_meta(root_module_form))
if root_datums:
# we need to try and match the datums to the root module so that
# the navigation on the phone works correctly
# 1. Add in any datums that don't require user selection e.g. new case IDs
# 2. Match the datum ID for datums that appear in the same position and
# will be loading the same case type
# see advanced_app_features#child-modules in docs
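            # Sketch with hypothetical ids: given
            #   datums      = [<selects case_id_load_a>, <new id case_id_new_b>]
            #   root_datums = [<selects case_id>,        <new id case_id_new>]
            # izip_longest pairs them positionally; a selection datum whose case
            # type matches its parent counterpart is renamed to the parent's id
            # (case_id_load_a -> case_id), and parent datums that need no
            # selection are inserted so the phone's navigation lines up.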
datum_pairs = list(izip_longest(datums, root_datums))
index = 0
changed_ids_by_case_tag = {}
for this_datum_meta, parent_datum_meta in datum_pairs:
if not this_datum_meta:
continue
this_datum = this_datum_meta['datum']
action = this_datum_meta['action']
if action and action.parent_tag in changed_ids_by_case_tag:
# update any reference to previously changed datums
change = changed_ids_by_case_tag[action.parent_tag]
nodeset = this_datum.nodeset
old = session_var(change['old_id'])
new = session_var(change['new_id'])
this_datum.nodeset = nodeset.replace(old, new)
if not parent_datum_meta:
continue
that_datum = parent_datum_meta['datum']
if this_datum.id != that_datum.id:
if not parent_datum_meta['requires_selection']:
datums.insert(index, parent_datum_meta)
elif this_datum_meta['case_type'] == parent_datum_meta['case_type']:
action = action
if action:
changed_ids_by_case_tag[action.case_tag] = {
"old_id": this_datum.id,
"new_id": that_datum.id
}
this_datum.id = that_datum.id
index += 1
return datums, assertions
def configure_entry_careplan_form(self, module, e, form=None, **kwargs):
parent_module = self.get_module_by_id(module.parent_select.module_id)
e.datums.append(SessionDatum(
id='case_id',
nodeset=self.get_nodeset_xpath(parent_module.case_type, parent_module, False),
value="./@case_id",
detail_select=self.get_detail_id_safe(parent_module, 'case_short'),
detail_confirm=self.get_detail_id_safe(parent_module, 'case_long')
))
def session_datum(datum_id, case_type, parent_ref, parent_val):
nodeset = CaseTypeXpath(case_type).case().select(
'index/%s' % parent_ref, session_var(parent_val), quote=False
).select('@status', 'open')
return SessionDatum(
id=datum_id,
nodeset=nodeset,
value="./@case_id",
detail_select=self.get_detail_id_safe(module, '%s_short' % case_type),
detail_confirm=self.get_detail_id_safe(module, '%s_long' % case_type)
)
e.stack = Stack()
frame = CreateFrame()
e.stack.add_frame(frame)
if form.case_type == CAREPLAN_GOAL:
if form.mode == 'create':
new_goal_id_var = 'case_id_goal_new'
e.datums.append(SessionDatum(id=new_goal_id_var, function='uuid()'))
elif form.mode == 'update':
new_goal_id_var = 'case_id_goal'
e.datums.append(session_datum(new_goal_id_var, CAREPLAN_GOAL, 'parent', 'case_id'))
if not module.display_separately:
open_goal = CaseIDXPath(session_var(new_goal_id_var)).case().select('@status', 'open')
frame.if_clause = '{count} = 1'.format(count=open_goal.count())
frame.add_command(XPath.string(self.id_strings.menu_id(parent_module)))
frame.add_datum(StackDatum(id='case_id', value=session_var('case_id')))
frame.add_command(XPath.string(self.id_strings.menu_id(module)))
frame.add_datum(StackDatum(id='case_id_goal', value=session_var(new_goal_id_var)))
else:
frame.add_command(XPath.string(self.id_strings.menu_id(module)))
frame.add_datum(StackDatum(id='case_id', value=session_var('case_id')))
elif form.case_type == CAREPLAN_TASK:
if not module.display_separately:
frame.add_command(XPath.string(self.id_strings.menu_id(parent_module)))
frame.add_datum(StackDatum(id='case_id', value=session_var('case_id')))
frame.add_command(XPath.string(self.id_strings.menu_id(module)))
frame.add_datum(StackDatum(id='case_id_goal', value=session_var('case_id_goal')))
if form.mode == 'update':
count = CaseTypeXpath(CAREPLAN_TASK).case().select(
'index/goal', session_var('case_id_goal'), quote=False
).select('@status', 'open').count()
frame.if_clause = '{count} >= 1'.format(count=count)
frame.add_command(XPath.string(
self.id_strings.form_command(module.get_form_by_type(CAREPLAN_TASK, 'update'))
))
else:
frame.add_command(XPath.string(self.id_strings.menu_id(module)))
frame.add_datum(StackDatum(id='case_id', value=session_var('case_id')))
if form.mode == 'create':
e.datums.append(session_datum('case_id_goal', CAREPLAN_GOAL, 'parent', 'case_id'))
elif form.mode == 'update':
e.datums.append(session_datum('case_id_goal', CAREPLAN_GOAL, 'parent', 'case_id'))
e.datums.append(session_datum('case_id_task', CAREPLAN_TASK, 'goal', 'case_id_goal'))
@property
@memoized
def menus(self):
# avoid circular dependency
from corehq.apps.app_manager.models import CareplanModule, AdvancedForm
menus = []
for module in self.modules:
if isinstance(module, CareplanModule):
update_menu = Menu(
id=self.id_strings.menu_id(module),
locale_id=self.id_strings.module_locale(module),
)
if not module.display_separately:
parent = self.get_module_by_id(module.parent_select.module_id)
create_goal_form = module.get_form_by_type(CAREPLAN_GOAL, 'create')
create_menu = Menu(
id=self.id_strings.menu_id(parent),
locale_id=self.id_strings.module_locale(parent),
)
create_menu.commands.append(Command(id=self.id_strings.form_command(create_goal_form)))
menus.append(create_menu)
update_menu.root = self.id_strings.menu_id(parent)
else:
update_menu.commands.extend([
Command(id=self.id_strings.form_command(module.get_form_by_type(CAREPLAN_GOAL, 'create'))),
])
update_menu.commands.extend([
Command(id=self.id_strings.form_command(module.get_form_by_type(CAREPLAN_GOAL, 'update'))),
Command(id=self.id_strings.form_command(module.get_form_by_type(CAREPLAN_TASK, 'create'))),
Command(id=self.id_strings.form_command(module.get_form_by_type(CAREPLAN_TASK, 'update'))),
])
menus.append(update_menu)
elif hasattr(module, 'get_menus'):
for menu in module.get_menus():
menus.append(menu)
else:
menu_kwargs = {
'id': self.id_strings.menu_id(module),
'locale_id': self.id_strings.module_locale(module),
'media_image': module.media_image,
'media_audio': module.media_audio,
}
if self.id_strings.menu_root(module):
menu_kwargs['root'] = self.id_strings.menu_root(module)
if (self.app.domain and MODULE_FILTER.enabled(self.app.domain) and
self.app.enable_module_filtering and
getattr(module, 'module_filter', None)):
menu_kwargs['relevant'] = interpolate_xpath(module.module_filter)
menu = Menu(**menu_kwargs)
def get_commands():
for form in module.get_forms():
command = Command(id=self.id_strings.form_command(form))
if module.all_forms_require_a_case() and \
not module.put_in_root and \
getattr(form, 'form_filter', None):
if isinstance(form, AdvancedForm):
try:
action = next(a for a in form.actions.load_update_cases if not a.auto_select)
case = CaseIDXPath(session_var(action.case_session_var)).case() if action else None
except IndexError:
case = None
else:
case = SESSION_CASE_ID.case()
if case:
command.relevant = interpolate_xpath(form.form_filter, case)
yield command
if hasattr(module, 'case_list') and module.case_list.show:
yield Command(id=self.id_strings.case_list_command(module))
menu.commands.extend(get_commands())
menus.append(menu)
return menus
@property
def fixtures(self):
if self.app.case_sharing:
f = Fixture(id='user-groups')
f.user_id = 'demo_user'
groups = etree.fromstring("""
<groups>
<group id="demo_user_group_id">
<name>Demo Group</name>
</group>
</groups>
""")
f.set_content(groups)
yield f
schedule_modules = (module for module in self.modules if getattr(module, 'has_schedule', False) and
module.all_forms_require_a_case)
schedule_forms = (form for module in schedule_modules for form in module.get_forms())
for form in schedule_forms:
schedule = form.schedule
fx = ScheduleFixture(
id=self.id_strings.schedule_fixture(form),
schedule=Schedule(
expires=schedule.expires,
post_schedule_increment=schedule.post_schedule_increment
))
for i, visit in enumerate(schedule.visits):
fx.schedule.visits.append(ScheduleVisit(
id=i + 1,
due=visit.due,
late_window=visit.late_window
))
yield fx
class MediaSuiteGenerator(SuiteGeneratorBase):
descriptor = u"Media Suite File"
sections = ('media_resources',)
@property
def media_resources(self):
PREFIX = 'jr://file/'
# you have to call remove_unused_mappings
# before iterating through multimedia_map
self.app.remove_unused_mappings()
if self.app.multimedia_map is None:
self.app.multimedia_map = {}
for path, m in self.app.multimedia_map.items():
unchanged_path = path
if path.startswith(PREFIX):
path = path[len(PREFIX):]
else:
raise MediaResourceError('%s does not start with %s' % (path, PREFIX))
path, name = split_path(path)
# CommCare assumes jr://media/,
# which is an alias to jr://file/commcare/media/
# so we need to replace 'jr://file/' with '../../'
# (this is a hack)
install_path = '../../{}'.format(path)
local_path = './{}/{}'.format(path, name)
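            # e.g. (hypothetical map entry) 'jr://file/commcare/image/m0_f0.png'
            # becomes path='commcare/image', name='m0_f0.png', so
            #   install_path == '../../commcare/image'
            #   local_path   == './commcare/image/m0_f0.png'
            # assuming split_path splits off the final path component.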
if not getattr(m, 'unique_id', None):
# lazy migration for adding unique_id to map_item
m.unique_id = HQMediaMapItem.gen_unique_id(m.multimedia_id, unchanged_path)
descriptor = None
if self.app.build_version >= '2.9':
type_mapping = {"CommCareImage": "Image",
"CommCareAudio": "Audio",
"CommCareVideo": "Video"}
descriptor = u"{filetype} File: {name}".format(
filetype=type_mapping.get(m.media_type, "Media"),
name=name
)
yield MediaResource(
id=self.id_strings.media_resource(m.unique_id, name),
path=install_path,
version=m.version,
descriptor=descriptor,
local=(local_path
if self.app.enable_local_resource
else None),
remote=get_url_base() + reverse(
'hqmedia_download',
args=[m.media_type, m.multimedia_id]
) + urllib.quote(name.encode('utf-8')) if name else name
)
def validate_suite(suite):
if isinstance(suite, unicode):
suite = suite.encode('utf8')
if isinstance(suite, str):
suite = etree.fromstring(suite)
if isinstance(suite, etree._Element):
suite = Suite(suite)
assert isinstance(suite, Suite),\
'Could not convert suite to a Suite XmlObject: %r' % suite
def is_unique_list(things):
return len(set(things)) == len(things)
for detail in suite.details:
orders = [field.sort_node.order for field in detail.fields
if field and field.sort_node]
if not is_unique_list(orders):
raise SuiteValidationError('field/sort/@order must be unique per detail')
|
puttarajubr/commcare-hq
|
corehq/apps/app_manager/suite_xml.py
|
Python
|
bsd-3-clause
| 89,116
|
[
"VisIt"
] |
c3ba77ad5aa8559de3f19f7ec2512b744930b346c066ef6c0387d7181a5e3b9a
|
# remove VERTICES section from a VTK file
# Forrest Sheng Bao http://fsbao.net
# GNU GPL v3.0 or later
def remove(inputvtk,outputvtk):
"""Scan the inputvtk file line by line until the keyword VERTICES is found and remove corresponding lines
"""
fout=open(outputvtk, 'w')
with open(inputvtk,'r') as fin:
skip_lines = 0
for line in fin.readlines():
if line[:8] == "VERTICES":
line = line.split()
skip_lines = 1 #int(line[1]) # the number of lines below should be skipped
else:
if skip_lines > 0:
skip_lines -= 1 # skip the line
else:
fout.write(line)
fout.close()
return 0
if __name__ == "__main__":
import sys
remove(sys.argv[1], sys.argv[2])
|
binarybottle/mindboggle_sidelined
|
remove_vertices.py
|
Python
|
apache-2.0
| 694
|
[
"VTK"
] |
cfd5e952b099e029c06bec011c35c93d39b7f7bb14751544e7d8bb75567e2fc4
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Contain and organize bibliographic information.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import string
import math
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from ...lib.citation import Citation as lib_Citation
class Citation:
"""
Store information about a citation and all of its references.
"""
def __init__(self):
"""
Initialize members.
"""
self.__src_handle = None
self.__ref_list = []
def get_source_handle(self):
"""
Provide the handle to the source that this citation is for.
:return: Source Handle
:rtype: handle
"""
return self.__src_handle
def set_source_handle(self, handle):
"""
Set the handle for the source that this citation is for.
:param handle: Source Handle
:type handle: handle
"""
self.__src_handle = handle
def get_ref_list(self):
"""
List all the references to this citation.
:return: a list of references
:rtype: list of :class:`~.citation.Citation` objects
"""
return self.__ref_list
def add_reference(self, source_ref):
"""
Add a reference to this citation. If a similar reference exists, don't
add another one.
:param source_ref: Source Reference
:type source_ref: :class:`~.citation.Citation`
:return: The key of the added reference among all the references.
:rtype: char
"""
letters = string.ascii_lowercase # or (e.g.) "abcdef" for testing
letter_count = len(letters)
ref_count = len(self.__ref_list)
x_ref_count = ref_count
# Return "a" for ref_count = 0, otherwise log(0) does not work
if ref_count == 0:
self.__ref_list.append((letters[0], source_ref))
return letters[0]
last_letter = letters[ ref_count % letter_count ]
key = ""
        # Calculate the number of letters needed for the key.
number_of_letters = 1 + int(math.log(float(ref_count),
float(letter_count)))
# Exclude index for number_of_letters-1
for n in range(1, number_of_letters-1):
ref_count -= pow(letter_count, n)
# Adjust number_of_letters for new index
number_of_letters = 1 + int(math.log(float(ref_count),
float(letter_count)))
for n in range(1, number_of_letters):
x_ref_count -= pow(letter_count, n)
for letter in range(1, number_of_letters):
index = x_ref_count // pow(letter_count, letter) % letter_count
key += letters[index]
key = key + last_letter
self.__ref_list.append((key, source_ref))
return key
class Bibliography:
"""
Store and organize multiple citations into a bibliography.
"""
MODE_DATE = 2**0
MODE_PAGE = 2**1
MODE_CONF = 2**2
MODE_NOTE = 2**3
MODE_MEDIA = 2**4
MODE_ALL = MODE_DATE | MODE_PAGE | MODE_CONF | MODE_NOTE | MODE_MEDIA
def __init__(self, mode=MODE_ALL):
"""
A bibliography will store citations (sources) and references to those
citations (citations). Duplicate entries will not be added. To change
what is considered duplicate, you can tell the bibliography what source
ref information you are interested in by passing in the mode.
Possible modes include:
- MODE_DATE
- MODE_PAGE
- MODE_CONF
- MODE_NOTE
- MODE_MEDIA
- MODE_ALL
If you only care about pages, set "mode=MODE_PAGE".
If you only care about dates and pages, set "mode=MODE_DATE|MODE_PAGE".
If you care about everything, set "mode=MODE_ALL".
"""
self.__citation_list = []
self.mode = mode
def add_reference(self, lib_citation):
"""
Add a reference to a source to this bibliography. If the source already
exists, don't add it again. If a similar reference exists, don't
add another one.
:param citation: Citation object
:type citation: :class:`~.citation.Citation`
:return: A tuple containing the index of the source among all the
sources and the key of the reference among all the references.
If there is no reference information, the second element will
be None.
:rtype: (int,char) or (int,None)
.. note::
Within this file, the name 'citation' is used both for
gen.lib.Citation, and for _bibliography.Citation. It is not clear
how best to rename the concepts in this file to avoid the clash,
so the names have been retained. In this function, lib_citation
is used for gen.lib.Citation instances, and citation for
_bibliography.Citation instances. Elsewhere in this file,
source_ref is used for gen.lib.Citation instances.
"""
source_handle = lib_citation.get_reference_handle()
cindex = 0
rkey = ""
citation = None
citation_found = False
for citation in self.__citation_list:
if citation.get_source_handle() == source_handle:
citation_found = True
break
cindex += 1
if not citation_found:
citation = Citation()
citation.set_source_handle(source_handle)
cindex = len(self.__citation_list)
self.__citation_list.append(citation)
if self.__sref_has_info(lib_citation):
for key, ref in citation.get_ref_list():
if self.__srefs_are_equal(ref, lib_citation):
# if a reference like this already exists, don't add
# another one
return (cindex, key)
rkey = citation.add_reference(lib_citation)
return (cindex, rkey)
def get_citation_count(self):
"""
Report the number of citations in this bibliography.
:return: number of citations
:rtype: int
"""
return len(self.__citation_list)
def get_citation_list(self):
"""
Return a list containing all the citations in this bibliography.
:return: citation list
:rtype: list of :class:`Citation` objects
"""
return self.__citation_list
def __sref_has_info(self, source_ref):
"""
Determine if this source_ref has any useful information based on the
current mode.
"""
if ( self.mode & self.MODE_PAGE ) == self.MODE_PAGE:
if source_ref.get_page() != "":
return True
if ( self.mode & self.MODE_DATE ) == self.MODE_DATE:
date = source_ref.get_date_object()
if date is not None and not date.is_empty():
return True
if ( self.mode & self.MODE_CONF ) == self.MODE_CONF:
confidence = source_ref.get_confidence_level()
if confidence is not None and confidence != \
lib_Citation.CONF_NORMAL:
return True
if ( self.mode & self.MODE_NOTE ) == self.MODE_NOTE:
if len(source_ref.get_note_list()) != 0:
return True
if ( self.mode & self.MODE_MEDIA ) == self.MODE_MEDIA:
if len(source_ref.get_media_list()) != 0:
return True
# Can't find anything interesting.
return False
def __srefs_are_equal(self, source_ref1, source_ref2):
"""
Determine if two source references are equal based on the
current mode.
"""
# The criterion for equality (in mode==MODE_ALL) is changed for
# citations. Previously, it was based on is_equal from SecondaryObject,
# which does a 'cmp' on the serialised data. (Note that this might not
# have worked properly for Dates; see comments in Date.is_equal and
# EditCitation.data_has_changed). The comparison is now made as to
# whether the two gen.lib.Citations have the same handle (i.e. they are
# actually the same database objects). It is felt that this better
# reflects the intent of Citation objects, which can be merged if they
# are intended to represent the same citation.
if self.mode == self.MODE_ALL:
return source_ref1.handle == source_ref2.handle
if ( self.mode & self.MODE_PAGE ) == self.MODE_PAGE:
if source_ref1.get_page() != source_ref2.get_page():
return False
if ( self.mode & self.MODE_DATE ) == self.MODE_DATE:
date1 = source_ref1.get_date_object()
date2 = source_ref2.get_date_object()
if not date1.is_equal(date2):
return False
if ( self.mode & self.MODE_CONF ) == self.MODE_CONF:
conf1 = source_ref1.get_confidence_level()
conf2 = source_ref2.get_confidence_level()
if conf1 != conf2:
return False
if ( self.mode & self.MODE_NOTE ) == self.MODE_NOTE:
nl1 = source_ref1.get_note_list()
nl2 = source_ref2.get_note_list()
if len(nl1) != len(nl2):
return False
for notehandle in nl1:
if notehandle not in nl2:
return False
if ( self.mode & self.MODE_MEDIA ) == self.MODE_MEDIA:
nl1 = source_ref1.get_media_list()
nl2 = source_ref2.get_media_list()
if len(nl1) != len(nl2):
return False
for mediahandle in nl1:
if mediahandle not in nl2:
return False
# Can't find anything different. They must be equal.
return True
|
sam-m888/gprime
|
gprime/plug/report/_bibliography.py
|
Python
|
gpl-2.0
| 11,010
|
[
"Brian"
] |
4714b76eb2d2e00e0f3e6165a7fa727f62f132b1d79ae39b19b566b2f8231d7f
|
"""
Acceptance tests for Studio's Setting pages
"""
from flaky import flaky
from .base_studio_test import StudioCourseTest
from ...pages.studio.settings_certificates import CertificatesPage
class CertificatesTest(StudioCourseTest):
"""
Tests for settings/certificates Page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(CertificatesTest, self).setUp(is_staff=True)
self.certificates_page = CertificatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def make_signatory_data(self, prefix='First'):
"""
Makes signatory dict which can be used in the tests to create certificates
"""
return {
'name': '{prefix} Signatory Name'.format(prefix=prefix),
'title': '{prefix} Signatory Title'.format(prefix=prefix),
'organization': '{prefix} Signatory Organization'.format(prefix=prefix),
}
def create_and_verify_certificate(self, course_title_override, existing_certs, signatories):
"""
Creates a new certificate and verifies that it was properly created.
"""
self.assertEqual(existing_certs, len(self.certificates_page.certificates))
if existing_certs == 0:
self.certificates_page.wait_for_first_certificate_button()
self.certificates_page.click_first_certificate_button()
else:
self.certificates_page.wait_for_add_certificate_button()
self.certificates_page.click_add_certificate_button()
certificate = self.certificates_page.certificates[existing_certs]
# Set the certificate properties
certificate.course_title = course_title_override
# add signatories
added_signatories = 0
for idx, signatory in enumerate(signatories):
certificate.signatories[idx].name = signatory['name']
certificate.signatories[idx].title = signatory['title']
certificate.signatories[idx].organization = signatory['organization']
certificate.signatories[idx].upload_signature_image('Signature-{}.png'.format(idx))
added_signatories += 1
if len(signatories) > added_signatories:
certificate.click_add_signatory_button()
# Save the certificate
self.assertEqual(certificate.get_text('.action-primary'), "Create")
certificate.click_create_certificate_button()
self.assertIn(course_title_override, certificate.course_title)
return certificate
def test_no_certificates_by_default(self):
"""
Scenario: Ensure that message telling me to create a new certificate is
shown when no certificate exist.
Given I have a course without certificates
When I go to the Certificates page in Studio
Then I see "You have not created any certificates yet." message
"""
self.certificates_page.visit()
self.assertTrue(self.certificates_page.no_certificates_message_shown)
self.assertIn(
"You have not created any certificates yet.",
self.certificates_page.no_certificates_message_text
)
def test_can_create_and_edit_certficate(self):
"""
Scenario: Ensure that the certificates can be created and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set new the course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has correct data
When I edit the certificate
And I change the name and click the button 'Save'
Then I see the certificate is saved successfully and has the new name
"""
self.certificates_page.visit()
self.certificates_page.wait_for_first_certificate_button()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
# Edit the certificate
certificate.click_edit_certificate_button()
certificate.course_title = "Updated Course Title Override 2"
self.assertEqual(certificate.get_text('.action-primary'), "Save")
certificate.click_save_certificate_button()
self.assertIn("Updated Course Title Override 2", certificate.course_title)
@flaky # TODO fix this, see SOL-1053
def test_can_delete_certificate(self):
"""
Scenario: Ensure that the user can delete certificate.
Given I have a course with 1 certificate
And I go to the Certificates page
When I delete the Certificate with name "New Certificate"
Then I see that there is no certificate
When I refresh the page
Then I see that the certificate has been deleted
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
certificate.wait_for_certificate_delete_button()
self.assertEqual(len(self.certificates_page.certificates), 1)
# Delete the certificate we just created
certificate.click_delete_certificate_button()
self.certificates_page.click_confirmation_prompt_primary_button()
# Reload the page and confirm there are no certificates
self.certificates_page.visit()
self.assertEqual(len(self.certificates_page.certificates), 0)
def test_can_create_and_edit_signatories_of_certficate(self):
"""
Scenario: Ensure that the certificates can be created with signatories and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set new the course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has one signatory inside it
When I click 'Edit' button of signatory panel
And I set the name and click the button 'Save' icon
Then I see the signatory name updated with newly set name
When I refresh the certificates page
Then I can see course has one certificate with new signatory name
When I click 'Edit' button of signatory panel
And click on 'Close' button
Then I can see no change in signatory detail
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first')]
)
self.assertEqual(len(self.certificates_page.certificates), 1)
# Edit the signatory in certificate
signatory = certificate.signatories[0]
signatory.edit()
signatory.name = 'Updated signatory name'
signatory.title = 'Update signatory title'
signatory.organization = 'Updated signatory organization'
signatory.save()
self.assertEqual(len(self.certificates_page.certificates), 1)
signatory = self.certificates_page.certificates[0].signatories[0]
self.assertIn("Updated signatory name", signatory.name)
self.assertIn("Update signatory title", signatory.title)
self.assertIn("Updated signatory organization", signatory.organization)
signatory.edit()
signatory.close()
self.assertIn("Updated signatory name", signatory.name)
def test_can_cancel_creation_of_certificate(self):
"""
Scenario: Ensure that creation of a certificate can be canceled correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set name of certificate and click the button 'Cancel'
Then I see that there is no certificates in the course
"""
self.certificates_page.visit()
self.certificates_page.click_first_certificate_button()
certificate = self.certificates_page.certificates[0]
certificate.course_title = "Title Override"
certificate.click_cancel_edit_certificate()
self.assertEqual(len(self.certificates_page.certificates), 0)
|
Shrhawk/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings_certificates.py
|
Python
|
agpl-3.0
| 8,447
|
[
"VisIt"
] |
202298a7df2afc8acb89cee92920b49b6041ca4fcf4fd0b1d048aced7200841e
|
# -*- coding: utf-8 -*-
"""So much practical programming involves string manipulation, which
Python readily accomodates. Still, there are dozens of basic and
common capabilities missing from the standard library, several of them
provided by ``strutils``.
"""
from __future__ import print_function
import re
import sys
import uuid
import zlib
import string
import unicodedata
import collections
try:
unicode, str, bytes, basestring = unicode, str, str, basestring
from HTMLParser import HTMLParser
import htmlentitydefs
except NameError: # basestring not defined in Python 3
unicode, str, bytes, basestring = str, bytes, bytes, (str, bytes)
unichr = chr
from html.parser import HTMLParser
from html import entities as htmlentitydefs
__all__ = ['camel2under', 'under2camel', 'slugify', 'split_punct_ws',
'unit_len', 'ordinalize', 'cardinalize', 'pluralize', 'singularize',
'asciify', 'is_ascii', 'is_uuid', 'html2text', 'strip_ansi',
'bytes2human', 'find_hashtags', 'a10n', 'gunzip_bytes',
'iter_splitlines', 'indent', 'escape_shell_args',
'args2cmd', 'args2sh', 'parse_int_list', 'format_int_list']
_punct_ws_str = string.punctuation + string.whitespace
_punct_re = re.compile('[' + _punct_ws_str + ']+')
_camel2under_re = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
def camel2under(camel_string):
"""Converts a camelcased string to underscores. Useful for turning a
class name into a function name.
>>> camel2under('BasicParseTest')
'basic_parse_test'
"""
return _camel2under_re.sub(r'_\1', camel_string).lower()
def under2camel(under_string):
"""Converts an underscored string to camelcased. Useful for turning a
function name into a class name.
>>> under2camel('complex_tokenizer')
'ComplexTokenizer'
"""
return ''.join(w.capitalize() or '_' for w in under_string.split('_'))
def slugify(text, delim='_', lower=True, ascii=False):
"""
A basic function that turns text full of scary characters
(i.e., punctuation and whitespace), into a relatively safe
lowercased string separated only by the delimiter specified
by *delim*, which defaults to ``_``.
The *ascii* convenience flag will :func:`asciify` the slug if
you require ascii-only slugs.
>>> slugify('First post! Hi!!!!~1 ')
'first_post_hi_1'
>>> slugify("Kurt Gödel's pretty cool.", ascii=True) == \
b'kurt_goedel_s_pretty_cool'
True
"""
ret = delim.join(split_punct_ws(text)) or delim if text else ''
if ascii:
ret = asciify(ret)
if lower:
ret = ret.lower()
return ret
def split_punct_ws(text):
"""While :meth:`str.split` will split on whitespace,
:func:`split_punct_ws` will split on punctuation and
whitespace. This used internally by :func:`slugify`, above.
>>> split_punct_ws('First post! Hi!!!!~1 ')
['First', 'post', 'Hi', '1']
"""
return [w for w in _punct_re.split(text) if w]
def unit_len(sized_iterable, unit_noun='item'): # TODO: len_units()/unitize()?
"""Returns a plain-English description of an iterable's
:func:`len()`, conditionally pluralized with :func:`cardinalize`,
detailed below.
>>> print(unit_len(range(10), 'number'))
10 numbers
>>> print(unit_len('aeiou', 'vowel'))
5 vowels
>>> print(unit_len([], 'worry'))
No worries
"""
count = len(sized_iterable)
units = cardinalize(unit_noun, count)
if count:
return u'%s %s' % (count, units)
return u'No %s' % (units,)
_ORDINAL_MAP = {'1': 'st',
'2': 'nd',
'3': 'rd'} # 'th' is the default
def ordinalize(number, ext_only=False):
"""Turns *number* into its cardinal form, i.e., 1st, 2nd,
3rd, 4th, etc. If the last character isn't a digit, it returns the
string value unchanged.
Args:
number (int or str): Number to be cardinalized.
ext_only (bool): Whether to return only the suffix. Default ``False``.
>>> print(ordinalize(1))
1st
>>> print(ordinalize(3694839230))
3694839230th
>>> print(ordinalize('hi'))
hi
>>> print(ordinalize(1515))
1515th
"""
numstr, ext = unicode(number), ''
if numstr and numstr[-1] in string.digits:
try:
# first check for teens
if numstr[-2] == '1':
ext = 'th'
else:
# all other cases
ext = _ORDINAL_MAP.get(numstr[-1], 'th')
except IndexError:
# single digit numbers (will reach here based on [-2] above)
ext = _ORDINAL_MAP.get(numstr[-1], 'th')
if ext_only:
return ext
else:
return numstr + ext
def cardinalize(unit_noun, count):
"""Conditionally pluralizes a singular word *unit_noun* if
*count* is not one, preserving case when possible.
>>> vowels = 'aeiou'
>>> print(len(vowels), cardinalize('vowel', len(vowels)))
5 vowels
>>> print(3, cardinalize('Wish', 3))
3 Wishes
"""
if count == 1:
return unit_noun
return pluralize(unit_noun)
def singularize(word):
"""Semi-intelligently converts an English plural *word* to its
singular form, preserving case pattern.
>>> singularize('records')
'record'
>>> singularize('FEET')
'FOOT'
"""
orig_word, word = word, word.strip().lower()
if not word or word in _IRR_S2P:
return orig_word
irr_singular = _IRR_P2S.get(word)
if irr_singular:
singular = irr_singular
elif not word.endswith('s'):
return orig_word
elif len(word) == 2:
singular = word[:-1] # or just return word?
elif word.endswith('ies') and word[-5:-4] not in 'aeiou':
singular = word[:-3] + 'y'
elif word.endswith('es'):
singular = word[:-2]
else:
singular = word[:-1]
return _match_case(orig_word, singular)
def pluralize(word):
"""Semi-intelligently converts an English *word* from singular form to
plural, preserving case pattern.
>>> pluralize('friend')
'friends'
>>> pluralize('enemy')
'enemies'
>>> pluralize('Sheep')
'Sheep'
"""
orig_word, word = word, word.strip().lower()
if not word or word in _IRR_P2S:
return orig_word
irr_plural = _IRR_S2P.get(word)
if irr_plural:
plural = irr_plural
elif word.endswith('y') and word[-2:-1] not in 'aeiou':
plural = word[:-1] + 'ies'
elif word[-1] == 's' or word.endswith('ch') or word.endswith('sh'):
plural = word if word.endswith('es') else word + 'es'
else:
plural = word + 's'
return _match_case(orig_word, plural)
def _match_case(master, disciple):
if not master.strip():
return disciple
if master.lower() == master:
return disciple.lower()
elif master.upper() == master:
return disciple.upper()
elif master.capitalize() == master:
return disciple.capitalize()
return disciple
# Singular to plural map of irregular pluralizations
_IRR_S2P = {'addendum': 'addenda', 'alga': 'algae', 'alumna': 'alumnae',
'alumnus': 'alumni', 'analysis': 'analyses', 'antenna': 'antennae',
'appendix': 'appendices', 'axis': 'axes', 'bacillus': 'bacilli',
'bacterium': 'bacteria', 'basis': 'bases', 'beau': 'beaux',
'bison': 'bison', 'bureau': 'bureaus', 'cactus': 'cacti',
'calf': 'calves', 'child': 'children', 'corps': 'corps',
'corpus': 'corpora', 'crisis': 'crises', 'criterion': 'criteria',
'curriculum': 'curricula', 'datum': 'data', 'deer': 'deer',
'diagnosis': 'diagnoses', 'die': 'dice', 'dwarf': 'dwarves',
'echo': 'echoes', 'elf': 'elves', 'ellipsis': 'ellipses',
'embargo': 'embargoes', 'emphasis': 'emphases', 'erratum': 'errata',
'fireman': 'firemen', 'fish': 'fish', 'focus': 'foci',
            'foot': 'feet', 'formula': 'formulas',
'fungus': 'fungi', 'genus': 'genera', 'goose': 'geese',
'half': 'halves', 'hero': 'heroes', 'hippopotamus': 'hippopotami',
'hoof': 'hooves', 'hypothesis': 'hypotheses', 'index': 'indices',
'knife': 'knives', 'leaf': 'leaves', 'life': 'lives',
'loaf': 'loaves', 'louse': 'lice', 'man': 'men',
'matrix': 'matrices', 'means': 'means', 'medium': 'media',
            'memorandum': 'memoranda', 'millennium': 'millennia', 'moose': 'moose',
'mosquito': 'mosquitoes', 'mouse': 'mice', 'nebula': 'nebulae',
'neurosis': 'neuroses', 'nucleus': 'nuclei', 'oasis': 'oases',
'octopus': 'octopi', 'offspring': 'offspring', 'ovum': 'ova',
'ox': 'oxen', 'paralysis': 'paralyses', 'parenthesis': 'parentheses',
'person': 'people', 'phenomenon': 'phenomena', 'potato': 'potatoes',
'radius': 'radii', 'scarf': 'scarves', 'scissors': 'scissors',
'self': 'selves', 'series': 'series', 'sheep': 'sheep',
'shelf': 'shelves', 'species': 'species', 'stimulus': 'stimuli',
'stratum': 'strata', 'syllabus': 'syllabi', 'symposium': 'symposia',
'synopsis': 'synopses', 'synthesis': 'syntheses', 'tableau': 'tableaux',
'that': 'those', 'thesis': 'theses', 'thief': 'thieves',
'this': 'these', 'tomato': 'tomatoes', 'tooth': 'teeth',
'torpedo': 'torpedoes', 'vertebra': 'vertebrae', 'veto': 'vetoes',
'vita': 'vitae', 'watch': 'watches', 'wife': 'wives',
'wolf': 'wolves', 'woman': 'women'}
# Reverse index of the above
_IRR_P2S = dict([(v, k) for k, v in _IRR_S2P.items()])
HASHTAG_RE = re.compile(r"(?:^|\s)[##]{1}(\w+)", re.UNICODE)
def find_hashtags(string):
"""Finds and returns all hashtags in a string, with the hashmark
removed. Supports full-width hashmarks for Asian languages and
does not false-positive on URL anchors.
>>> find_hashtags('#atag http://asite/#ananchor')
['atag']
``find_hashtags`` also works with unicode hashtags.
"""
# the following works, doctest just struggles with it
# >>> find_hashtags(u"can't get enough of that dignity chicken #肯德基 woo")
# [u'\u80af\u5fb7\u57fa']
return HASHTAG_RE.findall(string)
def a10n(string):
"""That thing where "internationalization" becomes "i18n", what's it
called? Abbreviation? Oh wait, no: ``a10n``. (It's actually a form
of `numeronym`_.)
>>> a10n('abbreviation')
'a10n'
>>> a10n('internationalization')
'i18n'
>>> a10n('')
''
.. _numeronym: http://en.wikipedia.org/wiki/Numeronym
"""
if len(string) < 3:
return string
return '%s%s%s' % (string[0], len(string[1:-1]), string[-1])
class StringBuffer(object):
"""
This is meant to be a better file-like string buffer.
Faster than StringIO, better encoding handling than cStringIO.
This one is for unicode text strings. Look for ByteBuffer if you
want to handle byte strings.
(NOTE: not quite done yet)
"""
def __init__(self, default_encoding=None, errors='strict'):
self.data = collections.deque()
self.default_encoding = default_encoding or 'utf-8'
self.errors = errors
def write(self, s):
if not isinstance(s, unicode):
enc = self.default_encoding
errs = self.errors
try:
s = s.decode(enc, errs)
except AttributeError:
raise ValueError('write() expected a unicode or byte string')
self.data.append(s)
def truncate(self):
self.data = collections.deque()
self.write = self.data.append
def getvalue(self):
return unicode().join(self.data)
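# A minimal usage sketch (not part of boltons): exercise StringBuffer with the
# default utf-8 encoding by writing both text and encoded bytes, then reading
# the combined value back. The helper name `_string_buffer_example` is
# hypothetical and exists only for illustration.
def _string_buffer_example():
    buf = StringBuffer()
    buf.write(u'héllo ')
    buf.write(u'wörld'.encode('utf-8'))  # byte strings are decoded on write
    return buf.getvalue()  # u'héllo wörld'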
ANSI_ESCAPE_BEGIN = '\x1b['
ANSI_TERMINATORS = ('H', 'f', 'A', 'B', 'C', 'D', 'R', 's', 'u', 'J',
'K', 'h', 'l', 'p', 'm')
def strip_ansi(text):
"""Strips ANSI escape codes from *text*. Useful for the occasional
time when a log or redirected output accidentally captures console
color codes and the like.
>>> strip_ansi('\x1b[0m\x1b[1;36mart\x1b[46;34m\xdc')
'art'
The test above is an excerpt from ANSI art on
`sixteencolors.net`_. This function does not interpret or render
ANSI art, but you can do so with `ansi2img`_ or `escapes.js`_.
.. _sixteencolors.net: http://sixteencolors.net
.. _ansi2img: http://www.bedroomlan.org/projects/ansi2img
.. _escapes.js: https://github.com/atdt/escapes.js
"""
# TODO: move to cliutils.py
nansi, keep, i, text_len = [], True, 0, len(text)
while i < text_len:
if not keep and text[i] in ANSI_TERMINATORS:
keep = True
elif keep:
keep_end_i = text.find(ANSI_ESCAPE_BEGIN, i)
if keep_end_i < 0:
break
else:
nansi.append(text[i:keep_end_i])
i, keep = keep_end_i, False
i += 1
if not nansi:
return text
return type(text)().join(nansi) # attempted unicode + str support
def asciify(text, ignore=False):
"""Converts a unicode or bytestring, *text*, into a bytestring with
just ascii characters. Performs basic deaccenting for all you
Europhiles out there.
Also, a gentle reminder that this is a **utility**, primarily meant
for slugification. Whenever possible, make your application work
**with** unicode, not against it.
Args:
text (str or unicode): The string to be asciified.
ignore (bool): Configures final encoding to ignore remaining
unasciified unicode instead of replacing it.
>>> asciify('Beyoncé') == b'Beyonce'
True
"""
try:
try:
return text.encode('ascii')
except UnicodeDecodeError:
# this usually means you passed in a non-unicode string
text = text.decode('utf-8')
return text.encode('ascii')
except UnicodeEncodeError:
mode = 'replace'
if ignore:
mode = 'ignore'
transd = unicodedata.normalize('NFKD', text.translate(DEACCENT_MAP))
ret = transd.encode('ascii', mode)
return ret
def is_ascii(text):
"""Check if a unicode or bytestring, *text*, is composed of ascii
characters only. Raises :exc:`ValueError` if argument is not text.
Args:
text (str or unicode): The string to be checked.
>>> is_ascii('Beyoncé')
False
>>> is_ascii('Beyonce')
True
"""
if isinstance(text, unicode):
try:
text.encode('ascii')
except UnicodeEncodeError:
return False
elif isinstance(text, bytes):
try:
text.decode('ascii')
except UnicodeDecodeError:
return False
else:
raise ValueError('expected text or bytes, not %r' % type(text))
return True
class DeaccenterDict(dict):
"A small caching dictionary for deaccenting."
def __missing__(self, key):
ch = self.get(key)
if ch is not None:
return ch
try:
de = unicodedata.decomposition(unichr(key))
p1, _, p2 = de.rpartition(' ')
if int(p2, 16) == 0x308:
ch = self.get(key)
else:
ch = int(p1, 16)
except (IndexError, ValueError):
ch = self.get(key, key)
self[key] = ch
return ch
try:
from collections import defaultdict
except ImportError:
# no defaultdict means that __missing__ isn't supported in
# this version of python, so we define __getitem__
def __getitem__(self, key):
try:
return super(DeaccenterDict, self).__getitem__(key)
except KeyError:
return self.__missing__(key)
else:
del defaultdict
# http://chmullig.com/2009/12/python-unicode-ascii-ifier/
# For something more complete, investigate the unidecode
# or isounidecode packages, which are capable of performing
# crude transliteration.
_BASE_DEACCENT_MAP = {
0xc6: u"AE", # Æ LATIN CAPITAL LETTER AE
0xd0: u"D", # Ð LATIN CAPITAL LETTER ETH
0xd8: u"OE", # Ø LATIN CAPITAL LETTER O WITH STROKE
0xde: u"Th", # Þ LATIN CAPITAL LETTER THORN
0xc4: u'Ae', # Ä LATIN CAPITAL LETTER A WITH DIAERESIS
0xd6: u'Oe', # Ö LATIN CAPITAL LETTER O WITH DIAERESIS
0xdc: u'Ue', # Ü LATIN CAPITAL LETTER U WITH DIAERESIS
0xc0: u"A", # À LATIN CAPITAL LETTER A WITH GRAVE
0xc1: u"A", # Á LATIN CAPITAL LETTER A WITH ACUTE
0xc3: u"A", # Ã LATIN CAPITAL LETTER A WITH TILDE
0xc7: u"C", # Ç LATIN CAPITAL LETTER C WITH CEDILLA
0xc8: u"E", # È LATIN CAPITAL LETTER E WITH GRAVE
0xc9: u"E", # É LATIN CAPITAL LETTER E WITH ACUTE
0xca: u"E", # Ê LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0xcc: u"I", # Ì LATIN CAPITAL LETTER I WITH GRAVE
0xcd: u"I", # Í LATIN CAPITAL LETTER I WITH ACUTE
0xd2: u"O", # Ò LATIN CAPITAL LETTER O WITH GRAVE
0xd3: u"O", # Ó LATIN CAPITAL LETTER O WITH ACUTE
0xd5: u"O", # Õ LATIN CAPITAL LETTER O WITH TILDE
0xd9: u"U", # Ù LATIN CAPITAL LETTER U WITH GRAVE
0xda: u"U", # Ú LATIN CAPITAL LETTER U WITH ACUTE
0xdf: u"ss", # ß LATIN SMALL LETTER SHARP S
0xe6: u"ae", # æ LATIN SMALL LETTER AE
0xf0: u"d", # ð LATIN SMALL LETTER ETH
0xf8: u"oe", # ø LATIN SMALL LETTER O WITH STROKE
0xfe: u"th", # þ LATIN SMALL LETTER THORN,
0xe4: u'ae', # ä LATIN SMALL LETTER A WITH DIAERESIS
0xf6: u'oe', # ö LATIN SMALL LETTER O WITH DIAERESIS
0xfc: u'ue', # ü LATIN SMALL LETTER U WITH DIAERESIS
0xe0: u"a", # à LATIN SMALL LETTER A WITH GRAVE
0xe1: u"a", # á LATIN SMALL LETTER A WITH ACUTE
0xe3: u"a", # ã LATIN SMALL LETTER A WITH TILDE
0xe7: u"c", # ç LATIN SMALL LETTER C WITH CEDILLA
0xe8: u"e", # è LATIN SMALL LETTER E WITH GRAVE
0xe9: u"e", # é LATIN SMALL LETTER E WITH ACUTE
0xea: u"e", # ê LATIN SMALL LETTER E WITH CIRCUMFLEX
0xec: u"i", # ì LATIN SMALL LETTER I WITH GRAVE
0xed: u"i", # í LATIN SMALL LETTER I WITH ACUTE
0xf2: u"o", # ò LATIN SMALL LETTER O WITH GRAVE
0xf3: u"o", # ó LATIN SMALL LETTER O WITH ACUTE
0xf5: u"o", # õ LATIN SMALL LETTER O WITH TILDE
0xf9: u"u", # ù LATIN SMALL LETTER U WITH GRAVE
0xfa: u"u", # ú LATIN SMALL LETTER U WITH ACUTE
0x2018: u"'", # ‘ LEFT SINGLE QUOTATION MARK
0x2019: u"'", # ’ RIGHT SINGLE QUOTATION MARK
0x201c: u'"', # “ LEFT DOUBLE QUOTATION MARK
0x201d: u'"', # ” RIGHT DOUBLE QUOTATION MARK
}
DEACCENT_MAP = DeaccenterDict(_BASE_DEACCENT_MAP)
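# A small sketch (not part of boltons) of how asciify() applies this map under
# the hood: str.translate accepts a mapping from code points to replacement
# strings, so DEACCENT_MAP can also be used directly. The helper name
# `_deaccent_example` is hypothetical.
def _deaccent_example():
    # 0xf6 (ö) maps to 'oe' above, so the result should be u'Kurt Goedel'
    return u'Kurt Gödel'.translate(DEACCENT_MAP)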
_SIZE_SYMBOLS = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
_SIZE_BOUNDS = [(1024 ** i, sym) for i, sym in enumerate(_SIZE_SYMBOLS)]
_SIZE_RANGES = list(zip(_SIZE_BOUNDS, _SIZE_BOUNDS[1:]))
def bytes2human(nbytes, ndigits=0):
"""Turns an integer value of *nbytes* into a human readable format. Set
*ndigits* to control how many digits after the decimal point
should be shown (default ``0``).
>>> bytes2human(128991)
'126K'
>>> bytes2human(100001221)
'95M'
>>> bytes2human(0, 2)
'0.00B'
"""
abs_bytes = abs(nbytes)
for (size, symbol), (next_size, next_symbol) in _SIZE_RANGES:
if abs_bytes <= next_size:
break
hnbytes = float(nbytes) / size
return '{hnbytes:.{ndigits}f}{symbol}'.format(hnbytes=hnbytes,
ndigits=ndigits,
symbol=symbol)
class HTMLTextExtractor(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.result = []
def handle_data(self, d):
self.result.append(d)
def handle_charref(self, number):
if number[0] == u'x' or number[0] == u'X':
codepoint = int(number[1:], 16)
else:
codepoint = int(number)
self.result.append(unichr(codepoint))
def handle_entityref(self, name):
try:
codepoint = htmlentitydefs.name2codepoint[name]
except KeyError:
self.result.append(u'&' + name + u';')
else:
self.result.append(unichr(codepoint))
def get_text(self):
return u''.join(self.result)
def html2text(html):
"""Strips tags from HTML text, returning markup-free text. Also, does
a best effort replacement of entities like " "
>>> r = html2text(u'<a href="#">Test &<em>(\u0394ημώ)</em></a>')
>>> r == u'Test &(\u0394\u03b7\u03bc\u03ce)'
True
"""
# based on answers to http://stackoverflow.com/questions/753052/
s = HTMLTextExtractor()
s.feed(html)
return s.get_text()
_EMPTY_GZIP_BYTES = b'\x1f\x8b\x08\x089\xf3\xb9U\x00\x03empty\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_NON_EMPTY_GZIP_BYTES = b'\x1f\x8b\x08\x08\xbc\xf7\xb9U\x00\x03not_empty\x00K\xaa,I-N\xcc\xc8\xafT\xe4\x02\x00\xf3nb\xbf\x0b\x00\x00\x00'
def gunzip_bytes(bytestring):
"""The :mod:`gzip` module is great if you have a file or file-like
    object, but what if you just have bytes? StringIO is one
possibility, but it's often faster, easier, and simpler to just
use this one-liner. Use this tried-and-true utility function to
decompress gzip from bytes.
>>> gunzip_bytes(_EMPTY_GZIP_BYTES) == b''
True
>>> gunzip_bytes(_NON_EMPTY_GZIP_BYTES).rstrip() == b'bytesahoy!'
True
"""
return zlib.decompress(bytestring, 16 + zlib.MAX_WBITS)
_line_ending_re = re.compile(r'(\r\n|\n|\x0b|\f|\r|\x85|\x2028|\x2029)',
re.UNICODE)
def iter_splitlines(text):
r"""Like :meth:`str.splitlines`, but returns an iterator of lines
instead of a list. Also similar to :meth:`file.next`, as that also
lazily reads and yields lines from a file.
This function works with a variety of line endings, but as always,
be careful when mixing line endings within a file.
>>> list(iter_splitlines('\nhi\nbye\n'))
['', 'hi', 'bye', '']
>>> list(iter_splitlines('\r\nhi\rbye\r\n'))
['', 'hi', 'bye', '']
>>> list(iter_splitlines(''))
[]
"""
prev_end, len_text = 0, len(text)
# print('last: %r' % last_idx)
# start, end = None, None
for match in _line_ending_re.finditer(text):
start, end = match.start(1), match.end(1)
# print(start, end)
if prev_end <= start:
yield text[prev_end:start]
if end == len_text:
yield ''
prev_end = end
tail = text[prev_end:]
if tail:
yield tail
return
def indent(text, margin, newline='\n', key=bool):
"""The missing counterpart to the built-in :func:`textwrap.dedent`.
Args:
text (str): The text to indent.
margin (str): The string to prepend to each line.
newline (str): The newline used to rejoin the lines (default: ``\\n``)
key (callable): Called on each line to determine whether to
indent it. Default: :class:`bool`, to ensure that empty lines do
not get whitespace added.
"""
indented_lines = [(margin + line if key(line) else line)
for line in iter_splitlines(text)]
return newline.join(indented_lines)
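# A minimal usage sketch (not part of boltons): indent a short multi-line
# string by four spaces while the default `key=bool` leaves the blank line
# untouched. The helper name `_indent_example` is hypothetical.
def _indent_example():
    text = 'line one\n\nline two'
    return indent(text, '    ')  # '    line one\n\n    line two'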
def is_uuid(obj, version=4):
"""Check the argument is either a valid UUID object or string.
Args:
obj (object): The test target. Strings and UUID objects supported.
version (int): The target UUID version, set to 0 to skip version check.
>>> is_uuid('e682ccca-5a4c-4ef2-9711-73f9ad1e15ea')
True
>>> is_uuid('0221f0d9-d4b9-11e5-a478-10ddb1c2feb9')
False
>>> is_uuid('0221f0d9-d4b9-11e5-a478-10ddb1c2feb9', version=1)
True
"""
if not isinstance(obj, uuid.UUID):
try:
obj = uuid.UUID(obj)
except (TypeError, ValueError, AttributeError):
return False
if version and obj.version != int(version):
return False
return True
def escape_shell_args(args, sep=' ', style=None):
"""Returns an escaped version of each string in *args*, according to
*style*.
Args:
args (list): A list of arguments to escape and join together
sep (str): The separator used to join the escaped arguments.
style (str): The style of escaping to use. Can be one of
``cmd`` or ``sh``, geared toward Windows and Linux/BSD/etc.,
respectively. If *style* is ``None``, then it is picked
according to the system platform.
See :func:`args2cmd` and :func:`args2sh` for details and example
output for each style.
"""
if not style:
style = 'cmd' if sys.platform == 'win32' else 'sh'
if style == 'sh':
return args2sh(args, sep=sep)
elif style == 'cmd':
return args2cmd(args, sep=sep)
raise ValueError("style expected one of 'cmd' or 'sh', not %r" % style)
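# A minimal usage sketch (not part of boltons): force the 'sh' style regardless
# of the current platform. The expected output assumes the sh quoting rules
# implemented by args2sh below; the helper name is hypothetical.
def _escape_shell_args_example():
    return escape_shell_args(['ls', '-l', 'my file.txt'], style='sh')
    # "ls -l 'my file.txt'"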
_find_sh_unsafe = re.compile(r'[^a-zA-Z0-9_@%+=:,./-]').search
def args2sh(args, sep=' '):
"""Return a shell-escaped string version of *args*, separated by
*sep*, based on the rules of sh, bash, and other shells in the
Linux/BSD/MacOS ecosystem.
>>> print(args2sh(['aa', '[bb]', "cc'cc", 'dd"dd']))
aa '[bb]' 'cc'"'"'cc' 'dd"dd'
As you can see, arguments with no special characters are not
escaped, arguments with special characters are quoted with single
quotes, and single quotes themselves are quoted with double
quotes. Double quotes are handled like any other special
character.
Based on code from the :mod:`pipes`/:mod:`shlex` modules. Also
note that :mod:`shlex` and :mod:`argparse` have functions to split
and parse strings escaped in this manner.
"""
ret_list = []
for arg in args:
if not arg:
ret_list.append("''")
continue
if _find_sh_unsafe(arg) is None:
ret_list.append(arg)
continue
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
ret_list.append("'" + arg.replace("'", "'\"'\"'") + "'")
    return sep.join(ret_list)
def args2cmd(args, sep=' '):
r"""Return a shell-escaped string version of *args*, separated by
*sep*, using the same rules as the Microsoft C runtime.
>>> print(args2cmd(['aa', '[bb]', "cc'cc", 'dd"dd']))
aa [bb] cc'cc dd\"dd
As you can see, escaping is through backslashing and not quoting,
and double quotes are the only special character. See the comment
in the code for more details. Based on internal code from the
:mod:`subprocess` module.
"""
# technique description from subprocess below
"""
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
or search http://msdn.microsoft.com for
"Parsing C++ Command-Line Arguments"
"""
result = []
needquote = False
for arg in args:
bs_buf = []
        # Add the separator to set this argument off from the previous one
        if result:
            result.append(sep)
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
def parse_int_list(range_string, delim=',', range_delim='-'):
"""Returns a sorted list of positive integers based on
*range_string*. Reverse of :func:`format_int_list`.
Args:
range_string (str): String of comma separated positive
integers or ranges (e.g. '1,2,4-6,8'). Typical of a custom
page range string used in printer dialogs.
delim (char): Defaults to ','. Separates integers and
contiguous ranges of integers.
range_delim (char): Defaults to '-'. Indicates a contiguous
range of integers.
>>> parse_int_list('1,3,5-8,10-11,15')
[1, 3, 5, 6, 7, 8, 10, 11, 15]
"""
output = []
for x in range_string.strip().split(delim):
# Range
if range_delim in x:
range_limits = list(map(int, x.split(range_delim)))
output += list(range(min(range_limits), max(range_limits)+1))
# Empty String
elif not x:
continue
# Integer
else:
output.append(int(x))
return sorted(output)
def format_int_list(int_list, delim=',', range_delim='-', delim_space=False):
"""Returns a sorted range string from a list of positive integers
(*int_list*). Contiguous ranges of integers are collapsed to min
and max values. Reverse of :func:`parse_int_list`.
Args:
int_list (list): List of positive integers to be converted
into a range string (e.g. [1,2,4,5,6,8]).
delim (char): Defaults to ','. Separates integers and
contiguous ranges of integers.
range_delim (char): Defaults to '-'. Indicates a contiguous
range of integers.
delim_space (bool): Defaults to ``False``. If ``True``, adds a
space after all *delim* characters.
>>> format_int_list([1,3,5,6,7,8,10,11,15])
'1,3,5-8,10-11,15'
"""
output = []
contig_range = collections.deque()
for x in sorted(int_list):
# Handle current (and first) value.
if len(contig_range) < 1:
contig_range.append(x)
# Handle current value, given multiple previous values are contiguous.
elif len(contig_range) > 1:
delta = x - contig_range[-1]
# Current value is contiguous.
if delta == 1:
contig_range.append(x)
# Current value is non-contiguous.
elif delta > 1:
range_substr = '{0:d}{1}{2:d}'.format(min(contig_range),
range_delim,
max(contig_range))
output.append(range_substr)
contig_range.clear()
contig_range.append(x)
# Current value repeated.
else:
continue
# Handle current value, given no previous contiguous integers
else:
delta = x - contig_range[0]
# Current value is contiguous.
if delta == 1:
contig_range.append(x)
# Current value is non-contiguous.
elif delta > 1:
output.append('{0:d}'.format(contig_range.popleft()))
contig_range.append(x)
# Current value repeated.
else:
continue
# Handle the last value.
else:
# Last value is non-contiguous.
if len(contig_range) == 1:
output.append('{0:d}'.format(contig_range.popleft()))
contig_range.clear()
# Last value is part of contiguous range.
elif len(contig_range) > 1:
range_substr = '{0:d}{1}{2:d}'.format(min(contig_range),
range_delim,
max(contig_range))
output.append(range_substr)
contig_range.clear()
if delim_space:
output_str = (delim+' ').join(output)
else:
output_str = delim.join(output)
return output_str
|
zeroSteiner/boltons
|
boltons/strutils.py
|
Python
|
bsd-3-clause
| 32,988
|
[
"MOOSE",
"Octopus"
] |
1cd8d418e95219d8d3ac34a15eb50eccbd7eaf6c0d1a79986c6e8095cd1a1da0
|
import collect_array as ca
import collect_device as cd
import collect_gen as cg
import collect_id as ci
from processing import collect_loop as cl
def print_dict_sorted(mydict):
keys = sorted(mydict)
entries = ""
for key in keys:
value = mydict[key]
entries += "'" + key + "': " + value.__repr__() + ","
return "{" + entries[:-1] + "}"
class FindPerfectForLoop(object):
def __init__(self):
self.perfect_for_loop = cl.FindPerfectForLoop()
self.ParDim = None
def collect(self, ast):
self.perfect_for_loop.visit(ast)
@property
def par_dim(self):
if self.ParDim is None:
return self.perfect_for_loop.depth
else:
return self.ParDim
class FindLocal(FindPerfectForLoop):
def __init__(self):
super(FindLocal, self).__init__()
self.Local = dict()
self.Local['name'] = 'LSIZE'
self.Local['size'] = ['64']
def collect(self, ast, dev='CPU'):
super(FindLocal, self).collect(ast)
if self.par_dim == 1:
self.Local['size'] = ['256']
if dev == 'CPU':
self.Local['size'] = ['16']
else:
self.Local['size'] = ['16', '16']
if dev == 'CPU':
self.Local['size'] = ['4', '4']
class FindGridIndices(FindPerfectForLoop):
def __init__(self):
super(FindGridIndices, self).__init__()
self.GridIndices = list()
self.Kernel = None
self.IdxToDim = dict()
self.RefToLoop = dict()
def collect(self, ast):
super(FindGridIndices, self).collect(ast)
fker = cd.FindKernel(self.par_dim)
fker.visit(ast)
self.Kernel = fker.kernel
col_li = cl.LoopIndices(self.par_dim)
col_li.visit(ast)
self.GridIndices = col_li.grid_indices
gi_to_dim = cg.GenIdxToDim()
gi_to_dim.collect(ast, self.par_dim)
self.IdxToDim = gi_to_dim.IdxToDim
find_ref_to_loop_index = ca.FindRefToLoopIndex()
find_ref_to_loop_index.collect(ast)
self.RefToLoop = find_ref_to_loop_index.RefToLoop
class FindLoops(FindPerfectForLoop):
def __init__(self):
super(FindLoops, self).__init__()
self.ArrayIdToDimName = dict()
self.Loops = dict()
self.col_loop_limit = cl.LoopLimit()
self.num_array_dims = dict()
def collect(self, ast):
super(FindLoops, self).collect(ast)
find_inner_loops = cl.FindInnerLoops()
find_inner_loops.collect(ast)
self.Loops = find_inner_loops.Loops
self.col_loop_limit = cl.LoopLimit()
self.col_loop_limit.visit(ast)
num_array_dim = ca.NumArrayDim(ast)
num_array_dim.visit(ast)
self.num_array_dims = num_array_dim.numSubscripts
gen_array_dim_names = cg.GenArrayDimNames()
gen_array_dim_names.collect(ast)
self.ArrayIdToDimName = gen_array_dim_names.ArrayIdToDimName
@property
def upper_limit(self):
return self.col_loop_limit.upper_limit
@property
def lower_limit(self):
return self.col_loop_limit.lower_limit
class FindSubscripts(FindLoops):
def __init__(self):
super(FindSubscripts, self).__init__()
self.Subscript = dict()
self.SubscriptNoId = dict()
def collect(self, ast):
super(FindSubscripts, self).collect(ast)
self.Subscript = ca.get_subscript(ast)
self.SubscriptNoId = ca.get_subscript_no_id(ast)
class RemovedLoopLimit(FindLoops):
def __init__(self):
super(RemovedLoopLimit, self).__init__()
self.RemovedIds = set()
self.ParDim = None
def collect(self, ast):
super(RemovedLoopLimit, self).collect(ast)
fgi = FindGridIndices()
fgi.ParDim = self.ParDim
fgi.collect(ast)
find_removed_ids = cg.GenRemovedIds()
find_removed_ids.collect(ast)
self.RemovedIds = find_removed_ids.removed_ids
class FindArrayIds(RemovedLoopLimit):
def __init__(self):
super(FindArrayIds, self).__init__()
self.ArrayIds = set()
self.NonArrayIds = set()
self.type = set()
def collect(self, ast):
super(FindArrayIds, self).collect(ast)
self.ArrayIds = ca.get_array_ids(ast)
# print self.ArrayIds
nonarray_ids = ci.GlobalNonArrayIds()
nonarray_ids.visit(ast)
self.NonArrayIds = nonarray_ids.ids
mytype_ids = ci.GlobalTypeIds()
mytype_ids.visit(ast)
# print print_dict_sorted(mytype_ids.dictIds)
self.type = mytype_ids.types
class FindArrayIdsKernel(FindArrayIds):
def __init__(self):
super(FindArrayIdsKernel, self).__init__()
self.kernel_args = dict()
def collect(self, ast):
super(FindArrayIdsKernel, self).collect(ast)
gen_kernel_args = cg.GenKernelArgs()
gen_kernel_args.collect(ast)
self.kernel_args = gen_kernel_args.kernel_args
class FindReadWrite(FindArrayIdsKernel):
def __init__(self):
super(FindReadWrite, self).__init__()
self.ReadWrite = dict()
self.WriteOnly = list()
self.ReadOnly = list()
def collect(self, ast):
super(FindReadWrite, self).collect(ast)
find_read_write = ca.FindReadWrite()
find_read_write.collect(ast)
self.ReadWrite = find_read_write.ReadWrite
for n in self.ReadWrite:
pset = self.ReadWrite[n]
if len(pset) == 1:
if 'write' in pset:
self.WriteOnly.append(n)
else:
self.ReadOnly.append(n)
|
dikujepsen/OpenTran
|
v2.0/framework/unused/collect_transformation_info_unused.py
|
Python
|
mit
| 5,686
|
[
"VisIt"
] |
0af9fc50bd421e0ce04c7d805c88c569ced5d81a2808723341e1727aea1df8ad
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes functions with multiple returns to use just one."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
BODY_DEFINITELY_RETURNS = 'BODY_DEFINITELY_RETURNS'
ORELSE_DEFINITELY_RETURNS = 'ORELSE_DEFINITELY_RETURNS'
STMT_DEFINITELY_RETURNS = 'STMT_DEFINITELY_RETURNS'
class _RewriteBlock(object):
def __init__(self):
self.definitely_returns = False
class ConditionalReturnRewriter(converter.Base):
"""Rewrites a a pattern where it's unbovious that all paths return a value.
This rewrite allows avoiding intermediate None return values.
  The following pattern:
      if cond:
        <block 1>
        return
      else:
        <block 2>
      <block 3>
  is converted to:
      if cond:
        <block 1>
        return
      else:
        <block 2>
        <block 3>
and vice-versa (if the else returns, subsequent statements are moved under the
if branch).
"""
def visit_Return(self, node):
self.state[_RewriteBlock].definitely_returns = True
return node
def _postprocess_statement(self, node):
# If the node definitely returns (e.g. it's a with statement with a
# return statement in it), then the current block also definitely returns.
if anno.getanno(node, STMT_DEFINITELY_RETURNS, default=False):
self.state[_RewriteBlock].definitely_returns = True
# The special case: collapse a typical conditional return pattern into
# a single conditional with possibly returns on both branches. This
# reduces the use of None return values, which don't work with TF
# conditionals.
if (isinstance(node, gast.If)
and anno.getanno(node, BODY_DEFINITELY_RETURNS, default=False)):
return node, node.orelse
elif (isinstance(node, gast.If)
and anno.getanno(node, ORELSE_DEFINITELY_RETURNS, default=False)):
return node, node.body
return node, None
def _visit_statement_block(self, node, nodes):
self.state[_RewriteBlock].enter()
new_nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
block_definitely_returns = self.state[_RewriteBlock].definitely_returns
self.state[_RewriteBlock].exit()
return new_nodes, block_definitely_returns
def visit_While(self, node):
node.test = self.visit(node.test)
node.body, _ = self._visit_statement_block(node, node.body)
node.orelse, _ = self._visit_statement_block(node, node.orelse)
return node
def visit_For(self, node):
node.iter = self.visit(node.iter)
node.target = self.visit(node.target)
node.body, _ = self._visit_statement_block(node, node.body)
node.orelse, _ = self._visit_statement_block(node, node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body, definitely_returns = self._visit_statement_block(node, node.body)
if definitely_returns:
anno.setanno(node, STMT_DEFINITELY_RETURNS, True)
return node
def visit_Try(self, node):
# We could decide whether a 'try' DEFINITELY_RETURNS based on its components
# It is not clear whether we want to do anything with this given
# a 'try' is likely to throw an exception in some circumstances.
node.body, _ = self._visit_statement_block(node, node.body)
node.orelse, _ = self._visit_statement_block(node, node.orelse)
node.finalbody, _ = self._visit_statement_block(node, node.finalbody)
node.handlers = self.visit_block(node.handlers)
return node
def visit_ExceptHandler(self, node):
# To determine whether `try` DEFINITELY_RETURNS we need to revisit this.
node.body, _ = self._visit_statement_block(node, node.body)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body, body_definitely_returns = self._visit_statement_block(
node, node.body)
if body_definitely_returns:
anno.setanno(node, BODY_DEFINITELY_RETURNS, True)
node.orelse, orelse_definitely_returns = self._visit_statement_block(
node, node.orelse)
if orelse_definitely_returns:
anno.setanno(node, ORELSE_DEFINITELY_RETURNS, True)
if body_definitely_returns and orelse_definitely_returns:
self.state[_RewriteBlock].definitely_returns = True
return node
def visit_FunctionDef(self, node):
node.args = self.visit(node.args)
node.body, _ = self._visit_statement_block(node, node.body)
return node
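# An illustrative sketch (not part of the converter): the reordering that
# ConditionalReturnRewriter's docstring describes, written out by hand in plain
# Python. `_before_rewrite` and `_after_rewrite` are hypothetical names used
# only for this example; the two functions are behaviorally equivalent.
def _before_rewrite(x):
  if x > 0:
    return 'positive'
  else:
    x = -x
  return 'non-positive, abs=%d' % x

def _after_rewrite(x):
  if x > 0:
    return 'positive'
  else:
    x = -x
    return 'non-positive, abs=%d' % x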
class _Block(object):
def __init__(self):
self.is_function = False
self.return_used = False
self.create_guard_next = False
self.create_guard_now = False
def __repr__(self):
return 'used: {}'.format(
self.return_used)
class _Function(object):
def __init__(self):
self.do_return_var_name = None
self.retval_var_name = None
def __repr__(self):
return 'return control: {}, return value: {}'.format(
self.do_return_var_name, self.retval_var_name)
class ReturnStatementsTransformer(converter.Base):
"""Lowers return statements into variables and conditionals.
  Specifically, the following pattern:
      <block 1>
      return val
      <block 2>
  is converted to:
      do_return = False
      retval = None
      <block 1>
      do_return = True
      retval = val
      if not do_return:
        <block 2>
      return retval
  The conversion adjusts loops as well:
      <block 1>
      while cond:
        <block 2>
        return retval
  is converted to:
      <block 1>
      while not do_return and cond:
        <block 2>
        do_return = True
        retval = val
"""
def __init__(self, ctx, default_to_null_return):
super(ReturnStatementsTransformer, self).__init__(ctx)
self.default_to_null_return = default_to_null_return
def visit_Return(self, node):
for block in reversed(self.state[_Block].stack):
block.return_used = True
block.create_guard_next = True
if block.is_function:
break
retval = node.value if node.value else parser.parse_expression('None')
template = """
do_return_var_name = True
retval_var_name = retval
"""
node = templates.replace(
template,
do_return_var_name=self.state[_Function].do_return_var_name,
retval_var_name=self.state[_Function].retval_var_name,
retval=retval)
return node
def _postprocess_statement(self, node):
if not self.state[_Block].return_used:
return node, None
state = self.state[_Block]
if state.create_guard_now:
template = """
if ag__.not_(do_return_var_name):
original_node
"""
cond, = templates.replace(
template,
do_return_var_name=self.state[_Function].do_return_var_name,
original_node=node)
node, block = cond, cond.body
else:
node, block = node, None
state.create_guard_now = state.create_guard_next
state.create_guard_next = False
return node, block
def _visit_statement_block(self, node, nodes):
self.state[_Block].enter()
nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
self.state[_Block].exit()
return nodes
def visit_While(self, node):
node.test = self.visit(node.test)
# Add the check for return to the loop condition.
node.body = self._visit_statement_block(node, node.body)
if self.state[_Block].return_used:
node.test = templates.replace_as_expression(
'ag__.and_(lambda: ag__.not_(control_var), lambda: test)',
test=node.test,
control_var=self.state[_Function].do_return_var_name)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_For(self, node):
node.iter = self.visit(node.iter)
node.target = self.visit(node.target)
# Add the check for return to the loop condition.
node.body = self._visit_statement_block(node, node.body)
if self.state[_Block].return_used:
extra_test = anno.getanno(node, 'extra_test', default=None)
if extra_test is not None:
extra_test = templates.replace_as_expression(
'ag__.and_(lambda: ag__.not_(control_var), lambda: extra_test)',
extra_test=extra_test,
control_var=self.state[_Function].do_return_var_name)
else:
extra_test = templates.replace_as_expression(
'ag__.not_(control_var)',
control_var=self.state[_Function].do_return_var_name)
anno.setanno(node, 'extra_test', extra_test)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body = self._visit_statement_block(node, node.body)
return node
def visit_Try(self, node):
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
node.finalbody = self._visit_statement_block(node, node.finalbody)
node.handlers = self.visit_block(node.handlers)
return node
def visit_ExceptHandler(self, node):
node.body = self._visit_statement_block(node, node.body)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_FunctionDef(self, node):
self.state[_Function].enter()
self.state[_Block].enter()
self.state[_Block].is_function = True
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
do_return_var_name = self.ctx.namer.new_symbol(
'do_return', scope.referenced)
retval_var_name = self.ctx.namer.new_symbol('retval_', scope.referenced)
self.state[_Function].do_return_var_name = do_return_var_name
self.state[_Function].retval_var_name = retval_var_name
converted_body = self._visit_statement_block(node, node.body)
# Avoid placing statements before any eventual docstring.
# TODO(mdan): Should a docstring even be included in the output?
docstring = None
if converted_body:
if (isinstance(converted_body[0], gast.Expr) and
isinstance(converted_body[0].value, gast.Constant)):
docstring = converted_body[0]
converted_body = converted_body[1:]
if self.state[_Block].return_used:
if self.default_to_null_return:
template = """
do_return_var_name = False
retval_var_name = ag__.UndefinedReturnValue()
body
# TODO(b/134753123) Remove the do_return_var_name tuple.
(do_return_var_name,)
return ag__.retval(retval_var_name)
"""
else:
# TODO(b/134753123) Fix loops that return when do_return is not set.
template = """
body
return retval_var_name
"""
node.body = templates.replace(
template,
body=converted_body,
do_return_var_name=do_return_var_name,
retval_var_name=retval_var_name)
if docstring:
node.body.insert(0, docstring)
self.state[_Block].exit()
self.state[_Function].exit()
return node
def transform(node, ctx, default_to_null_return=True):
"""Ensure a function has only a single return."""
# Note: Technically, these two could be merged into a single walk, but
# keeping them separate helps with readability.
node = ConditionalReturnRewriter(ctx).visit(node)
transformer = ReturnStatementsTransformer(
ctx, default_to_null_return=default_to_null_return)
node = transformer.visit(node)
return node
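# An illustrative sketch (not part of the converter): the lowering that
# ReturnStatementsTransformer's docstring describes, applied by hand to a small
# function. `_original` and `_lowered` are hypothetical names; the two
# functions return the same values for every input.
def _original(x):
  if x > 0:
    return x * 2
  return -x

def _lowered(x):
  do_return = False
  retval = None
  if x > 0:
    do_return = True
    retval = x * 2
  if not do_return:
    do_return = True
    retval = -x
  return retval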
|
jhseu/tensorflow
|
tensorflow/python/autograph/converters/return_statements.py
|
Python
|
apache-2.0
| 12,595
|
[
"VisIt"
] |
b21b12d8f438c2cd3e2c553daafe4c3ddb9741cef07c66d676e8419e0a8c313e
|
# -*- coding: utf-8 -*-
#
# hl_api_simulation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for simulation control
"""
from contextlib import contextmanager
from ..ll_api import *
from .hl_api_helper import *
from .hl_api_parallel_computing import Rank
__all__ = [
'Cleanup',
'DisableStructuralPlasticity',
'EnableStructuralPlasticity',
'GetKernelStatus',
'Install',
'Prepare',
'ResetKernel',
'Run',
'RunManager',
'SetKernelStatus',
'Simulate',
]
@check_stack
def Simulate(t):
"""Simulate the network for `t` milliseconds.
Parameters
----------
t : float
Time to simulate in ms
See Also
--------
RunManager
"""
sps(float(t))
sr('ms Simulate')
@check_stack
def Run(t):
"""Simulate the network for `t` milliseconds.
Parameters
----------
t : float
Time to simulate in ms
Notes
------
Call between `Prepare` and `Cleanup` calls, or within a
``with RunManager`` clause.
Simulate(t): t' = t/m; Prepare(); for _ in range(m): Run(t'); Cleanup()
`Prepare` must be called before `Run` to calibrate the system, and
`Cleanup` must be called after `Run` to close files, cleanup handles, and
so on. After `Cleanup`, `Prepare` can and must be called before more `Run`
calls. Any calls to `SetStatus` between `Prepare` and `Cleanup` have
undefined behaviour.
See Also
--------
Prepare, Cleanup, RunManager, Simulate
"""
sps(float(t))
sr('ms Run')
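# A minimal usage sketch (not part of the NEST API): split a 1000 ms simulation
# into ten 100 ms Run calls bracketed by Prepare/Cleanup, as the docstring
# above describes. Wrapped in a helper so the module stays importable; the
# helper name `_run_in_chunks_example` is hypothetical.
def _run_in_chunks_example():
    Prepare()
    try:
        for _ in range(10):
            Run(100.0)
    finally:
        Cleanup()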
@check_stack
def Prepare():
"""Calibrate the system before a `Run` call. Not needed for `Simulate`.
Call before the first `Run` call, or before calling `Run` after changing
the system, calling `SetStatus` or `Cleanup`.
See Also
--------
Run, Cleanup
"""
sr('Prepare')
@check_stack
def Cleanup():
"""Cleans up resources after a `Run` call. Not needed for `Simulate`.
Closes state for a series of runs, such as flushing and closing files.
A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
See Also
--------
Run, Prepare
"""
sr('Cleanup')
@contextmanager
def RunManager():
"""ContextManager for `Run`
Calls `Prepare` before a series of `Run` calls, and calls `Cleanup` at end.
E.g.:
::
with RunManager():
for i in range(10):
Run()
See Also
--------
Prepare, Run, Cleanup, Simulate
"""
Prepare()
try:
yield
finally:
Cleanup()
@check_stack
def ResetKernel():
"""Reset the simulation kernel.
This will destroy the network as well as all custom models created with
:py:func:`.CopyModel`. Calling this function is equivalent to restarting NEST.
In particular,
* all network nodes
* all connections
* all user-defined neuron and synapse models
are deleted, and
* time
* random generators
are reset. The only exception is that dynamically loaded modules are not
unloaded. This may change in a future version of NEST.
"""
sr('ResetKernel')
@check_stack
def SetKernelStatus(params):
"""Set parameters for the simulation kernel.
Parameters
----------
params : dict
Dictionary of parameters to set.
**Note**
All NEST kernel parameters are described below, grouped by topic.
Some of them only provide information about the kernel status and
cannot be set by the user. These are marked as *read only* and can
be accessed using ``GetKernelStatus``.
**Time and resolution**
Parameters
----------
resolution : float
The resolution of the simulation (in ms)
time : float
The current simulation time (in ms)
to_do : int, read only
The number of steps yet to be simulated
max_delay : float
The maximum delay in the network
min_delay : float
The minimum delay in the network
ms_per_tic : float
The number of milliseconds per tic
tics_per_ms : float
The number of tics per millisecond
tics_per_step : int
The number of tics per simulation time step
T_max : float, read only
The largest representable time value
T_min : float, read only
The smallest representable time value
**Random number generators**
Parameters
----------
rng_types : list, read only
Names of random number generator types available.
rng_type : str
Name of random number generator type used by NEST.
rng_seed : int
Seed value used as base for seeding NEST random number generators
(:math:`1 \leq s \leq 2^{32}-1`).
**Parallel processing**
Parameters
----------
total_num_virtual_procs : int
The total number of virtual processes
local_num_threads : int
The local number of threads
num_processes : int, read only
The number of MPI processes
off_grid_spiking : bool
Whether to transmit precise spike times in MPI communication
**MPI buffers**
Parameters
----------
adaptive_spike_buffers : bool
Whether MPI buffers for communication of spikes resize on the fly
adaptive_target_buffers : bool
Whether MPI buffers for communication of connections resize on the fly
buffer_size_secondary_events : int, read only
Size of MPI buffers for communicating secondary events (in bytes, per
MPI rank, for developers)
buffer_size_spike_data : int
Total size of MPI buffer for communication of spikes
buffer_size_target_data : int
Total size of MPI buffer for communication of connections
growth_factor_buffer_spike_data : float
If MPI buffers for communication of spikes resize on the fly, grow
them by this factor each round
growth_factor_buffer_target_data : float
If MPI buffers for communication of connections resize on the fly, grow
them by this factor each round
max_buffer_size_spike_data : int
Maximal size of MPI buffers for communication of spikes.
max_buffer_size_target_data : int
Maximal size of MPI buffers for communication of connections
**Gap junctions and rate models (waveform relaxation method)**
Parameters
----------
use_wfr : bool
Whether to use waveform relaxation method
wfr_comm_interval : float
Desired waveform relaxation communication interval
wfr_tol : float
Convergence tolerance of waveform relaxation method
wfr_max_iterations : int
Maximal number of iterations used for waveform relaxation
wfr_interpolation_order : int
Interpolation order of polynomial used in wfr iterations
**Synapses**
Parameters
----------
max_num_syn_models : int, read only
Maximal number of synapse models supported
sort_connections_by_source : bool
Whether to sort connections by their source; increases construction
time of presynaptic data structures, decreases simulation time if the
average number of outgoing connections per neuron is smaller than the
total number of threads
structural_plasticity_synapses : dict
Defines all synapses which are plastic for the structural plasticity
algorithm. Each entry in the dictionary is composed of a synapse model,
        the presynaptic element and the postsynaptic element
structural_plasticity_update_interval : int
Defines the time interval in ms at which the structural plasticity
manager will make changes in the structure of the network (creation
and deletion of plastic synapses)
use_compressed_spikes : bool
Whether to use spike compression; if a neuron has targets on
multiple threads of a process, this switch makes sure that only
a single packet is sent to the process instead of one packet per
target thread; requires sort_connections_by_source = true
**Output**
    Parameters
    ----------
data_path : str
A path, where all data is written to (default is the current
directory)
data_prefix : str
A common prefix for all data files
overwrite_files : bool
Whether to overwrite existing data files
print_time : bool
Whether to print progress information during the simulation
network_size : int, read only
The number of nodes in the network
num_connections : int, read only, local only
The number of connections in the network
local_spike_counter : int, read only
Number of spikes fired by neurons on a given MPI rank since NEST was
started or the last ResetKernel. Only spikes from "normal" neurons
(neuron models with proxies) are counted, not spikes generated by
devices such as poisson_generator.
**Miscellaneous**
Parameters
----------
dict_miss_is_error : bool
Whether missed dictionary entries are treated as errors
keep_source_table : bool
Whether to keep source table after connection setup is complete
See Also
--------
GetKernelStatus
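    **Example**
    A minimal usage sketch; the parameter names are taken from the lists
    above and the values are purely illustrative::
        nest.SetKernelStatus({'resolution': 0.05,
                              'print_time': True,
                              'overwrite_files': True})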
"""
sps(params)
sr('SetKernelStatus')
@check_stack
def GetKernelStatus(keys=None):
"""Obtain parameters of the simulation kernel.
Parameters
----------
keys : str or list, optional
Single parameter name or `list` of parameter names
Returns
-------
dict:
Parameter dictionary, if called without argument
type:
Single parameter value, if called with single parameter name
list:
List of parameter values, if called with list of parameter names
Raises
------
TypeError
If `keys` are of the wrong type.
Notes
-----
See SetKernelStatus for documentation on each parameter key.
See Also
--------
SetKernelStatus
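    **Example**
    A brief sketch of the three call forms described above; the variable
    names are purely illustrative::
        all_params = nest.GetKernelStatus()
        resolution = nest.GetKernelStatus('resolution')
        resolution, time = nest.GetKernelStatus(['resolution', 'time'])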
"""
sr('GetKernelStatus')
status_root = spp()
if keys is None:
return status_root
elif is_literal(keys):
return status_root[keys]
elif is_iterable(keys):
return tuple(status_root[k] for k in keys)
else:
raise TypeError("keys should be either a string or an iterable")
@check_stack
def Install(module_name):
"""Load a dynamically linked NEST module.
Parameters
----------
module_name : str
Name of the dynamically linked module
Returns
-------
handle
NEST module identifier, required for unloading
Notes
-----
Dynamically linked modules are searched in the NEST library
directory (``<prefix>/lib/nest``) and in ``LD_LIBRARY_PATH`` (on
Linux) or ``DYLD_LIBRARY_PATH`` (on OSX).
**Example**
::
nest.Install("mymodule")
"""
return sr("(%s) Install" % module_name)
@check_stack
def EnableStructuralPlasticity():
"""Enable structural plasticity for the network simulation
See Also
--------
DisableStructuralPlasticity
"""
sr('EnableStructuralPlasticity')
@check_stack
def DisableStructuralPlasticity():
"""Disable structural plasticity for the network simulation
See Also
--------
EnableStructuralPlasticity
"""
sr('DisableStructuralPlasticity')
|
stinebuu/nest-simulator
|
pynest/nest/lib/hl_api_simulation.py
|
Python
|
gpl-2.0
| 11,956
|
[
"NEURON"
] |
dbd1b6199556bf52c113d648e1b7a2f622aac0a493c83fce7e0b2253baf27a3c
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd.interactions import HarmonicBond, AngleHarmonic
import numpy as np
from random import shuffle
@utx.skipIfMissingFeatures("COLLISION_DETECTION")
class CollisionDetection(ut.TestCase):
"""Tests interface and functionality of the collision detection / dynamic binding"""
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
np.random.seed(seed=42)
if espressomd.has_features("VIRTUAL_SITES_RELATIVE"):
from espressomd.virtual_sites import VirtualSitesRelative
s.virtual_sites = VirtualSitesRelative()
H = HarmonicBond(k=5000, r_0=0.1)
H2 = HarmonicBond(k=25000, r_0=0.02)
s.bonded_inter.add(H)
s.bonded_inter.add(H2)
time_step = 0.001
s.time_step = time_step
s.cell_system.skin = 0.05
s.min_global_cut = 0.112
part_type_to_attach_vs_to = 0
part_type_vs = 1
part_type_to_be_glued = 2
part_type_after_glueing = 3
other_type = 5
def get_state_set_state_consistency(self):
state = self.s.collision_detection.get_params()
self.s.collision_detection.set_params(**state)
self.assertEqual(state, self.s.collision_detection.get_params())
def test_00_interface_and_defaults(self):
# Is it off by default
self.assertEqual(self.s.collision_detection.mode, "off")
# Make sure params cannot be set individually
with self.assertRaises(Exception):
self.s.collision_detection.mode = "bind_centers"
# Verify exception throwing for unknown collision modes
for unknown_mode in (0, "unknown"):
with self.assertRaisesRegex(Exception, "Mode not handled"):
self.s.collision_detection.set_params(mode=unknown_mode)
# That should work
self.s.collision_detection.set_params(mode="off")
self.assertEqual(self.s.collision_detection.mode, "off")
def test_bind_centers(self):
# Check that it leaves particles alone, when off
self.s.collision_detection.set_params(mode="off")
self.s.part.clear()
self.s.part.add(pos=(0, 0, 0), id=0)
self.s.part.add(pos=(0.1, 0, 0), id=1)
self.s.part.add(pos=(0.1, 0.3, 0), id=2)
self.s.integrator.run(1)
self.assertEqual(self.s.part[0].bonds, ())
self.assertEqual(self.s.part[1].bonds, ())
self.assertEqual(self.s.part[2].bonds, ())
        # Check that it can be activated and creates a bond on collision
self.s.collision_detection.set_params(
mode="bind_centers", distance=0.11, bond_centers=self.H)
self.get_state_set_state_consistency()
self.s.integrator.run(1, recalc_forces=True)
bond0 = ((self.s.bonded_inter[0], 1),)
bond1 = ((self.s.bonded_inter[0], 0),)
self.assertTrue(
self.s.part[0].bonds == bond0 or self.s.part[1].bonds == bond1)
self.assertEqual(self.s.part[2].bonds, ())
# Check that no additional bonds appear
self.s.integrator.run(1)
self.assertTrue(
self.s.part[0].bonds == bond0 or self.s.part[1].bonds == bond1)
self.assertEqual(self.s.part[2].bonds, ())
# Check turning it off
self.s.collision_detection.set_params(mode="off")
self.get_state_set_state_consistency()
self.assertEqual(self.s.collision_detection.mode, "off")
def run_test_bind_at_point_of_collision_for_pos(self, *positions):
positions = list(positions)
shuffle(positions)
self.s.part.clear()
# Place particle which should not take part in collisions
p = self.s.part.add(pos=(0.1, 0.3, 0))
for pos in positions:
p1 = self.s.part.add(pos=pos + (0, 0, 0))
p2 = self.s.part.add(pos=pos + (0.1, 0, 0))
if self.s.distance(p1, p) < 0.12 or self.s.distance(p2, p) < 0.12:
raise Exception(
"Test particle too close to particle, which should not take part in collision")
# 2 non-virtual + 2 virtual + one that doesn't take part
expected_np = 4 * len(positions) + 1
self.s.collision_detection.set_params(
mode="bind_at_point_of_collision", distance=0.11, bond_centers=self.H, bond_vs=self.H2, part_type_vs=1, vs_placement=0.4)
self.get_state_set_state_consistency()
self.s.integrator.run(1, recalc_forces=True)
self.verify_state_after_bind_at_poc(expected_np)
# Integrate again and check that nothing has changed
self.s.integrator.run(1, recalc_forces=True)
self.verify_state_after_bind_at_poc(expected_np)
# Check that nothing explodes when the particles are moved.
# In particular for parallel simulations
self.s.thermostat.set_langevin(kT=0, gamma=0.01, seed=42)
self.s.part[:].v = [0.05, 0.01, 0.15]
self.s.integrator.run(3000)
self.verify_state_after_bind_at_poc(expected_np)
def verify_state_after_bind_at_poc(self, expected_np):
self.assertEqual(len(self.s.part), expected_np)
# At the end of test, this list should be empty
parts_not_accounted_for = list(range(expected_np))
# We traverse particles. We look for a vs with a bond to find the other vs.
# From the two vs we find the two non-virtual particles
for p in self.s.part:
# Skip non-virtual
if not p.virtual:
continue
# Skip vs that doesn't have a bond
if p.bonds == ():
continue
# Parse the bond
self.assertEqual(len(p.bonds), 1)
# Bond type
self.assertEqual(p.bonds[0][0], self.H2)
# get partner
p2 = self.s.part[p.bonds[0][1]]
# Is that really a vs
self.assertTrue(p2.virtual)
# Get base particles
base_p1 = self.s.part[p.vs_relative[0]]
base_p2 = self.s.part[p2.vs_relative[0]]
# Take note of accounted-for particles
for _p in p, p2, base_p1, base_p2:
parts_not_accounted_for.remove(_p.id)
self.verify_bind_at_poc_pair(base_p1, base_p2, p, p2)
# Check particle that did not take part in collision.
self.assertEqual(len(parts_not_accounted_for), 1)
p = self.s.part[parts_not_accounted_for[0]]
self.assertFalse(p.virtual)
self.assertEqual(p.bonds, ())
parts_not_accounted_for.remove(p.id)
self.assertEqual(parts_not_accounted_for, [])
def verify_bind_at_poc_pair(self, p1, p2, vs1, vs2):
bond_p1 = ((self.s.bonded_inter[0], p2.id),)
bond_p2 = ((self.s.bonded_inter[0], p1.id),)
self.assertTrue(p1.bonds == bond_p1 or p2.bonds == bond_p2)
# Check for presence of vs
# Check for bond between vs
bond_vs1 = ((self.s.bonded_inter[1], vs2.id),)
bond_vs2 = ((self.s.bonded_inter[1], vs1.id),)
self.assertTrue(vs1.bonds == bond_vs1 or vs2.bonds == bond_vs2)
# Vs properties
self.assertTrue(vs1.virtual)
self.assertTrue(vs2.virtual)
# vs_relative properties
seen = []
for p in vs1, vs2:
r = p.vs_relative
rel_to = r[0]
dist = r[1]
# Vs is related to one of the particles
self.assertIn(rel_to, (p1.id, p2.id))
# The two vs relate to two different particles
self.assertNotIn(rel_to, seen)
seen.append(rel_to)
# Check placement
if rel_to == p1.id:
dist_centers = np.copy(p2.pos - p1.pos)
else:
dist_centers = p1.pos - p2.pos
expected_pos = self.s.part[rel_to].pos_folded + \
self.s.collision_detection.vs_placement * dist_centers
dist = expected_pos - p.pos_folded
dist -= np.round(dist / self.s.box_l) * self.s.box_l
self.assertLess(np.linalg.norm(dist), 1E-12)
@utx.skipIfMissingFeatures("VIRTUAL_SITES_RELATIVE")
def test_bind_at_point_of_collision(self):
# Single collision head node
self.run_test_bind_at_point_of_collision_for_pos(np.array((0, 0, 0)))
# Single collision, mixed
self.run_test_bind_at_point_of_collision_for_pos(
np.array((0.45, 0, 0)))
# Single collision, non-head-node
self.run_test_bind_at_point_of_collision_for_pos(np.array((0.7, 0, 0)))
# head-node + mixed
self.run_test_bind_at_point_of_collision_for_pos(
np.array((0, 0, 0)), np.array((0.45, 0, 0)))
# Mixed + other node
self.run_test_bind_at_point_of_collision_for_pos(
np.array((0.45, 0, 0)), np.array((0.7, 0, 0)))
# Head + other
self.run_test_bind_at_point_of_collision_for_pos(
np.array((0.0, 0, 0)), np.array((0.7, 0, 0)))
# Head + mixed + other
self.run_test_bind_at_point_of_collision_for_pos(
np.array((0.2, 0, 0)), np.array((0.95, 0, 0)), np.array((0.7, 0, 0)))
@utx.skipIfMissingFeatures(["LENNARD_JONES", "VIRTUAL_SITES_RELATIVE"])
def test_bind_at_point_of_collision_random(self):
"""Integrate lj liquid and check that no double bonds are formed
        and that the number of bonds matches the number of virtual sites
"""
self.s.part.clear()
# Add randomly placed particles
self.s.part.add(pos=np.random.random((200, 3)))
# Setup Lennard-Jones
self.s.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1, sigma=0.1, cutoff=2**(1. / 6) * 0.1, shift="auto")
# Remove overlap between particles
self.s.thermostat.turn_off()
self.s.integrator.set_steepest_descent(
f_max=0,
gamma=1,
max_displacement=0.001)
while self.s.analysis.energy()["total"] > len(self.s.part):
self.s.integrator.run(10)
# Collision detection
self.s.collision_detection.set_params(
mode="bind_at_point_of_collision",
distance=0.11,
bond_centers=self.H,
bond_vs=self.H2,
part_type_vs=1,
vs_placement=0.4)
self.get_state_set_state_consistency()
# Integrate lj liquid
self.s.integrator.set_vv()
self.s.integrator.run(5000)
# Analysis
virtual_sites = self.s.part.select(virtual=True)
non_virtual = self.s.part.select(virtual=False)
# Check bonds on non-virtual particles
bonds = []
for p in non_virtual:
for bond in p.bonds:
# Sort bond partners to make them unique independently of
# which particle got the bond
bonds.append(tuple(sorted([p.id, bond[1]])))
# No duplicate bonds?
self.assertEqual(len(bonds), len(set(bonds)))
# 2 virtual sites per bond?
self.assertEqual(2 * len(bonds), len(virtual_sites))
# Find pairs of bonded virtual sites
vs_pairs = []
for p in virtual_sites:
# 0 or 1 bond on vs?
self.assertIn(len(p.bonds), [0, 1])
if len(p.bonds) == 1:
vs_pairs.append((p.id, p.bonds[0][1]))
# Number of vs pairs = number of bonds?
self.assertEqual(len(vs_pairs), len(bonds))
# Check that vs pairs and bonds agree
for vs_pair in vs_pairs:
# Get corresponding non-virtual particles
base_particles = tuple(sorted(
[self.s.part[vs_pair[0]].vs_relative[0],
self.s.part[vs_pair[1]].vs_relative[0]]))
# Is there a corresponding bond?
self.assertIn(base_particles, bonds)
# Tidy
self.s.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=0, sigma=0, cutoff=0)
def run_test_glue_to_surface_for_pos(self, *positions):
positions = list(positions)
shuffle(positions)
self.s.part.clear()
# Place particle which should not take part in collisions
# In this case, it is skipped, because it is of the wrong type,
# even if it is within range for a collision
self.s.part.add(pos=positions[0], type=self.other_type)
for pos in positions:
# Since this is non-symmetric, we randomize order
if np.random.random() > .5:
self.s.part.add(
pos=pos + (0, 0, 0), type=self.part_type_to_attach_vs_to)
self.s.part.add(
pos=pos + (0.1, 0, 0), type=self.part_type_to_be_glued)
else:
self.s.part.add(
pos=pos + (0.1, 0, 0), type=self.part_type_to_be_glued)
self.s.part.add(
pos=pos + (0, 0, 0), type=self.part_type_to_attach_vs_to)
# 2 non-virtual + 1 virtual + one that doesn't take part
expected_np = 3 * len(positions) + 1
self.s.collision_detection.set_params(
mode="glue_to_surface", distance=0.11, distance_glued_particle_to_vs=0.02, bond_centers=self.H, bond_vs=self.H2, part_type_vs=self.part_type_vs, part_type_to_attach_vs_to=self.part_type_to_attach_vs_to, part_type_to_be_glued=self.part_type_to_be_glued, part_type_after_glueing=self.part_type_after_glueing)
self.get_state_set_state_consistency()
self.s.integrator.run(1, recalc_forces=True)
self.verify_state_after_glue_to_surface(expected_np)
# Integrate again and check that nothing has changed
self.s.integrator.run(1, recalc_forces=True)
self.verify_state_after_glue_to_surface(expected_np)
        # Check that nothing explodes when the particles are moved.
# In particular for parallel simulations
self.s.thermostat.set_langevin(kT=0, gamma=0.01, seed=42)
self.s.part[:].v = [0.05, 0.01, 0.15]
self.s.integrator.run(3000)
self.verify_state_after_glue_to_surface(expected_np)
def verify_state_after_glue_to_surface(self, expected_np):
self.assertEqual(len(self.s.part), expected_np)
# At the end of test, this list should be empty
parts_not_accounted_for = list(range(expected_np))
# We traverse particles. We look for a vs, get base particle from there
# and partner particle via bonds
for p in self.s.part:
# Skip non-virtual
if not p.virtual:
continue
# The vs shouldn't have bonds
self.assertEqual(p.bonds, ())
# Get base particles
base_p = self.s.part[p.vs_relative[0]]
# Get bound particle
# There is a bond between the base particle and the bound particle
            # but there is no guarantee where it is stored
# 1. On the base particle of the vs
p2 = None
if len(base_p.bonds) == 1:
self.assertEqual(base_p.bonds[0][0], self.H)
p2 = self.s.part[base_p.bonds[0][1]]
else:
# We need to go through all particles to find it
for candidate in self.s.part:
if candidate.id not in parts_not_accounted_for:
continue
if len(candidate.bonds) >= 1:
for b in candidate.bonds:
if b[0] == self.H and b[1] == base_p.id:
p2 = candidate
if p2 is None:
raise Exception("Bound particle not found")
# Take note of accounted-for particles
parts_not_accounted_for.remove(base_p.id)
parts_not_accounted_for.remove(p.id)
parts_not_accounted_for.remove(p2.id)
self.verify_glue_to_surface_pair(base_p, p, p2)
# Check particle that did not take part in collision.
self.assertEqual(len(parts_not_accounted_for), 1)
p = self.s.part[parts_not_accounted_for[0]]
self.assertFalse(p.virtual)
self.assertEqual(p.type, self.other_type)
self.assertEqual(p.bonds, ())
parts_not_accounted_for.remove(p.id)
self.assertEqual(parts_not_accounted_for, [])
def verify_glue_to_surface_pair(self, base_p, vs, bound_p):
# Check all types
self.assertEqual(base_p.type, self.part_type_to_attach_vs_to)
self.assertEqual(vs.type, self.part_type_vs)
self.assertEqual(bound_p.type, self.part_type_after_glueing)
# Bound particle should have a bond to vs. It can additionally have a bond
# to the base particle
bond_to_vs_found = 0
for b in bound_p.bonds:
if b[0] == self.H2:
# bond to vs
self.assertEqual(b, (self.H2, vs.id))
bond_to_vs_found += 1
self.assertEqual(bond_to_vs_found, 1)
# Vs should not have a bond
self.assertEqual(vs.bonds, ())
# Vs properties
self.assertTrue(vs.virtual)
self.assertEqual(vs.vs_relative[0], base_p.id)
# Distance vs,bound_p
self.assertAlmostEqual(self.s.distance(vs, bound_p), 0.02, places=3)
self.assertAlmostEqual(self.s.distance(base_p, bound_p), 0.1, places=3)
self.assertAlmostEqual(self.s.distance(base_p, vs), 0.08, places=3)
# base_p,vs,bound_p on a line
self.assertGreater(np.dot(self.s.distance_vec(base_p, vs), self.s.distance_vec(base_p, bound_p))
/ self.s.distance(base_p, vs) / self.s.distance(base_p, bound_p), 0.99)
@utx.skipIfMissingFeatures("VIRTUAL_SITES_RELATIVE")
def test_glue_to_surface(self):
# Single collision head node
self.run_test_glue_to_surface_for_pos(np.array((0, 0, 0)))
# Single collision, mixed
self.run_test_glue_to_surface_for_pos(np.array((0.45, 0, 0)))
# Single collision, non-head-node
self.run_test_glue_to_surface_for_pos(np.array((0.7, 0, 0)))
# head-node + mixed
self.run_test_glue_to_surface_for_pos(
np.array((0, 0, 0)), np.array((0.45, 0, 0)))
# Mixed + other node
self.run_test_glue_to_surface_for_pos(
np.array((0.45, 0, 0)), np.array((0.7, 0, 0)))
# Head + other
self.run_test_glue_to_surface_for_pos(
np.array((0.0, 0, 0)), np.array((0.7, 0, 0)))
# Head + mixed + other
self.run_test_glue_to_surface_for_pos(
np.array((0.2, 0, 0)), np.array((0.95, 0, 0)), np.array((0.7, 0, 0)))
@utx.skipIfMissingFeatures("VIRTUAL_SITES_RELATIVE")
def test_glue_to_surface_random(self):
"""Integrate lj liquid and check that no double bonds are formed
        and that the number of bonds matches the number of virtual sites
"""
self.s.part.clear()
# Add randomly placed particles
self.s.part.add(pos=np.random.random((100, 3)),
type=100 * [self.part_type_to_attach_vs_to])
self.s.part.add(pos=np.random.random(
(100, 3)), type=100 * [self.part_type_to_be_glued])
self.s.part.add(pos=np.random.random(
(100, 3)), type=100 * [self.other_type])
# Setup Lennard-Jones
self.s.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1, sigma=0.1, cutoff=2**(1. / 6) * 0.1, shift="auto")
# Remove overlap between particles
self.s.thermostat.turn_off()
self.s.integrator.set_steepest_descent(
f_max=0,
gamma=1,
max_displacement=0.001)
while self.s.analysis.energy()["total"] > len(self.s.part):
self.s.integrator.run(10)
# Collision detection
self.s.collision_detection.set_params(
mode="glue_to_surface", distance=0.11,
distance_glued_particle_to_vs=0.02, bond_centers=self.H,
bond_vs=self.H2, part_type_vs=self.part_type_vs,
part_type_to_attach_vs_to=self.part_type_to_attach_vs_to,
part_type_to_be_glued=self.part_type_to_be_glued,
part_type_after_glueing=self.part_type_after_glueing)
self.get_state_set_state_consistency()
# Integrate lj liquid
self.s.integrator.set_vv()
self.s.integrator.run(500)
# Analysis
virtual_sites = self.s.part.select(virtual=True)
non_virtual = self.s.part.select(virtual=False)
after_glueing = self.s.part.select(type=self.part_type_after_glueing)
# One virtual site per glued particle?
self.assertEqual(len(after_glueing), len(virtual_sites))
# Check bonds on non-virtual particles
bonds_centers = []
bonds_virtual = []
for p in non_virtual:
# Inert particles should not have bonds
if p.type == self.other_type:
self.assertEqual(len(p.bonds), 0)
# Particles that have not yet collided should not have a bond
if p.type == self.part_type_to_be_glued:
self.assertEqual(len(p.bonds), 0)
for bond in p.bonds:
# Bond type and partner type
# part_type_after_glueing can have a bond to a vs or to a
# non_virtual particle
if p.type == self.part_type_after_glueing:
self.assertIn(bond[0], (self.H, self.H2))
# Bonds to virtual sites:
if bond[0] == self.H2:
self.assertEqual(
self.s.part[bond[1]].type,
self.part_type_vs)
else:
self.assertEqual(
self.s.part[bond[1]].type,
self.part_type_to_attach_vs_to)
elif p.type == self.part_type_to_attach_vs_to:
self.assertEqual(bond[0], self.H)
self.assertEqual(
self.s.part[bond[1]].type,
self.part_type_after_glueing)
else:
print(p.id, p.type, p.bonds)
raise Exception("Particle should not have bonds. ")
# Collect bonds
# Sort bond partners to make them unique independently of
# which particle got the bond
if bond[0] == self.H:
bonds_centers.append(tuple(sorted([p.id, bond[1]])))
else:
bonds_virtual.append(tuple(sorted([p.id, bond[1]])))
# No duplicate bonds?
self.assertEqual(len(bonds_centers), len(set(bonds_centers)))
self.assertEqual(len(bonds_virtual), len(set(bonds_virtual)))
# 1 bond between centers and one between vs and glued particle
# per collision
self.assertEqual(len(bonds_virtual), len(bonds_centers))
# 1 virtual sites per bond?
self.assertEqual(len(bonds_centers), len(virtual_sites))
# no bonds on vs and vs particle type
for p in virtual_sites:
self.assertEqual(len(p.bonds), 0)
self.assertEqual(p.type, self.part_type_vs)
# Tidy
self.s.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=0, sigma=0, cutoff=0)
def test_bind_three_particles(self):
# Setup particles
self.s.part.clear()
dx = np.array((1, 0, 0))
dy = np.array((0, 1, 0))
a = np.array((0.499, 0.499, 0.499))
b = a + 0.1 * dx
c = a + 0.03 * dx + 0.03 * dy
d = a + 0.03 * dx - 0.03 * dy
e = a - 0.1 * dx
self.s.part.add(id=0, pos=a)
self.s.part.add(id=1, pos=b)
self.s.part.add(id=2, pos=c)
self.s.part.add(id=3, pos=d)
self.s.part.add(id=4, pos=e)
# Setup bonds
res = 181
for i in range(0, res, 1):
self.s.bonded_inter[i + 2] = AngleHarmonic(
bend=1, phi0=float(i) / (res - 1) * np.pi)
cutoff = 0.11
self.s.collision_detection.set_params(
mode="bind_three_particles", bond_centers=self.H,
bond_three_particles=2, three_particle_binding_angle_resolution=res, distance=cutoff)
self.get_state_set_state_consistency()
self.s.time_step = 1E-6
self.s.integrator.run(1, recalc_forces=True)
self.verify_triangle_binding(cutoff, self.s.bonded_inter[2], res)
# Make sure no extra bonds appear
self.s.integrator.run(1, recalc_forces=True)
self.verify_triangle_binding(cutoff, self.s.bonded_inter[2], res)
        # Place the particles in two steps and make sure the bonds are the
# same
self.s.part.clear()
self.s.part.add(id=0, pos=a)
self.s.part.add(id=2, pos=c)
self.s.part.add(id=3, pos=d)
self.s.integrator.run(1, recalc_forces=True)
self.s.part.add(id=4, pos=e)
self.s.part.add(id=1, pos=b)
self.s.cell_system.set_domain_decomposition()
self.s.integrator.run(1, recalc_forces=True)
self.verify_triangle_binding(cutoff, self.s.bonded_inter[2], res)
self.s.cell_system.set_n_square()
self.s.part[:].bonds = ()
self.s.integrator.run(1, recalc_forces=True)
self.verify_triangle_binding(cutoff, self.s.bonded_inter[2], res)
self.s.time_step = self.time_step
def verify_triangle_binding(self, distance, first_bond, angle_res):
# Gather pairs
n = len(self.s.part)
angle_res = angle_res - 1
expected_pairs = []
for i in range(n):
for j in range(i + 1, n, 1):
if self.s.distance(self.s.part[i], self.s.part[j]) <= distance:
expected_pairs.append((i, j))
# Find triangles
# Each element is a particle id, a bond id and two bond partners in
# ascending order
expected_angle_bonds = []
for i in range(n):
for j in range(i + 1, n, 1):
for k in range(j + 1, n, 1):
# Ref to particles
p_i = self.s.part[i]
p_j = self.s.part[j]
p_k = self.s.part[k]
# Normalized distance vectors
d_ij = np.copy(p_j.pos - p_i.pos)
d_ik = np.copy(p_k.pos - p_i.pos)
d_jk = np.copy(p_k.pos - p_j.pos)
d_ij /= np.linalg.norm(d_ij)
d_ik /= np.linalg.norm(d_ik)
d_jk /= np.linalg.norm(d_jk)
if self.s.distance(p_i, p_j) <= distance and self.s.distance(
p_i, p_k) <= distance:
id_i = first_bond._bond_id + \
int(np.round(
np.arccos(np.dot(d_ij, d_ik)) * angle_res / np.pi))
expected_angle_bonds.append((i, id_i, j, k))
if self.s.distance(p_i, p_j) <= distance and self.s.distance(
p_j, p_k) <= distance:
id_j = first_bond._bond_id + \
int(np.round(
np.arccos(np.dot(-d_ij, d_jk)) * angle_res / np.pi))
expected_angle_bonds.append((j, id_j, i, k))
if self.s.distance(p_i, p_k) <= distance and self.s.distance(
p_j, p_k) <= distance:
id_k = first_bond._bond_id + \
int(np.round(
np.arccos(np.dot(-d_ik, -d_jk)) * angle_res / np.pi))
expected_angle_bonds.append((k, id_k, i, j))
# Gather actual pairs and actual triangles
found_pairs = []
found_angle_bonds = []
for i in range(n):
for b in self.s.part[i].bonds:
if len(b) == 2:
self.assertEqual(b[0]._bond_id, self.H._bond_id)
found_pairs.append(tuple(sorted((i, b[1]))))
elif len(b) == 3:
partners = sorted(b[1:])
found_angle_bonds.append(
(i, b[0]._bond_id, partners[0], partners[1]))
else:
raise Exception(
"There should be only 2 and three particle bonds")
# The order between expected and found bonds does not always match
# because collisions occur in random order. Sort stuff
found_pairs = sorted(found_pairs)
found_angle_bonds = sorted(found_angle_bonds)
expected_angle_bonds = sorted(expected_angle_bonds)
self.assertEqual(expected_pairs, found_pairs)
if expected_angle_bonds != found_angle_bonds:
# Verbose info
print("expected:", expected_angle_bonds)
missing = []
for b in expected_angle_bonds:
if b in found_angle_bonds:
found_angle_bonds.remove(b)
else:
missing.append(b)
print("missing", missing)
print("extra:", found_angle_bonds)
print()
self.assertEqual(expected_angle_bonds, found_angle_bonds)
def test_zz_serialization(self):
self.s.collision_detection.set_params(
mode="bind_centers", distance=0.11, bond_centers=self.H)
reduce = self.s.collision_detection.__reduce__()
res = reduce[0](reduce[1][0])
self.assertEqual(res.__class__.__name__, "CollisionDetection")
self.assertEqual(res.mode, "bind_centers")
self.assertAlmostEqual(res.distance, 0.11, delta=1E-12)
self.assertEqual(res.bond_centers, self.H)
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/collision_detection.py
|
Python
|
gpl-3.0
| 30,592
|
[
"ESPResSo"
] |
a992735212d6748128c1494739ec2f3d9728ee21e9377fdb07f05d850c19d655
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from sqlalchemy.orm import aliased
from sqlalchemy.sql import desc
from sqlalchemy.sql.functions import count
from Core.db import session
from Core.maps import Updates, Galaxy, Planet, Alliance, User, Intel, FleetScan
from Core.loadable import loadable, route, require_planet
from Core.config import Config
from Core.paconf import PA
class topcunts(loadable):
"""Top planets attacking the specified target"""
usage = " [x:y[:z]|alliance|user]"
access = "member"
@route(loadable.coord)
def planet_galaxy(self, message, user, params):
# Planet
if params.group(5) is not None:
planet = Planet.load(*params.group(1,3,5))
if planet is None:
message.reply("No planet with coords %s:%s:%s found" % params.group(1,3,5))
else:
self.execute(message, planet=planet)
# Galaxy
else:
galaxy = Galaxy.load(*params.group(1,3))
if galaxy is None:
message.reply("No galaxy with coords %s:%s" % params.group(1,3))
else:
self.execute(message, galaxy=galaxy)
@route(r"(\S+)")
def user_alliance(self, message, user, params):
alliance = Alliance.load(params.group(1))
if alliance is None:
u = User.load(name=params.group(1), exact=False, access="member")
if u is None:
message.reply("No alliance or user matching '%s' found" % (params.group(1),))
elif u.planet is None:
message.reply("User %s has not entered their planet details" % (u.name,))
else:
planet = u.planet
self.execute(message, planet=planet)
else:
self.execute(message, alliance=alliance)
@route(r"")
@require_planet
def me(self, message, user, params):
self.execute(message, planet=user.planet)
def execute(self, message, planet=None, galaxy=None, alliance=None):
tick = Updates.current_tick()
target = aliased(Planet)
target_intel = aliased(Intel)
owner = aliased(Planet)
owner_intel = aliased(Intel)
Q = session.query(owner.x, owner.y, owner.z, count())
Q = Q.join((FleetScan.owner, owner))
Q = Q.join((FleetScan.target, target))
Q = Q.filter(FleetScan.mission == "Attack")
if planet:
Q = Q.filter(FleetScan.target == planet)
if galaxy:
Q = Q.filter(target.galaxy == galaxy)
if alliance:
Q = Q.join((target.intel, target_intel))
Q = Q.filter(target_intel.alliance == alliance)
Q = Q.group_by(owner.x, owner.y, owner.z)
Q = Q.order_by(desc(count()))
result = Q.all()
if len(result) < 1:
reply="No fleets found targetting"
if planet:
reply+=" coords %s:%s:%s"%(planet.x,planet.y,planet.z)
if galaxy:
reply+=" coords %s:%s"%(galaxy.x,galaxy.y)
if alliance:
reply+=" alliance %s"%(alliance.name,)
message.reply(reply)
return
reply = "Top attackers on"
if planet:
reply+=" coords %s:%s:%s"%(planet.x,planet.y,planet.z)
if galaxy:
reply+=" coords %s:%s"%(galaxy.x,galaxy.y)
if alliance:
reply+=" alliance %s"%(alliance.name,)
reply+=" are (total: %s) "%(sum([attacks for x,y,z, attacks in result]),)
prev = []
for x, y, z, attacks in result[:5]:
prev.append("%s:%s:%s - %s"%(x,y,z,attacks))
message.reply(reply+" | ".join(prev))
|
ellonweb/merlin
|
Hooks/victim/topcunts.py
|
Python
|
gpl-2.0
| 4,678
|
[
"Galaxy"
] |
3751c80392433038a0b39c01c4cc0675ecc30a04f23c70a687fd57f52982b0ea
|
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
import datetime
import ddt
import random
import pytz
import io
import json
import requests
import shutil
import tempfile
from urllib import quote
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from mock import Mock, patch
from nose.tools import raises
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from course_modes.models import CourseMode
from courseware.models import StudentModule
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory, UserProfileFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from microsite_configuration import microsite
from shoppingcart.models import (
RegistrationCodeRedemption, Order, CouponRedemption,
PaidCourseRegistration, Coupon, Invoice, CourseRegistrationCode, CourseRegistrationCodeInvoiceItem,
InvoiceTransaction)
from shoppingcart.pdf import PDFInvoice
from student.models import (
CourseEnrollment, CourseEnrollmentAllowed, NonExistentCourseError,
ManualEnrollmentAudit, UNENROLLED_TO_ENROLLED, ENROLLED_TO_UNENROLLED,
ALLOWEDTOENROLL_TO_UNENROLLED, ENROLLED_TO_ENROLLED, UNENROLLED_TO_ALLOWEDTOENROLL,
UNENROLLED_TO_UNENROLLED, ALLOWEDTOENROLL_TO_ENROLLED
)
from student.tests.factories import UserFactory, CourseModeFactory, AdminFactory
from student.roles import CourseBetaTesterRole, CourseSalesAdminRole, CourseFinanceAdminRole, CourseInstructorRole
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.fields import Date
from courseware.models import StudentFieldOverride
import instructor_task.api
import instructor.views.api
from instructor.views.api import require_finance_admin
from instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from instructor.views.api import _split_input_list, common_exceptions_400, generate_unique_password
from instructor_task.api_helper import AlreadyRunningError
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohort_settings
from .test_tools import msk_from_problem_urlname
DATE_FIELD = Date()
EXPECTED_CSV_HEADER = (
'"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
'"customer_reference_number","internal_reference"'
)
EXPECTED_COUPON_CSV_HEADER = '"Coupon Code","Course Id","% Discount","Description","Expiration Date",' \
'"Is Active","Code Redeemed Count","Total Discounted Seats","Total Discounted Amount"'
# ddt data for test cases involving reports
REPORTS_DATA = (
{
'report_type': 'grade',
'instructor_api_endpoint': 'calculate_grades_csv',
'task_api_endpoint': 'instructor_task.api.submit_calculate_grades_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrolled learner profile',
'instructor_api_endpoint': 'get_students_features',
'task_api_endpoint': 'instructor_task.api.submit_calculate_students_features_csv',
'extra_instructor_api_kwargs': {'csv': '/csv'}
},
{
'report_type': 'detailed enrollment',
'instructor_api_endpoint': 'get_enrollment_report',
'task_api_endpoint': 'instructor_task.api.submit_detailed_enrollment_features_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrollment',
'instructor_api_endpoint': 'get_students_who_may_enroll',
'task_api_endpoint': 'instructor_task.api.submit_calculate_may_enroll_csv',
'extra_instructor_api_kwargs': {},
}
)
# ddt data for test cases involving executive summary report
EXECUTIVE_SUMMARY_DATA = (
{
'report_type': 'executive summary',
'instructor_api_endpoint': 'get_exec_summary_report',
'task_api_endpoint': 'instructor_task.api.submit_executive_summary_report',
'extra_instructor_api_kwargs': {}
},
)
@common_exceptions_400
def view_success(request): # pylint: disable=unused-argument
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=unused-argument
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=unused-argument
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
@attr('shard_1')
class TestCommonExceptions400(TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
super(TestCommonExceptions400, self).setUp()
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("User does not exist", result["error"])
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("Task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("Task is already running", result["error"])
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
def setUp(self):
super(TestInstructorAPIDenyLevels, self).setUp()
self.course = CourseFactory.create()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = self.problem_location.to_deprecated_string()
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment',
{'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership',
{'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('list_financial_report_downloads', {}),
('calculate_grades_csv', {}),
('get_students_features', {}),
('get_enrollment_report', {}),
('get_students_who_may_enroll', {}),
('get_exec_summary_report', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
if endpoint in ['send_email', 'students_update_enrollment', 'bulk_beta_modify_access']:
response = self.client.post(url, args)
else:
response = self.client.get(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(staff_member)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'list_forum_members']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course_key=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(inst)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
# TODO: make this work
if endpoint in ['rescore_problem']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test Bulk account creation and enrollment from csv file
"""
def setUp(self):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, self).setUp()
self.request = RequestFactory().request()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.url = reverse('register_and_enroll_students', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.not_enrolled_student = UserFactory(
username='NotEnrolledStudent',
email='nonenrolled@test.com',
first_name='NotEnrolled',
last_name='Student'
)
@patch('instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for email that's send to new created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = "\ntest_student@example.com,test_student_1,tester1,USA\n\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for email that's send to new created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('instructor.views.api.log.info')
def test_email_and_username_already_exist(self, info_log):
"""
        If the email address and username already exist
        and the user is enrolled in the course, do nothing (no email is sent out)
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for email that's send to new created user.
info_log.assert_called_with(
u"user already exists with username '%s' and email '%s'",
'test_student_1',
'test_student@example.com'
)
def test_file_upload_type_not_csv(self):
"""
Try uploading some non-CSV file and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_bad_file_upload_type(self):
"""
        Try uploading a file with a .csv extension but unreadable binary content and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Could not read uploaded file.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_insufficient_data(self):
"""
Try uploading a CSV file which does not have the exact four columns of data
"""
csv_content = "test_student@example.com,test_student_1\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 1)
self.assertEquals(data['general_errors'][0]['response'], 'Data in row #1 must have exactly four columns: email, username, full name, and country')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_invalid_email_in_csv(self):
"""
Test failure case of a poorly formatted email field
"""
csv_content = "test_student.example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
data = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Invalid email {0}.'.format('test_student.example.com'))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch('instructor.views.api.log.info')
def test_csv_user_exist_and_not_enrolled(self, info_log):
"""
        If the email address and username already exist
        and the user is not enrolled in the course, enroll the user and move on to the next row.
"""
csv_content = "nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
info_log.assert_called_with(
u'user %s enrolled in the course %s',
u'NotEnrolledStudent',
self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertTrue(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_email_in_csv(self):
"""
If the email address already exists, but the username is different,
assume it is the correct user and just register the user in the course.
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_2,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
warning_message = 'An account with email {email} exists but the provided username {username} ' \
'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2')
self.assertNotEquals(len(data['warnings']), 0)
self.assertEquals(data['warnings'][0]['response'], warning_message)
user = User.objects.get(email='test_student@example.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertTrue(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_username_in_csv(self):
"""
If the username already exists (but not the email),
assume it is a different user and fail to create the new account.
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
def test_csv_file_not_attached(self):
"""
Test when the user does not attach a file
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'file_not_found': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'File is not attached.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_raising_exception_in_auto_registration_and_enrollment_case(self):
"""
Test that exceptions are handled well
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
with patch('instructor.views.api.create_and_enroll_user') as mock:
mock.side_effect = NonExistentCourseError()
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'NonExistentCourseError')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_generate_unique_password(self):
"""
generate_unique_password should generate a unique password string that excludes certain characters.
"""
password = generate_unique_password([], 12)
self.assertEquals(len(password), 12)
for letter in password:
self.assertNotIn(letter, 'aAeEiIoOuU1l')
def test_users_created_and_enrolled_successfully_if_others_fail(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student3@example.com,test_student_1,tester3,CA\n" \
"test_student2@example.com,test_student_2,tester2,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
self.assertTrue(User.objects.filter(username='test_student_1', email='test_student1@example.com').exists())
self.assertTrue(User.objects.filter(username='test_student_2', email='test_student2@example.com').exists())
self.assertFalse(User.objects.filter(email='test_student3@example.com').exists())
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
@patch.object(instructor.views.api, 'generate_random_string',
Mock(side_effect=['first', 'first', 'second']))
def test_generate_unique_password_no_reuse(self):
"""
generate_unique_password should generate a unique password string that hasn't been generated before.
"""
generated_password = ['first']
password = generate_unique_password(generated_password, 12)
self.assertNotEquals(password, 'first')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
def test_allow_automated_signups_flag_not_set(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEquals(response.status_code, 403)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@attr('shard_1')
@ddt.ddt
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
def setUp(self):
super(TestInstructorAPIEnrollment, self).setUp()
self.request = RequestFactory().request()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# Email URL values
self.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
self.about_path = '/courses/{}/about'.format(self.course.id)
self.course_path = '/courses/{}/'.format(self.course.id)
        # uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103(invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll',
'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll',
'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
@ddt.data('http', 'https')
def test_enroll_with_email(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
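        # Force the request scheme so the links in the enrollment email use the expected protocol.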
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been enrolled in {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in {} "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student".format(
self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the "
"registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit {proto}://{site}{about_path} to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
)
)
@ddt.data('http', 'https')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in {display_name}.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name, proto=protocol, site=self.site_name
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered_autoenroll(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
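        # With auto_enroll the invitation should enroll the user automatically once they
        # register, which is reflected in the email copy asserted below.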
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account,"
" you will see {display_name} listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, display_name=self.course.display_name
)
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student".format(
display_name=self.course.display_name,
)
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ALLOWEDTOENROLL_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to robot-allowed@robot.org".format(
display_name=self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
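        # Pretend the course uses Shibboleth enrollment; this changes the wording of the
        # invitation email asserted below.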
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, about_path=self.about_path,
display_name=self.course.display_name,
)
)
@patch('instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll',
'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
def test_enroll_already_enrolled_student(self):
"""
Ensure that already enrolled "verified" students cannot be downgraded
to "honor"
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# make this enrollment "verified"
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
# now re-enroll the student through the instructor dash
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
# affirm that the student is still in "verified" mode
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_ENROLLED)
self.assertEqual(course_enrollment.mode, u"verified")
def create_paid_course(self):
"""
        Create a course with a paid course mode.
"""
paid_course = CourseFactory.create()
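        # A course mode with a non-zero min_price makes this a paid course, so manual
        # enrollment is expected to require a 'reason' (see test_reason_field_should_not_be_empty).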
CourseModeFactory.create(course_id=paid_course.id, min_price=50)
CourseInstructorRole(paid_course.id).add_users(self.instructor)
return paid_course
def test_reason_field_should_not_be_empty(self):
"""
        Test that the reason field must not be empty when
        manually enrolling students in paid courses.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"error": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_allowed_to_enroll_user(self):
"""
        Test enrolling a user who is initially only allowed to enroll; once they register, the allowance becomes an enrollment.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing..'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
        # now register the user
UserFactory(email=self.notregistered_email)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
self.assertEqual(manual_enrollments[1].state_transition, ALLOWEDTOENROLL_TO_ENROLLED)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": True,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": True,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_already_not_enrolled_user(self):
"""
        Test unenrolling a user who is not enrolled in the course.
"""
paid_course = self.create_paid_course()
course_enrollment = CourseEnrollment.objects.filter(
user__email=self.notregistered_email, course_id=paid_course.id
)
self.assertEqual(course_enrollment.count(), 0)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'unenroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenroll_and_enroll_verified(self):
"""
Test that unenrolling and enrolling a student from a verified track
        results in that student being in the honor track
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# upgrade enrollment
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
self.assertEqual(course_enrollment.mode, u'honor')
def _change_student_enrollment(self, user, course, action):
"""
Helper function that posts to 'students_update_enrollment' to change
a student's enrollment
"""
url = reverse(
'students_update_enrollment',
kwargs={'course_id': course.id.to_deprecated_string()},
)
params = {
'identifiers': user.email,
'action': action,
'email_students': True,
'reason': 'change user enrollment'
}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
return response
@attr('shard_1')
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
def setUp(self):
super(TestInstructorAPIBulkBetaEnrollment, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
# Email URL values
self.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
self.about_path = '/courses/{}/about'.format(self.course.id)
self.course_path = '/courses/{}/'.format(self.course.id)
        # uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103(invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
        Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"Visit {proto}://{site}{about_path} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
about_path=self.about_path
)
)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
course_path=self.course_path
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {}\n\nYou have been invited to be a beta tester "
"for {} at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {}".format(
self.notenrolled_student.profile.name,
self.course.display_name,
self.notenrolled_student.email,
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"{display_name} at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
display_name=self.course.display_name,
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@attr('shard_1')
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT test whether the actions had an effect on the
    database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
def setUp(self):
super(TestInstructorAPILevelsAccess, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course_key=self.course.id)
self.other_staff = StaffFactory(course_key=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
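        # Unknown users are reported via the userDoesNotExist flag rather than an error status code.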
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
        Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
for user in [self.instructor, self.other_user]:
for identifier_attr in [user.email, user.username]:
for rolename in ["Administrator", "Moderator", "Community TA"]:
for action in ["allow", "revoke"]:
self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)
def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
"""
Test update forum role membership.
Get unique_student_identifier, rolename and action and update forum role.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(
url,
{
'unique_student_identifier': identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@attr('shard_1')
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
def setUp(self):
super(TestInstructorAPILevelsDataDump, self).setUp()
self.course = CourseFactory.create()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.cart = Order.get_cart_for_user(self.instructor)
self.coupon_code = 'abcde'
self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
percentage_discount=10, created_by=self.instructor, is_active=True)
self.coupon.save()
# Create testing invoice 1
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
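        # Every user below gets a CourseEnrollmentAllowed record; the six students above are also enrolled.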
self.students_who_may_enroll = self.students + [UserFactory() for _ in range(5)]
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course.id
)
def register_with_redemption_code(self, user, code):
"""
enroll user using a registration code
"""
redeem_url = reverse('register_code_redemption', args=[code])
self.client.login(username=user.username, password='test')
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
def test_invalidate_sale_record(self):
"""
        Test the sale invalidation scenario.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
url = reverse('sale_validation', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url, method="POST", data=data)
        # Now try to fetch data for a non-existent invoice number
test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
self.assert_request_status_code(404, url, method="POST", data=test_data_1)
        # Now invalidate the same invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)
# now re_validate the invoice number
data['event_type'] = "re_validate"
self.assert_request_status_code(200, url, method="POST", data=data)
        # Now re_validate the same active invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("This invoice is already active.", response.content)
test_data_2 = {'invoice_number': self.sale_invoice_1.id}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
self.assertIn("Missing required event_type parameter", response.content)
test_data_3 = {'event_type': "re_validate"}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
self.assertIn("Missing required invoice_number parameter", response.content)
# submitting invalid invoice number
data['invoice_number'] = 'testing'
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)
def test_get_sale_order_records_features_csv(self):
"""
Test that the response from get_sale_order_records is in csv format.
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
self.cart.order_type = 'business'
self.cart.save()
self.cart.add_billing_details(company_name='Test Company', company_contact_name='Test',
company_contact_email='test@123', recipient_name='R1',
recipient_email='', customer_reference_number='PO#23')
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
# get the updated item
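        # select_subclasses() should downcast the base OrderItem to its concrete subclass
        # (the paid course registration item) so subclass fields are available.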
item = self.cart.orderitem_set.all().select_subclasses()[0]
# get the redeemed coupon information
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=self.cart)
sale_order_url = reverse('get_sale_order_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(sale_order_url)
self.assertEqual(response['Content-Type'], 'text/csv')
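        # 36 appears to be the per-seat price after the 10% coupon is applied to the $40 honor mode price (40 * 0.9).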
self.assertIn('36', response.content.split('\r\n')[1])
self.assertIn(str(item.unit_cost), response.content.split('\r\n')[1],)
self.assertIn(str(item.list_price), response.content.split('\r\n')[1],)
self.assertIn(item.status, response.content.split('\r\n')[1],)
self.assertIn(coupon_redemption[0].coupon.code, response.content.split('\r\n')[1],)
def test_coupon_redeem_count_in_ecommerce_section(self):
"""
Test that checks the redeem count in the instructor_dashboard coupon section
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
# Coupon Redeem Count only visible for Financial Admins.
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
self.assertEqual(resp.status_code, 200)
# URL for instructor dashboard
instructor_dashboard = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
# visit the instructor dashboard page and
# check that the coupon redeem count should be 0
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>0</td>', resp.content)
# now make the payment of your cart items
self.cart.purchase()
# visit the instructor dashboard page and
# check that the coupon redeem count should be 1
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>1</td>', resp.content)
def test_get_sale_records_features_csv(self):
"""
Test that the response from get_sale_records is in csv format.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse(
'get_sale_records',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
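        # Appending '/csv' asks the endpoint for CSV output; the JSON variant is exercised in the next test.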
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_sale_records_features_json(self):
"""
Test that the response from get_sale_records is in json format.
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
for res in res_json['sale']:
self.validate_sale_records_response(
res,
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
def test_get_sale_records_features_with_multiple_invoices(self):
"""
Test that the response from get_sale_records is in json format for multiple invoices
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='qwerty{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
# Create test invoice 2
sale_invoice_2 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
internal_reference="B", course_id=self.course.id
)
invoice_item_2 = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=sale_invoice_2,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='xyzmn{}'.format(i), course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor, invoice=sale_invoice_2, invoice_item=invoice_item_2, mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
self.validate_sale_records_response(
res_json['sale'][0],
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
self.validate_sale_records_response(
res_json['sale'][1],
course_registration_code,
sale_invoice_2,
0,
invoice_item=invoice_item_2
)
def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes, invoice_item):
"""
validate sale records attribute values with the response object
"""
self.assertEqual(res['total_amount'], invoice.total_amount)
self.assertEqual(res['recipient_email'], invoice.recipient_email)
self.assertEqual(res['recipient_name'], invoice.recipient_name)
self.assertEqual(res['company_name'], invoice.company_name)
self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
self.assertEqual(res['internal_reference'], invoice.internal_reference)
self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
self.assertEqual(res['invoice_number'], invoice.id)
self.assertEqual(res['created_by'], course_registration_code.created_by.username)
self.assertEqual(res['course_id'], invoice_item.course_id.to_deprecated_string())
self.assertEqual(res['total_used_codes'], used_codes)
self.assertEqual(res['total_codes'], 5)
def test_get_students_features(self):
"""
Test that some minimum of information is formatted
correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
@ddt.data(True, False)
def test_get_students_features_cohorted(self, is_cohorted):
"""
Test that get_students_features includes cohort info when the course is
cohorted, and does not when the course is not cohorted.
"""
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
set_course_cohort_settings(self.course.id, is_cohorted=is_cohorted)
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)
def test_get_students_who_may_enroll(self):
"""
Test whether get_students_who_may_enroll returns an appropriate
status message when users request a CSV file of students who
may enroll in a course.
"""
url = reverse(
'get_students_who_may_enroll',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertNotIn('currently being created', res_json['status'])
# CSV generation already in progress:
with patch('instructor_task.api.submit_calculate_may_enroll_csv') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('currently being created', res_json['status'])
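    # The finance-admin tests below exercise the require_finance_admin decorator:
    # an invalid course key yields a 404, a valid key without the finance admin role
    # yields a 403, and granting CourseFinanceAdminRole lets the wrapped function run.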
def test_access_course_finance_admin_with_invalid_course_key(self):
"""
        Test that the require_finance_admin decorator rejects an invalid course key
        before generating a detailed enrollment report.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'invalid_course_key')
self.assertEqual(response.status_code, 404)
self.assertFalse(func.called)
def mock_request(self):
"""
        Build a mock request with the instructor as the requesting user.
"""
request = Mock()
request.user = self.instructor
return request
def test_access_course_finance_admin_with_valid_course_key(self):
"""
        Test the require_finance_admin decorator with a valid course key when the
        user lacks the finance admin role, so the wrapped function is never called.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'valid/course/key')
self.assertEqual(response.status_code, 403)
self.assertFalse(func.called)
def test_add_user_to_fiance_admin_role_with_valid_course(self):
"""
        Test that the wrapped function is called once the user has been
        granted finance admin rights.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
decorated_func(request, self.course.id.to_deprecated_string())
self.assertTrue(func.called)
def test_enrollment_report_features_csv(self):
"""
        Test generating the detailed enrollment report.
        Enroll users and admin staff using registration codes.
"""
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
admin_user = AdminFactory()
admin_cart = Order.get_cart_for_user(admin_user)
PaidCourseRegistration.add_to_order(admin_cart, self.course.id)
admin_cart.purchase()
# create a new user/student and enroll
# in the course using a registration code
        # and then validate the generated detailed enrollment report
test_user = UserFactory()
self.register_with_redemption_code(test_user, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
UserProfileFactory.create(user=self.students[0], meta='{"company": "asdasda"}')
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_bulk_purchase_detailed_report(self):
"""
        Test generating the detailed enrollment report:
        1. Purchase registration codes.
        2. Enroll users via registration code.
        3. Validate the generated enrollment report.
"""
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'),
{'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=self.cart)
self.register_with_redemption_code(self.instructor, course_reg_codes[0].code)
test_user = UserFactory()
test_user_cart = Order.get_cart_for_user(test_user)
PaidCourseRegistration.add_to_order(test_user_cart, self.course.id)
test_user_cart.purchase()
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=-self.sale_invoice_1.total_amount,
status='refunded',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_create_registration_code_without_invoice_and_order(self):
"""
        Test generating the detailed enrollment report using a registration code
        that was not created via the invoice or bulk purchase scenarios.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_invoice_payment_is_still_pending_for_registration_codes(self):
"""
        Test generating the enrollment report after enrolling a user in the course
        with a registration code whose invoice has not yet been paid.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
@patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(
'"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
'\n"3","41","42"\n'
))
self.assertTrue(body.endswith('"8","41","42"\n'))
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.get(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
@ddt.data(*REPORTS_DATA)
@ddt.unpack
def test_calculate_report_csv_success(self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.get(url, {})
success_status = "The {report_type} report is being created.".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_success(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.get(url, {})
success_status = "The {report_type} report is being created." \
" To view the status of the report, see Pending" \
" Instructor Tasks" \
" below".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_already_running(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint) as mock:
mock.side_effect = AlreadyRunningError()
response = self.client.get(url, {})
already_running_status = "The {report_type} report is currently being created." \
" To view the status of the report, see Pending Instructor Tasks below." \
" You will be able to download the report" \
" when it is" \
" complete.".format(report_type=report_type)
self.assertIn(already_running_status, response.content)
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].email.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].username.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
@attr('shard_1')
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
This test does NOT test whether the actions had an effect on the
database, that is the job of task tests and test_enrollment.
"""
def setUp(self):
super(TestInstructorAPIRegradeTask, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = self.problem_location.to_deprecated_string()
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
def test_reset_student_attempts_deletall(self):
""" Make sure no one can delete all students state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_id=self.module_to_reset.module_id,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_course_has_entrance_exam_in_student_attempts_reset(self):
""" Test course has entrance exam id set while resetting attempts"""
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
'delete_module': False,
})
self.assertEqual(response.status_code, 400)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test course has entrance exam id set while re-scoring. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
class TestEntranceExamInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can rescore student grades,
reset student attempts and delete state for entrance exam.
"""
def setUp(self):
super(TestEntranceExamInstructorAPIRegradeTask, self).setUp()
self.course = CourseFactory.create(
org='test_org',
course='test_course',
run='test_run',
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
self.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
self.instructor = InstructorFactory(course_key=self.course.id)
# Add instructor to invalid ee course
CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.entrance_exam = ItemFactory.create(
parent=self.course,
category='chapter',
display_name='Entrance exam'
)
subsection = ItemFactory.create(
parent=self.entrance_exam,
category='sequential',
display_name='Subsection 1'
)
vertical = ItemFactory.create(
parent=subsection,
category='vertical',
display_name='Vertical 1'
)
self.ee_problem_1 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 1"
)
self.ee_problem_2 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 2"
)
ee_module_to_reset1 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_1.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
ee_module_to_reset2 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_2.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]
def test_reset_entrance_exam_student_attempts_deletall(self):
""" Make sure no one can delete all students state on entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_entrance_exam_student_attempts_single(self):
""" Test reset single student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
for changed_module in changed_modules:
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_in_entrance_exam')
def test_reset_entrance_exam_all_student_attempts(self, act):
""" Test reset all student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_invalid_entrance_exam(self):
""" Test reset for invalid entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
    def test_entrance_exam_student_delete_state(self):
""" Test delete single student entrance exam state. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
self.assertEqual(changed_modules.count(), 0)
def test_entrance_exam_delete_state_with_staff(self):
""" Test entrance exam delete state failure with staff access. """
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 403)
def test_entrance_exam_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_entrance_exam_for_student')
def test_rescore_entrance_exam_single_student(self, act):
""" Test re-scoring of entrance exam for single student. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_rescore_entrance_exam_all_student(self):
""" Test rescoring for all students. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test re-scoring of entrance exam with invalid exam. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_list_entrance_exam_instructor_tasks_student(self):
""" Test list task history for entrance exam AND student. """
# create a re-score entrance exam task
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['status'], _('Complete'))
def test_list_entrance_exam_instructor_tasks_all_student(self):
""" Test list task history for entrance exam AND all student. """
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 0)
def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
""" Test list task history for entrance exam failure if course has invalid exam. """
url = reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_skip_entrance_exam_student(self):
""" Test skip entrance exam api for student. """
        # mark the student as being allowed to skip the entrance exam
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
message = _('This student (%s) will skip the entrance exam.') % self.student.email
self.assertContains(response, message)
# post again with same student
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
# This time response message should be different
message = _('This student (%s) is already allowed to skip the entrance exam.') % self.student.email
self.assertContains(response, message)
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Checks that only instructors have access to email endpoints, that these
    endpoints are only accessible for courses that actually exist, and only
    with valid email messages.
"""
def setUp(self):
super(TestInstructorSendEmail, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
self.full_test_message = {
'send_to': 'staff',
'subject': test_subject,
'message': test_message,
}
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
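    # Note: times_called is incremented before the parity check below, so the first
    # call reports an error and subsequent calls alternate between success and failure.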
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
@attr('shard_1')
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
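    # The assertions below compare FakeTask.to_dict() against the JSON returned by the
    # task-list endpoints, so any field added to FEATURES must also be serialized there.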
def setUp(self):
super(TestInstructorAPITaskLists, self).setUp()
self.course = CourseFactory.create(
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = self.problem_location.to_deprecated_string()
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
self.tasks[-1].make_invalid_output()
@patch.object(instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@attr('shard_1')
@patch.object(instructor_task.api, 'get_instructor_task_history')
class TestInstructorEmailContentList(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
def setUp(self):
super(TestInstructorEmailContentList, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.tasks = {}
self.emails = {}
self.emails_info = {}
def setup_fake_email_info(self, num_emails, with_failures=False):
""" Initialize the specified number of fake emails """
for email_id in range(num_emails):
num_sent = random.randint(1, 15401)
if with_failures:
failed = random.randint(1, 15401)
else:
failed = 0
self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
self.emails[email_id] = FakeEmail(email_id)
self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
""" Calls the list_email_content endpoint and returns the repsonse """
self.setup_fake_email_info(num_emails, with_failures)
task_history_request.return_value = self.tasks.values()
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.side_effect = self.get_matching_mock_email
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
return response
def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
""" Tests sending emails with or without failures """
response = self.get_email_content_response(num_emails, task_history_request, with_failures)
self.assertTrue(task_history_request.called)
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content)['emails']
self.assertEqual(len(actual_email_info), num_emails)
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
self.assertEqual(expected_email_info, actual_email_info)
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should have one email
self.assertEqual(len(email_info), 1)
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info[u'email'][u'html_message']
self.assertEqual(expected_message, received_message)
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should be empty
self.assertEqual(len(email_info), 0)
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
self.check_emails_sent(50, task_history_request)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_email_info = json.loads(response.content)['emails']
self.assertEqual(len(returned_email_info), 1)
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
self.assertEqual(returned_info[info], None)
def test_list_email_with_failure(self, task_history_request):
""" Test the handling of email task that had failures """
self.check_emails_sent(1, task_history_request, True)
def test_list_many_emails_with_failures(self, task_history_request):
""" Test the handling of many emails with failures """
self.check_emails_sent(50, task_history_request, True)
def test_list_email_with_no_successes(self, task_history_request):
task_info = FakeContentTask(0, 0, 10, 'expected')
email = FakeEmail(0)
email_info = FakeEmailInfo(email, 0, 10)
task_history_request.return_value = [task_info]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.return_value = email
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_info_list = json.loads(response.content)['emails']
self.assertEqual(len(returned_info_list), 1)
returned_info = returned_info_list[0]
expected_info = email_info.to_dict()
self.assertDictEqual(expected_info, returned_info)
@attr('shard_1')
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append(
"Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus',
'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
[u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
msk_from_problem_urlname(*args)
def get_extended_due(course, unit, user):
"""
Gets the overridden due date for the given user on the given unit. Returns
`None` if there is no override set.
"""
try:
override = StudentFieldOverride.objects.get(
course_id=course.id,
student=user,
location=unit.location,
field='due'
)
return DATE_FIELD.from_json(json.loads(override.value))
except StudentFieldOverride.DoesNotExist:
return None
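# Illustrative usage (mirrors the assertions in the tests below): after hitting the
# 'change_due_date' endpoint, the override can be read back with, e.g.
#     extended = get_extended_due(course, week1, user1)
#     # -> datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc), or None if no override exists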
@attr('shard_1')
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test the due date extension endpoints.
"""
def setUp(self):
"""
Fixtures.
"""
super(TestDueDateExtensions, self).setUp()
due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
course = CourseFactory.create()
week1 = ItemFactory.create(due=due)
week2 = ItemFactory.create(due=due)
week3 = ItemFactory.create() # No due date
course.children = [week1.location.to_deprecated_string(), week2.location.to_deprecated_string(),
week3.location.to_deprecated_string()]
homework = ItemFactory.create(
parent_location=week1.location,
due=due
)
week1.children = [homework.location.to_deprecated_string()]
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=homework.location).save()
self.course = course
self.week1 = week1
self.homework = homework
self.week2 = week2
self.week3 = week3
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=course.id)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
def test_change_to_invalid_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '01/01/2009 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_change_nonexistent_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week3.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week3, self.user1)
)
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_reset_nonexistent_extension(self):
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 400, response.content)
def test_reset_extension_to_deleted_date(self):
"""
Test that we can delete a due date extension after deleting the normal
due date, without causing an error.
"""
self.test_change_due_date()
self.week1.due = None
self.week1 = self.store.update_item(self.week1, self.user1.id)
# Now, week1's normal due date is deleted but the extension still exists.
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
@attr('shard_1')
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(ModuleStoreTestCase):
"""
Test data dumps for E-commerce Course Registration Codes.
"""
def setUp(self):
"""
Fixtures.
"""
super(TestCourseRegistrationCodes, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
CourseSalesAdminRole(self.course.id).add_users(self.instructor)
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(5):
order = Order(user=self.instructor, status='purchased')
order.save()
        # Spent (used) registration codes
        for i in range(1, 6):
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
@override_settings(FINANCE_EMAIL='finance@example.com')
def test_finance_email_in_recipient_list_when_generating_registration_codes(self):
"""
Test to verify that the invoice will also be sent to the FINANCE_EMAIL when
generating registration codes
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
        # Check the last message in mail.outbox; the FINANCE_EMAIL is appended at the
        # very end when generating registration codes
self.assertEqual(mail.outbox[-1].to[0], 'finance@example.com')
def test_user_invoice_copy_preference(self):
"""
        Test that the user's invoice copy preference is remembered.
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
        # user invoice copy preference will be saved in the api user preference model
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], True)
# updating the user invoice copy preference during code generation flow
data['invoice'] = ''
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], False)
def test_generate_course_registration_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
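        # 17 lines = 1 CSV header + 15 code rows + 1 trailing empty string from the final newline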
def test_generate_course_registration_with_redeem_url_codes_csv(self):
"""
        Test the CSV of generated course registration codes, including a redeem URL for each code.
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
rows = body.split('\n')
index = 1
while index < len(rows):
if rows[index]:
row_data = rows[index].split(',')
code = row_data[0].replace('"', '')
self.assertTrue(row_data[1].startswith('"http')
and row_data[1].endswith('/shoppingcart/register/redeem/{0}/"'.format(code)))
index += 1
@patch.object(instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'second', 'third', 'fourth']))
def test_generate_course_registration_codes_matching_existing_coupon_code(self):
"""
Test the generated course registration code is already in the Coupon Table
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
coupon.save()
data = {
'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 5) # 1 for headers, 1 for new line at the end and 3 for the actual data
@patch.object(instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'first', 'second', 'third']))
def test_generate_course_registration_codes_integrity_error(self):
"""
Test for the Integrity error against the generated code
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 4)
def test_spent_course_registration_codes_csv(self):
"""
Test to generate a response of all the spent course registration codes
"""
url = reverse('spent_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'spent_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 7)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'unit_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(9):
order = Order(user=self.instructor, status='purchased')
order.save()
# Spent(used) Registration Codes
for i in range(9):
i += 13
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
data = {'spent_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_active_course_registration_codes_csv(self):
"""
Test to generate a response of all the active course registration codes
"""
url = reverse('active_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'active_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 9)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'active_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_get_all_course_registration_codes_csv(self):
"""
Test to generate a response of all the course registration codes
"""
url = reverse(
'get_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {'download_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 14)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'download_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_pdf_file_throws_exception(self):
"""
test to mock the pdf file generation throws an exception
when generating registration codes.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
with patch.object(PDFInvoice, 'generate_pdf', side_effect=Exception):
response = self.client.post(generate_code_url, data)
self.assertEqual(response.status_code, 200, response.content)
def test_get_codes_with_sale_invoice(self):
"""
Test to generate a response of all the course registration codes
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
url = reverse('get_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'download_company_name': 'Group Invoice'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
def test_with_invalid_unit_price(self):
"""
Test to generate a response of all the course registration codes
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 10, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 'invalid', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 400, response.content)
self.assertIn('Could not parse amount as', response.content)
def test_get_historical_coupon_codes(self):
"""
Test to download a response of all the active coupon codes
"""
get_coupon_code_url = reverse(
'get_coupon_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
for i in range(10):
coupon = Coupon(
code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
)
coupon.save()
        # now create coupons with the expiration dates
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
)
coupon.save()
response = self.client.get(get_coupon_code_url)
self.assertEqual(response.status_code, 200, response.content)
# filter all the coupons
for coupon in Coupon.objects.all():
self.assertIn(
'"{coupon_code}","{course_id}","{discount}","{description}","{expiration_date}","{is_active}",'
'"{code_redeemed_count}","{total_discounted_seats}","{total_discounted_amount}"'.format(
coupon_code=coupon.code,
course_id=coupon.course_id,
discount=coupon.percentage_discount,
description=coupon.description,
expiration_date=coupon.display_expiry_date,
is_active=coupon.is_active,
code_redeemed_count="0",
total_discounted_seats="0",
total_discounted_amount="0",
), response.content
)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
@attr('shard_1')
class TestBulkCohorting(ModuleStoreTestCase):
"""
Test adding users to cohorts in bulk via CSV upload.
"""
def setUp(self):
super(TestBulkCohorting, self).setUp()
self.course = CourseFactory.create()
self.staff_user = StaffFactory(course_key=self.course.id)
self.non_staff_user = UserFactory.create()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tempdir)
def call_add_users_to_cohorts(self, csv_data, suffix='.csv', method='POST'):
"""
Call `add_users_to_cohorts` with a file generated from `csv_data`.
"""
        # this temporary file is removed by the cleanup registered in setUp (addCleanup(shutil.rmtree, self.tempdir))
__, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
with open(file_name, 'r') as file_pointer:
url = reverse('add_users_to_cohorts', kwargs={'course_id': unicode(self.course.id)})
if method == 'POST':
return self.client.post(url, {'uploaded-file': file_pointer})
elif method == 'GET':
return self.client.get(url, {'uploaded-file': file_pointer})
def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
"""
Verify that we get the error we expect for a given file input.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
self.assertEqual(response.status_code, 400)
result = json.loads(response.content)
self.assertEqual(result['error'], error)
def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
"""
        Verify that `add_users_to_cohorts` successfully validates the
file content, uploads the input file, and triggers the
background task.
"""
mock_store_upload.return_value = (None, 'fake_file_name.csv')
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content)
self.assertEqual(response.status_code, 204)
self.assertTrue(mock_store_upload.called)
self.assertTrue(mock_cohort_task.called)
def test_no_cohort_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a cohort field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'username,email\n', "The file must contain a 'cohort' column containing cohort names."
)
def test_no_username_or_email_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a username or email field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
)
def test_empty_csv(self):
"""
Verify that we get a descriptive verification error when we haven't
included any data in the uploaded CSV.
"""
self.expect_error_on_file_content(
'', "The file must contain a 'cohort' column containing cohort names."
)
def test_wrong_extension(self):
"""
Verify that we get a descriptive verification error when we haven't
uploaded a file with a '.csv' extension.
"""
self.expect_error_on_file_content(
'', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
)
def test_non_staff_no_access(self):
"""
Verify that we can't access the view when we aren't a staff user.
"""
self.client.login(username=self.non_staff_user.username, password='test')
response = self.call_add_users_to_cohorts('')
self.assertEqual(response.status_code, 403)
def test_post_only(self):
"""
Verify that we can't call the view when we aren't using POST.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts('', method='GET')
self.assertEqual(response.status_code, 405)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_username(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call a background task when
the CSV has username and cohort columns.
"""
self.verify_success_on_file_content(
'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has email and cohort columns.
"""
self.verify_success_on_file_content(
'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has username, email and cohort columns.
"""
self.verify_success_on_file_content(
'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns.
"""
self.verify_success_on_file_content(
'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns and line
feeds.
"""
self.verify_success_on_file_content(
'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
|
nagyistoce/edx-platform
|
lms/djangoapps/instructor/tests/test_api.py
|
Python
|
agpl-3.0
| 193,070
|
[
"VisIt"
] |
66bb00d81770a1f02444d89cba5aff4f03b38acb35c4884dbba148a5ca6c687a
|
# -*- coding: utf-8 -*-
"""Deletion functions to supplement :mod:`pybel.struct.mutation.expansion`."""
import logging
import typing
from collections import Counter, defaultdict
from typing import Collection, Iterable, Optional, Tuple
import pybel.struct.mutation.expansion.neighborhood
from pybel import BELGraph
from pybel.constants import ANNOTATIONS
from pybel.dsl import BaseEntity, CentralDogma, ComplexAbundance, CompositeAbundance, Reaction
from pybel.struct.filters import and_edge_predicates, concatenate_node_predicates
from pybel.struct.filters.edge_predicates import edge_has_annotation, is_causal_relation
from pybel.struct.filters.node_predicates import true_node_predicate
from pybel.struct.filters.typing import EdgeIterator, EdgePredicates, NodePredicates
from pybel.struct.pipeline import uni_in_place_transformation
from pybel.typing import EdgeData
__all__ = [
'get_peripheral_successor_edges',
'get_peripheral_predecessor_edges',
'count_sources',
'count_targets',
'count_peripheral_successors',
'count_peripheral_predecessors',
'get_subgraph_edges',
'get_subgraph_peripheral_nodes',
'expand_periphery',
'enrich_complexes',
'enrich_composites',
'enrich_reactions',
'enrich_variants',
'enrich_unqualified',
'expand_internal_causal',
]
logger = logging.getLogger(__name__)
def get_peripheral_successor_edges(graph: BELGraph, subgraph: Collection[BaseEntity]) -> EdgeIterator:
"""Get the set of possible successor edges peripheral to the sub-graph.
The source nodes in this iterable are all inside the sub-graph, while the targets are outside.
"""
for u in subgraph:
for _, v, k in graph.out_edges(u, keys=True):
if v not in subgraph:
yield u, v, k
def get_peripheral_predecessor_edges(graph: BELGraph, subgraph: Collection[BaseEntity]) -> EdgeIterator:
"""Get the set of possible predecessor edges peripheral to the sub-graph.
The target nodes in this iterable are all inside the sub-graph, while the sources are outside.
"""
for v in subgraph:
for u, _, k in graph.in_edges(v, keys=True):
if u not in subgraph:
yield u, v, k
def count_sources(edge_iter: EdgeIterator) -> Counter:
"""Count the source nodes in an edge iterator with keys and data.
:return: A counter of source nodes in the iterable
"""
return Counter(u for u, _, _ in edge_iter)
def count_targets(edge_iter: EdgeIterator) -> Counter:
"""Count the target nodes in an edge iterator with keys and data.
:return: A counter of target nodes in the iterable
"""
return Counter(v for _, v, _ in edge_iter)
def count_peripheral_successors(graph: BELGraph, subgraph: BELGraph) -> typing.Counter[BaseEntity]:
"""Count all peripheral successors of the subgraph.
:param graph: A BEL graph
:param subgraph: An iterator of BEL nodes
:return: A counter of possible successor nodes
"""
return count_targets(get_peripheral_successor_edges(graph, subgraph))
def count_peripheral_predecessors(graph: BELGraph, subgraph: BELGraph) -> typing.Counter[BaseEntity]:
"""Count all peripheral predecessors of the subgraph.
:param graph: A BEL graph
:param subgraph: An iterator of BEL nodes
:return: A counter of possible predecessor nodes
"""
return count_sources(get_peripheral_predecessor_edges(graph, subgraph))
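# Illustrative helper (not part of the original module): rank candidate external nodes by how
# many subgraph members point to them; this is the same signal expand_periphery uses below.
def top_peripheral_successors(graph: BELGraph, subgraph: Collection[BaseEntity], n: int = 10):
    """Sketch: return the ``n`` most common peripheral successor nodes with their counts."""
    return count_peripheral_successors(graph, subgraph).most_common(n)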
def get_subgraph_edges(
graph: BELGraph,
annotation: str,
value: str,
source_filter: Optional[NodePredicates] = None,
target_filter: Optional[NodePredicates] = None,
) -> Iterable[Tuple[BaseEntity, BaseEntity, str, EdgeData]]:
"""Get all edges from a given subgraph whose source and target nodes pass all of the given filters.
:param graph: A BEL graph
:param annotation: The annotation to search
:param value: The annotation value to search by
:param source_filter: Optional filter for source nodes (graph, node) -> bool
:param target_filter: Optional filter for target nodes (graph, node) -> bool
:return: An iterable of (source node, target node, key, data) for all edges that match the annotation/value and
node filters
"""
if source_filter is None:
source_filter = true_node_predicate
if target_filter is None:
target_filter = true_node_predicate
for u, v, k, data in graph.edges(keys=True, data=True):
if not edge_has_annotation(data, annotation):
continue
if data[ANNOTATIONS][annotation] == value and source_filter(graph, u) and target_filter(graph, v):
yield u, v, k, data
def get_subgraph_peripheral_nodes(
graph: BELGraph,
subgraph: Collection[BaseEntity],
node_predicates: Optional[NodePredicates] = None,
edge_predicates: Optional[EdgePredicates] = None,
):
"""Get a summary dictionary of all peripheral nodes to a given sub-graph.
:return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)},
'predecessor': {internal node: list of (key, dict)}}}
:rtype: dict
For example, it might be useful to quantify the number of predecessors and successors:
>>> from pybel.struct.filters.node_predicates import not_pathology
>>> value = 'Blood vessel dilation subgraph'
>>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value)
>>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=not_pathology)
>>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True):
    >>> if 1 == len(p[node]['successor']) or 1 == len(p[node]['predecessor']):
>>> continue
>>> print(node,
>>> len(p[node]['successor']),
>>> len(p[node]['predecessor']),
>>> len(set(p[node]['successor']) | set(p[node]['predecessor'])))
"""
node_filter = concatenate_node_predicates(node_predicates=node_predicates)
edge_filter = and_edge_predicates(edge_predicates=edge_predicates)
result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    # The peripheral edge generators above yield (u, v, k) triples, so the edge data is looked up
    # from the graph before it is stored.
    for u, v, k in get_peripheral_successor_edges(graph, subgraph):
        if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
            continue
        result[v]['predecessor'][u].append((k, graph[u][v][k]))
    for u, v, k in get_peripheral_predecessor_edges(graph, subgraph):
        if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
            continue
        result[u]['successor'][v].append((k, graph[u][v][k]))
return result
@uni_in_place_transformation
def expand_periphery(
universe: BELGraph,
graph: BELGraph,
node_predicates: Optional[NodePredicates] = None,
edge_predicates: Optional[EdgePredicates] = None,
threshold: int = 2,
) -> None:
"""Iterate over all possible edges, peripheral to a given subgraph, that could be added from the given graph.
    Edges are added when they go to nodes that are involved in relationships with at least the
    threshold (default 2) number of nodes already in the subgraph.
:param universe: The universe of BEL knowledge
:param graph: The (sub)graph to expand
    :param threshold: Minimum number of connections to subgraph nodes a peripheral node needs before it is added
A reasonable edge filter to use is :func:`pybel_tools.filters.keep_causal_edges` because this function can allow
for huge expansions if there happen to be hub nodes.
"""
nd = get_subgraph_peripheral_nodes(
universe, graph,
node_predicates=node_predicates,
edge_predicates=edge_predicates,
)
for node, dd in nd.items():
pred_d = dd['predecessor']
succ_d = dd['successor']
in_subgraph_connections = set(pred_d) | set(succ_d)
if threshold > len(in_subgraph_connections):
continue
graph.add_node(node, attr_dict=universe[node])
for u, edges in pred_d.items():
for key, data in edges:
graph.add_edge(u, node, key=key, **data)
for v, edges in succ_d.items():
for key, data in edges:
graph.add_edge(node, v, key=key, **data)
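# Illustrative helper (not part of the original module): the note above warns that hub nodes can
# cause huge expansions, so this sketch restricts the expansion to causal edges (via the
# is_causal_relation predicate imported at the top of this module) and a stricter threshold.
def expand_periphery_causal(universe: BELGraph, graph: BELGraph, threshold: int = 3) -> None:
    """Sketch: expand the periphery of ``graph`` using only causal edges from ``universe``."""
    expand_periphery(universe, graph, edge_predicates=is_causal_relation, threshold=threshold)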
@uni_in_place_transformation
def enrich_complexes(graph: BELGraph) -> None:
"""Add all of the members of the complex abundances to the graph."""
for u in list(graph):
if not isinstance(u, ComplexAbundance):
continue
for v in u.members:
graph.add_part_of(v, u)
@uni_in_place_transformation
def enrich_composites(graph: BELGraph) -> None:
"""Add all of the members of the composite abundances to the graph."""
for u in list(graph):
if not isinstance(u, CompositeAbundance):
continue
for v in u.members:
graph.add_part_of(v, u)
@uni_in_place_transformation
def enrich_reactions(graph: BELGraph) -> None:
"""Add all of the reactants and products of reactions to the graph."""
for u in list(graph):
if not isinstance(u, Reaction):
continue
for v in u.reactants:
graph.add_has_reactant(u, v)
for v in u.products:
graph.add_has_product(u, v)
@uni_in_place_transformation
def enrich_variants(graph: BELGraph) -> None:
"""Add the reference nodes for all variants of the given function."""
for u in list(graph):
if not isinstance(u, CentralDogma):
continue
parent = u.get_parent()
if parent is None:
continue
if parent not in graph:
graph.add_has_variant(parent, u)
@uni_in_place_transformation
def enrich_unqualified(graph: BELGraph) -> None:
"""Enrich the sub-graph with the unqualified edges from the graph.
    The reason you might want to do this is that when you induce a sub-graph from the original graph based on an
    annotation filter, the unqualified edges (which carry no annotations) that most likely connect elements within
    your graph are not included.
.. seealso::
This function thinly wraps the successive application of the following functions:
- :func:`enrich_complexes`
- :func:`enrich_composites`
- :func:`enrich_reactions`
- :func:`enrich_variants`
Equivalent to:
>>> enrich_complexes(graph)
>>> enrich_composites(graph)
>>> enrich_reactions(graph)
>>> enrich_variants(graph)
"""
enrich_complexes(graph)
enrich_composites(graph)
enrich_reactions(graph)
enrich_variants(graph)
@uni_in_place_transformation
def expand_internal_causal(universe: BELGraph, graph: BELGraph) -> None:
"""Add causal edges between entities in the sub-graph.
Is an extremely thin wrapper around :func:`expand_internal`.
:param universe: A BEL graph representing the universe of all knowledge
:param graph: The target BEL graph to enrich with causal relations between contained nodes
Equivalent to:
>>> from pybel.struct import expand_internal, is_causal_relation
>>> expand_internal(universe, graph, edge_predicates=is_causal_relation)
"""
for u, v, key in pybel.struct.mutation.expansion.neighborhood.iterate_internal(universe, graph):
data = universe.edges[u][v][key]
if is_causal_relation(data):
graph.add_edge(u, v, key=key, **data)
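# Illustrative composition sketch (not part of the original module): a common workflow is to add
# the unqualified member/variant/reaction edges first, then close the result under causal
# relations drawn from the universe.
def enrich_and_close(universe: BELGraph, graph: BELGraph) -> None:
    """Sketch: enrich ``graph`` with unqualified edges, then add internal causal edges."""
    enrich_unqualified(graph)
    expand_internal_causal(universe, graph)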
|
pybel/pybel-tools
|
src/pybel_tools/mutation/expansion.py
|
Python
|
mit
| 11,457
|
[
"Pybel"
] |
8dce8bbe893b6b51cd17e37f251de608568951923ec44c87d47c60725529661d
|
# encoding: utf-8
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
import pybreaker
import requests as requests
from jormungandr import cache, app
from jormungandr.realtime_schedule.realtime_proxy import RealtimeProxy, RealtimeProxyError, floor_datetime
from jormungandr.schedule import RealTimePassage
import xml.etree.ElementTree as et
import aniso8601
from datetime import datetime
from flask_restful.inputs import boolean
import six
def to_bool(b):
"""
encapsulate flask_restful.inputs.boolean to prevent exception if format isn't valid
>>> to_bool('true')
True
>>> to_bool('false')
False
>>> to_bool('f')
False
>>> to_bool('t')
False
>>> to_bool('bob')
False
"""
try:
return boolean(b)
except ValueError:
return False
class Siri(RealtimeProxy):
"""
Class managing calls to siri external service providing real-time next passages
curl example to check/test that external service is working:
curl -X POST '{server}' -d '<x:Envelope
xmlns:x="http://schemas.xmlsoap.org/soap/envelope/" xmlns:wsd="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<x:Header/>
<x:Body>
<GetStopMonitoring xmlns="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<ServiceRequestInfo xmlns="">
<siri:RequestTimestamp>{datetime}</siri:RequestTimestamp>
<siri:RequestorRef>{requestor_ref}</siri:RequestorRef>
<siri:MessageIdentifier>IDontCare</siri:MessageIdentifier>
</ServiceRequestInfo>
<Request version="1.3" xmlns="">
<siri:RequestTimestamp>{datetime}</siri:RequestTimestamp>
<siri:MessageIdentifier>IDontCare</siri:MessageIdentifier>
<siri:MonitoringRef>{stop_code}</siri:MonitoringRef>
<siri:MinimumStopVisitsPerLine>{nb_desired}</siri:MinimumStopVisitsPerLine>
</Request>
<RequestExtension xmlns=""/>
</GetStopMonitoring>
</x:Body>
</x:Envelope>'
    {datetime} is iso-formatted: YYYY-mm-ddTHH:MM:ss.sss+HH:MM
{requestor_ref} is a configuration parameter
    {stop_code} is the stop_point code value, whose code type is the connector's 'id' (or its 'destination_id_tag' if provided in conf)
ex: for a connector "Siri_BOB", on stop_point_BOB, you should find in the Navitia stop_point response:
"codes": [
{
"type": "Siri_BOB",
"value": "Bobito:StopPoint:BOB:00021201:ITO"
}, ...
{nb_desired} is the requested number of next passages
In practice it will look like:
curl -X POST 'http://bobito.fr:8080/ProfilSiriKidfProducer-Bobito/SiriServices' -d '<x:Envelope
xmlns:x="http://schemas.xmlsoap.org/soap/envelope/" xmlns:wsd="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<x:Header/>
<x:Body>
<GetStopMonitoring xmlns="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<ServiceRequestInfo xmlns="">
<siri:RequestTimestamp>2018-06-11T17:21:49.703+02:00</siri:RequestTimestamp>
<siri:RequestorRef>BobitoJVM</siri:RequestorRef>
<siri:MessageIdentifier>IDontCare</siri:MessageIdentifier>
</ServiceRequestInfo>
<Request version="1.3" xmlns="">
<siri:RequestTimestamp>2018-06-11T17:21:49.703+02:00</siri:RequestTimestamp>
<siri:MessageIdentifier>IDontCare</siri:MessageIdentifier>
<siri:MonitoringRef>Bobito:StopPoint:BOB:00021201:ITO</siri:MonitoringRef>
<siri:MaximumStopVisits>5</siri:MaximumStopVisits>
</Request>
<RequestExtension xmlns=""/>
</GetStopMonitoring>
</x:Body>
</x:Envelope>'
Then Navitia matches route-points in the response using {stop_code}, {route_code} and {line_code}.
{stop_code}, {route_code} and {line_code} are provided using the same code key, named after
the 'destination_id_tag' if provided on connector's init, or the 'id' otherwise.
"""
def __init__(
self,
id,
service_url,
requestor_ref,
object_id_tag=None,
destination_id_tag=None,
instance=None,
timeout=10,
**kwargs
):
self.service_url = service_url
self.requestor_ref = requestor_ref # login for siri
self.timeout = timeout # timeout in seconds
self.rt_system_id = id
self.object_id_tag = object_id_tag if object_id_tag else id
self.destination_id_tag = destination_id_tag
self.instance = instance
self.breaker = pybreaker.CircuitBreaker(
fail_max=app.config.get('CIRCUIT_BREAKER_MAX_SIRI_FAIL', 5),
reset_timeout=app.config.get('CIRCUIT_BREAKER_SIRI_TIMEOUT_S', 60),
)
# A step is applied on from_datetime to discretize calls and allow caching them
self.from_datetime_step = kwargs.get(
'from_datetime_step', app.config['CACHE_CONFIGURATION'].get('TIMEOUT_SIRI', 60)
)
def __repr__(self):
"""
        Used as the cache key. We use the rt_system_id to share the cache between servers in production.
"""
try:
return self.rt_system_id.encode('utf-8', 'backslashreplace')
except:
return self.rt_system_id
def _get_next_passage_for_route_point(self, route_point, count, from_dt, current_dt, duration=None):
stop = route_point.fetch_stop_id(self.object_id_tag)
request = self._make_request(monitoring_ref=stop, dt=from_dt, count=count)
if not request:
return None
siri_response = self._call_siri(request)
if not siri_response or siri_response.status_code != 200:
raise RealtimeProxyError('invalid response')
logging.getLogger(__name__).debug('siri for {}: {}'.format(stop, siri_response.text))
ns = {'siri': 'http://www.siri.org.uk/siri'}
tree = None
try:
tree = et.fromstring(siri_response.content)
except et.ParseError:
logging.getLogger(__name__).exception("invalid xml")
raise RealtimeProxyError('invalid xml')
self._validate_response_or_raise(tree, ns)
return self._get_passages(tree, ns, route_point)
def status(self):
return {
'id': six.text_type(self.rt_system_id),
'timeout': self.timeout,
'circuit_breaker': {
'current_state': self.breaker.current_state,
'fail_counter': self.breaker.fail_counter,
'reset_timeout': self.breaker.reset_timeout,
},
}
def _validate_response_or_raise(self, tree, ns):
stop_monitoring_delivery = tree.find('.//siri:StopMonitoringDelivery', ns)
if stop_monitoring_delivery is None:
raise RealtimeProxyError('No StopMonitoringDelivery in response')
status = stop_monitoring_delivery.find('.//siri:Status', ns)
if status is not None and not to_bool(status.text):
            # Status is false: there is a problem, but we may have a valid response too...
            # Let's log what's happening
            description = None  # keep defined even when no ErrorCondition is present below
            error_condition = stop_monitoring_delivery.find('.//siri:ErrorCondition', ns)
if error_condition is not None and list(error_condition):
if error_condition.find('.//siri:NoInfoForTopicError', ns) is not None:
# There is no data, we might be at the end of the service
                    # OR the SIRI server doesn't update its own data: there is no way to know
# let's say it's normal and not log nor return base_schedule data
return
# Log the error returned by SIRI, there is a node for the normalized error code
# and another node that holds the description
code = " ".join([e.tag for e in list(error_condition) if 'Description' not in e.tag])
description_node = error_condition.find('.//siri:Description', ns)
description = description_node.text if description_node is not None else None
logging.getLogger(__name__).warning('error in siri response: %s/%s', code, description)
monitored_stops = stop_monitoring_delivery.findall('.//siri:MonitoredStopVisit', ns)
if monitored_stops is None or len(monitored_stops) < 1:
# we might want to ignore error that match siri:NoInfoForTopicError,
# maybe it means that there is no next departure, maybe not...
# There is no departures and status is false: this looks like a real error...
                # If the description contains an error message, use it in the exception (ex: [BAD_ID] MonitoringRef (01001713:TOC))
message = description or 'response status = false'
raise RealtimeProxyError(message)
def _get_passages(self, tree, ns, route_point):
stop = route_point.fetch_stop_id(self.object_id_tag)
line = route_point.fetch_line_id(self.object_id_tag)
route = route_point.fetch_route_id(self.object_id_tag)
next_passages = []
for visit in tree.findall('.//siri:MonitoredStopVisit', ns):
cur_stop = visit.find('.//siri:StopPointRef', ns).text
if stop != cur_stop:
continue
cur_line = visit.find('.//siri:LineRef', ns).text
if line != cur_line:
continue
cur_route = visit.find('.//siri:DirectionName', ns).text
if route != cur_route:
continue
# TODO? we should ignore MonitoredCall with a DepartureStatus set to "Cancelled"
cur_destination = visit.find('.//siri:DestinationName', ns).text
cur_dt = visit.find('.//siri:ExpectedDepartureTime', ns).text
# TODO? fallback on siri:AimedDepartureTime if there is no ExpectedDepartureTime
# In that case we may want to set realtime to False
cur_dt = aniso8601.parse_datetime(cur_dt)
next_passages.append(RealTimePassage(cur_dt, cur_destination))
return next_passages
@cache.memoize(app.config.get(str('CACHE_CONFIGURATION'), {}).get(str('TIMEOUT_SIRI'), 60))
def _call_siri(self, request):
encoded_request = request.encode('utf-8', 'backslashreplace')
headers = {"Content-Type": "text/xml; charset=UTF-8", "Content-Length": str(len(encoded_request))}
logging.getLogger(__name__).debug('siri RT service, post at {}: {}'.format(self.service_url, request))
try:
return self.breaker.call(
requests.post,
url=self.service_url,
headers=headers,
data=encoded_request,
verify=False,
timeout=self.timeout,
)
except pybreaker.CircuitBreakerError as e:
logging.getLogger(__name__).error(
                'siri RT service dead, using base schedule (error: {})'.format(e)
)
raise RealtimeProxyError('circuit breaker open')
except requests.Timeout as t:
logging.getLogger(__name__).error(
                'siri RT service timeout, using base schedule (error: {})'.format(t)
)
raise RealtimeProxyError('timeout')
except Exception as e:
logging.getLogger(__name__).exception('siri RT error, using base schedule')
raise RealtimeProxyError(str(e))
def _make_request(self, dt, count, monitoring_ref):
        # we don't want to ask SIRI for 1000 next departures :)
count = min(count or 5, 5) # if no value defined we ask for 5 passages
message_identifier = 'IDontCare'
request = """<?xml version="1.0" encoding="UTF-8"?>
<x:Envelope xmlns:x="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:wsd="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<x:Header/>
<x:Body>
<GetStopMonitoring xmlns="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<ServiceRequestInfo xmlns="">
<siri:RequestTimestamp>{dt}</siri:RequestTimestamp>
<siri:RequestorRef>{RequestorRef}</siri:RequestorRef>
<siri:MessageIdentifier>{MessageIdentifier}</siri:MessageIdentifier>
</ServiceRequestInfo>
<Request version="1.3" xmlns="">
<siri:RequestTimestamp>{dt}</siri:RequestTimestamp>
<siri:MessageIdentifier>{MessageIdentifier}</siri:MessageIdentifier>
<siri:MonitoringRef>{MonitoringRef}</siri:MonitoringRef>
<siri:MinimumStopVisitsPerLine>{count}</siri:MinimumStopVisitsPerLine>
</Request>
<RequestExtension xmlns=""/>
</GetStopMonitoring>
</x:Body>
</x:Envelope>
""".format(
dt=floor_datetime(datetime.utcfromtimestamp(dt), self.from_datetime_step).isoformat(),
count=count,
RequestorRef=self.requestor_ref,
MessageIdentifier=message_identifier,
MonitoringRef=monitoring_ref,
)
return request
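# A hypothetical construction sketch, reusing the placeholder endpoint and requestor_ref from the
# class docstring above (not real values); in practice the proxy is built from the Jormungandr
# instance configuration rather than by hand:
#
#     proxy = Siri(
#         id='Siri_BOB',
#         service_url='http://bobito.fr:8080/ProfilSiriKidfProducer-Bobito/SiriServices',
#         requestor_ref='BobitoJVM',
#         timeout=10,
#     )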
|
xlqian/navitia
|
source/jormungandr/jormungandr/realtime_schedule/siri.py
|
Python
|
agpl-3.0
| 14,796
|
[
"VisIt"
] |
11bcc850bacd5d127ff04fd3dc30db4c90c56b22216e269fbe68aed6def36ac9
|
"""
Implementations of Restricted Boltzmann Machines and associated sampling
strategies.
"""
# Standard library imports
import logging
# Third-party imports
import numpy
N = numpy
np = numpy
from theano.compat import six
from theano.compat.six.moves import xrange
import theano
from theano import tensor
from theano.compat.six.moves import zip as izip
T = tensor
from theano.tensor import nnet
# Local imports
from pylearn2.costs.cost import Cost
from pylearn2.blocks import Block, StackedBlocks
from pylearn2.utils import as_floatX, safe_update, sharedX
from pylearn2.models import Model
from pylearn2.expr.nnet import inverse_sigmoid_numpy
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.space import VectorSpace
from pylearn2.utils import safe_union
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_np_rng, make_theano_rng
theano.config.warn.sum_div_dimshuffle_bug = False
logger = logging.getLogger(__name__)
if 0:
logger.warning('using SLOW rng')
RandomStreams = tensor.shared_randomstreams.RandomStreams
else:
import theano.sandbox.rng_mrg
RandomStreams = theano.sandbox.rng_mrg.MRG_RandomStreams
def training_updates(visible_batch, model, sampler, optimizer):
"""
Combine together updates from various sources for RBM training.
Parameters
----------
visible_batch : tensor_like
Theano symbolic representing a minibatch on the visible units,
with the first dimension indexing training examples and the second
indexing data dimensions.
model : object
An instance of `RBM` or a derived class, or one implementing
the RBM interface.
sampler : object
An instance of `Sampler` or a derived class, or one implementing
the sampler interface.
optimizer : object
An instance of `_Optimizer` or a derived class, or one implementing
the optimizer interface (typically an `_SGDOptimizer`).
Returns
-------
WRITEME
"""
# TODO: the Optimizer object got deprecated, and this is the only
# functionality that requires it. We moved the Optimizer
# here with an _ before its name.
# We should figure out how best to refactor the code.
# Optimizer was problematic because people kept using SGDOptimizer
# instead of training_algorithms.sgd.
# Compute negative phase updates.
sampler_updates = sampler.updates()
# Compute SML gradients.
pos_v = visible_batch
#neg_v = sampler_updates[sampler.particles]
neg_v = sampler.particles
grads = model.ml_gradients(pos_v, neg_v)
# Build updates dictionary combining (gradient, sampler) updates.
ups = optimizer.updates(gradients=grads)
safe_update(ups, sampler_updates)
return ups
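# Typical wiring sketch (purely illustrative; see RBM.redo_theano further below for the concrete
# version used by the default learning rule):
#
#     minibatch = tensor.matrix()
#     updates = training_updates(visible_batch=minibatch, model=rbm,
#                                sampler=sampler, optimizer=optimizer)
#     train = theano.function([minibatch], updates=updates)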
class Sampler(object):
"""
A sampler is responsible for implementing a sampling strategy on top of
an RBM, which may include retaining state e.g. the negative particles for
Persistent Contrastive Divergence.
Parameters
----------
rbm : object
An instance of `RBM` or a derived class, or one implementing
the `gibbs_step_for_v` interface.
particles : numpy.ndarray
        An initial state for the set of persistent Markov chain particles
that will be updated at every step of learning.
rng : RandomState object
NumPy random number generator object used to initialize a
RandomStreams object used in training.
"""
def __init__(self, rbm, particles, rng):
self.__dict__.update(rbm=rbm)
rng = make_np_rng(rng, which_method="randn")
seed = int(rng.randint(2 ** 30))
self.s_rng = make_theano_rng(seed, which_method="binomial")
self.particles = sharedX(particles, name='particles')
def updates(self):
"""
Get the dictionary of updates for the sampler's persistent state
at each step.
Returns
-------
updates : dict
Dictionary with shared variable instances as keys and symbolic
expressions indicating how they should be updated as values.
Notes
-----
In the `Sampler` base class, this is simply a stub.
"""
raise NotImplementedError()
class BlockGibbsSampler(Sampler):
"""
Implements a persistent Markov chain based on block gibbs sampling
for use with Persistent Contrastive
    Divergence, a.k.a. stochastic maximum likelihood, as described in [1].
.. [1] T. Tieleman. "Training Restricted Boltzmann Machines using
approximations to the likelihood gradient". Proceedings of the 25th
International Conference on Machine Learning, Helsinki, Finland,
2008. http://www.cs.toronto.edu/~tijmen/pcd/pcd.pdf
Parameters
----------
rbm : object
An instance of `RBM` or a derived class, or one implementing
the `gibbs_step_for_v` interface.
particles : ndarray
An initial state for the set of persistent Markov chain particles
that will be updated at every step of learning.
rng : RandomState object
NumPy random number generator object used to initialize a
RandomStreams object used in training.
steps : int, optional
Number of Gibbs steps to run the Markov chain for at each
iteration.
particles_clip : None or (min, max) pair, optional
The values of the returned particles will be clipped between
min and max.
"""
def __init__(self, rbm, particles, rng, steps=1, particles_clip=None):
super(BlockGibbsSampler, self).__init__(rbm, particles, rng)
self.steps = steps
self.particles_clip = particles_clip
def updates(self, particles_clip=None):
"""
Get the dictionary of updates for the sampler's persistent state
at each step.
Parameters
----------
particles_clip : WRITEME
Returns
-------
updates : dict
Dictionary with shared variable instances as keys and symbolic
expressions indicating how they should be updated as values.
"""
steps = self.steps
particles = self.particles
# TODO: do this with scan?
for i in xrange(steps):
particles, _locals = self.rbm.gibbs_step_for_v(
particles,
self.s_rng
)
assert particles.type.dtype == self.particles.type.dtype
if self.particles_clip is not None:
p_min, p_max = self.particles_clip
# The clipped values should still have the same type
dtype = particles.dtype
p_min = tensor.as_tensor_variable(p_min)
if p_min.dtype != dtype:
p_min = tensor.cast(p_min, dtype)
p_max = tensor.as_tensor_variable(p_max)
if p_max.dtype != dtype:
p_max = tensor.cast(p_max, dtype)
particles = tensor.clip(particles, p_min, p_max)
if not hasattr(self.rbm, 'h_sample'):
self.rbm.h_sample = sharedX(numpy.zeros((0, 0)), 'h_sample')
return {
self.particles: particles,
# TODO: self.rbm.h_sample is never used, why is that here?
# Moreover, it does not make sense for things like ssRBM.
self.rbm.h_sample: _locals['h_mean']
}
class RBM(Block, Model):
"""
A base interface for RBMs, implementing the binary-binary case.
Parameters
----------
nvis : int, optional
Number of visible units in the model.
(Specifying this implies that the model acts on a vector,
i.e. it sets vis_space = pylearn2.space.VectorSpace(nvis) )
nhid : int, optional
Number of hidden units in the model.
(Specifying this implies that the model acts on a vector)
vis_space : pylearn2.space.Space, optional
Space object describing what kind of vector space the RBM acts
on. Don't specify if you used nvis / hid
hid_space: pylearn2.space.Space, optional
Space object describing what kind of vector space the RBM's
hidden units live in. Don't specify if you used nvis / nhid
transformer : WRITEME
irange : float, optional
The size of the initial interval around 0 for weights.
rng : RandomState object or seed, optional
NumPy RandomState object to use when initializing parameters
of the model, or (integer) seed to use to create one.
init_bias_vis : array_like, optional
Initial value of the visible biases, broadcasted as necessary.
init_bias_vis_marginals : pylearn2.datasets.dataset.Dataset or None
Optional. Dataset used to initialize the visible biases to the
inverse sigmoid of the data marginals
init_bias_hid : array_like, optional
initial value of the hidden biases, broadcasted as necessary.
base_lr : float, optional
The base learning rate
anneal_start : int, optional
Number of steps after which to start annealing on a 1/t schedule
nchains : int, optional
Number of negative chains
sml_gibbs_steps : int, optional
Number of gibbs steps to take per update
random_patches_src : pylearn2.datasets.dataset.Dataset or None
Optional. Dataset from which to draw random patches in order to
initialize the weights. Patches will be multiplied by irange.
monitor_reconstruction : bool, optional
If True, will request a monitoring channel to monitor
reconstruction error
Notes
-----
The `RBM` class is redundant now that we have a `DBM` class, since
an RBM is just a DBM with one hidden layer. Users of pylearn2 should
use single-layer DBMs when possible. Not all RBM functionality has
been ported to the DBM framework yet, so this is not always possible.
(Examples: spike-and-slab RBMs, score matching, denoising score matching)
pylearn2 developers should not add new features to the RBM class or
add new RBM subclasses. pylearn2 developers should only add documentation
and bug fixes to the RBM class and subclasses. pylearn2 developers should
finish porting all RBM functionality to the DBM framework, then turn
the RBM class into a thin wrapper around the DBM class that allocates
a single layer DBM.
"""
def __init__(self, nvis = None, nhid = None,
vis_space = None,
hid_space = None,
transformer = None,
irange=0.5, rng=None, init_bias_vis = None,
init_bias_vis_marginals = None, init_bias_hid=0.0,
base_lr = 1e-3, anneal_start = None, nchains = 100,
sml_gibbs_steps = 1,
random_patches_src = None,
monitor_reconstruction = False):
Model.__init__(self)
Block.__init__(self)
if init_bias_vis_marginals is not None:
assert init_bias_vis is None
X = init_bias_vis_marginals.X
assert X.min() >= 0.0
assert X.max() <= 1.0
marginals = X.mean(axis=0)
            # rescale the marginals a bit to avoid NaNs
init_bias_vis = inverse_sigmoid_numpy(.01 + .98 * marginals)
if init_bias_vis is None:
init_bias_vis = 0.0
rng = make_np_rng(rng, 1001, which_method="uniform")
self.rng = rng
if vis_space is None:
#if we don't specify things in terms of spaces and a transformer,
#assume dense matrix multiplication and work off of nvis, nhid
assert hid_space is None
assert transformer is None or isinstance(transformer,MatrixMul)
assert nvis is not None
assert nhid is not None
if transformer is None:
if random_patches_src is None:
W = rng.uniform(-irange, irange, (nvis, nhid))
else:
if hasattr(random_patches_src, '__array__'):
W = irange * random_patches_src.T
assert W.shape == (nvis, nhid)
else:
W = irange * random_patches_src.get_batch_design(
nhid).T
self.transformer = MatrixMul( sharedX(
W,
name='W',
borrow=True
)
)
else:
self.transformer = transformer
self.vis_space = VectorSpace(nvis)
self.hid_space = VectorSpace(nhid)
else:
assert hid_space is not None
assert transformer is not None
assert nvis is None
assert nhid is None
self.vis_space = vis_space
self.hid_space = hid_space
self.transformer = transformer
try:
b_vis = self.vis_space.get_origin()
b_vis += init_bias_vis
except ValueError:
reraise_as(ValueError("bad shape or value for init_bias_vis"))
self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)
try:
b_hid = self.hid_space.get_origin()
b_hid += init_bias_hid
except ValueError:
reraise_as(ValueError('bad shape or value for init_bias_hid'))
self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)
self.random_patches_src = random_patches_src
self.register_names_to_del(['random_patches_src'])
self.__dict__.update(nhid=nhid, nvis=nvis)
self._params = safe_union(self.transformer.get_params(),
[self.bias_vis, self.bias_hid])
self.base_lr = base_lr
self.anneal_start = anneal_start
self.nchains = nchains
self.sml_gibbs_steps = sml_gibbs_steps
def get_default_cost(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("The RBM class predates the current "
"Cost-based training algorithms (SGD and BGD). To train "
"the RBM with PCD, use DefaultTrainingAlgorithm rather "
"than SGD or BGD. Some RBM subclassess may also be "
"trained with SGD or BGD by using the "
"Cost classes defined in pylearn2.costs.ebm_estimation. "
"Note that it is also possible to make an RBM by allocating "
"a DBM with only one hidden layer. The DBM class is newer "
"and supports training with SGD / BGD. In the long run we "
"should remove the old RBM class and turn it into a wrapper "
"around the DBM class that makes a 1-layer DBM.")
def get_input_dim(self):
"""
Returns
-------
dim : int
The number of elements in the input, if the input is a vector.
"""
if not isinstance(self.vis_space, VectorSpace):
raise TypeError("Can't describe " + str(type(self.vis_space))
+ " as a dimensionality number.")
return self.vis_space.dim
def get_output_dim(self):
"""
Returns
-------
dim : int
The number of elements in the output, if the output is a vector.
"""
if not isinstance(self.hid_space, VectorSpace):
raise TypeError("Can't describe " + str(type(self.hid_space))
+ " as a dimensionality number.")
return self.hid_space.dim
def get_input_space(self):
"""
.. todo::
WRITEME
"""
return self.vis_space
def get_output_space(self):
"""
.. todo::
WRITEME
"""
return self.hid_space
def get_params(self):
"""
.. todo::
WRITEME
"""
return [param for param in self._params]
def get_weights(self, borrow=False):
"""
.. todo::
WRITEME
"""
        weights, = self.transformer.get_params()
return weights.get_value(borrow=borrow)
def get_weights_topo(self):
"""
.. todo::
WRITEME
"""
return self.transformer.get_weights_topo()
def get_weights_format(self):
"""
.. todo::
WRITEME
"""
return ['v', 'h']
def get_monitoring_channels(self, data):
"""
.. todo::
WRITEME
"""
V = data
theano_rng = make_theano_rng(None, 42, which_method="binomial")
H = self.mean_h_given_v(V)
h = H.mean(axis=0)
return { 'bias_hid_min' : T.min(self.bias_hid),
'bias_hid_mean' : T.mean(self.bias_hid),
'bias_hid_max' : T.max(self.bias_hid),
'bias_vis_min' : T.min(self.bias_vis),
'bias_vis_mean' : T.mean(self.bias_vis),
'bias_vis_max': T.max(self.bias_vis),
'h_min' : T.min(h),
'h_mean': T.mean(h),
'h_max' : T.max(h),
'reconstruction_error' : self.reconstruction_error(V,
theano_rng) }
def get_monitoring_data_specs(self):
"""
Get the data_specs describing the data for get_monitoring_channel.
This implementation returns specification corresponding to unlabeled
inputs.
Returns
-------
WRITEME
"""
return (self.get_input_space(), self.get_input_source())
def ml_gradients(self, pos_v, neg_v):
"""
Get the contrastive gradients given positive and negative phase
visible units.
Parameters
----------
pos_v : tensor_like
Theano symbolic representing a minibatch on the visible units,
with the first dimension indexing training examples and the
second indexing data dimensions (usually actual training data).
neg_v : tensor_like
Theano symbolic representing a minibatch on the visible units,
with the first dimension indexing training examples and the
second indexing data dimensions (usually reconstructions of the
data or sampler particles from a persistent Markov chain).
Returns
-------
grads : list
List of Theano symbolic variables representing gradients with
respect to model parameters, in the same order as returned by
`params()`.
Notes
-----
`pos_v` and `neg_v` need not have the same first dimension, i.e.
minibatch size.
"""
# taking the mean over each term independently allows for different
# mini-batch sizes in the positive and negative phase.
ml_cost = (self.free_energy_given_v(pos_v).mean() -
self.free_energy_given_v(neg_v).mean())
grads = tensor.grad(ml_cost, self.get_params(),
consider_constant=[pos_v, neg_v])
return grads
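    # For reference, the quantity differentiated above is the stochastic maximum likelihood
    # objective E_data[F(v)] - E_model[F(v)], where F is the free energy; the model expectation
    # is estimated from the persistent negative particles, and both pos_v and neg_v are treated
    # as constants when the gradient is taken.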
def train_batch(self, dataset, batch_size):
"""
.. todo::
WRITEME properly
A default learning rule based on SML
"""
self.learn_mini_batch(dataset.get_batch_design(batch_size))
return True
def learn_mini_batch(self, X):
"""
.. todo::
WRITEME
A default learning rule based on SML
"""
if not hasattr(self, 'learn_func'):
self.redo_theano()
rval = self.learn_func(X)
return rval
def redo_theano(self):
"""
Compiles the theano function for the default learning rule
"""
init_names = dir(self)
minibatch = tensor.matrix()
optimizer = _SGDOptimizer(self, self.base_lr, self.anneal_start)
        sampler = BlockGibbsSampler(self, 0.5 + np.zeros((
            self.nchains, self.get_input_dim())), self.rng,
            steps=self.sml_gibbs_steps)
updates = training_updates(visible_batch=minibatch, model=self,
sampler=sampler, optimizer=optimizer)
self.learn_func = theano.function([minibatch], updates=updates)
final_names = dir(self)
self.register_names_to_del([name for name in final_names
if name not in init_names])
def gibbs_step_for_v(self, v, rng):
"""
Do a round of block Gibbs sampling given visible configuration
Parameters
----------
v : tensor_like
Theano symbolic representing the hidden unit states for a batch
of training examples (or negative phase particles), with the
first dimension indexing training examples and the second
indexing data dimensions.
rng : RandomStreams object
Random number generator to use for sampling the hidden and
visible units.
Returns
-------
v_sample : tensor_like
Theano symbolic representing the new visible unit state after one
round of Gibbs sampling.
locals : dict
Contains the following auxiliary state as keys (all symbolics
except shape tuples):
* `h_mean`: the returned value from `mean_h_given_v`
* `h_mean_shape`: shape tuple indicating the size of
`h_mean` and `h_sample`
* `h_sample`: the stochastically sampled hidden units
* `v_mean_shape`: shape tuple indicating the shape of
`v_mean` and `v_sample`
* `v_mean`: the returned value from `mean_v_given_h`
* `v_sample`: the stochastically sampled visible units
"""
h_mean = self.mean_h_given_v(v)
assert h_mean.type.dtype == v.type.dtype
# For binary hidden units
# TODO: factor further to extend to other kinds of hidden units
# (e.g. spike-and-slab)
h_sample = rng.binomial(size = h_mean.shape, n = 1 , p = h_mean,
dtype=h_mean.type.dtype)
assert h_sample.type.dtype == v.type.dtype
# v_mean is always based on h_sample, not h_mean, because we don't
# want h transmitting more than one bit of information per unit.
v_mean = self.mean_v_given_h(h_sample)
assert v_mean.type.dtype == v.type.dtype
v_sample = self.sample_visibles([v_mean], v_mean.shape, rng)
assert v_sample.type.dtype == v.type.dtype
return v_sample, locals()
def sample_visibles(self, params, shape, rng):
"""
Stochastically sample the visible units given hidden unit
configurations for a set of training examples.
Parameters
----------
params : list
List of the necessary parameters to sample :math:`p(v|h)`. In the
case of a binary-binary RBM this is a single-element list
containing the symbolic representing :math:`p(v|h)`, as returned
by `mean_v_given_h`.
Returns
-------
vprime : tensor_like
Theano symbolic representing stochastic samples from :math:`p(v|h)`
"""
v_mean = params[0]
return as_floatX(rng.uniform(size=shape) < v_mean)
def input_to_h_from_v(self, v):
"""
Compute the affine function (linear map plus bias) that serves as
input to the hidden layer in an RBM.
Parameters
----------
v : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the one or several
minibatches on the visible units, with the first dimension
indexing training examples and the second indexing data dimensions.
Returns
-------
a : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the input to each
hidden unit for each training example.
"""
if isinstance(v, tensor.Variable):
return self.bias_hid + self.transformer.lmul(v)
else:
return [self.input_to_h_from_v(vis) for vis in v]
def input_to_v_from_h(self, h):
"""
Compute the affine function (linear map plus bias) that serves as
input to the visible layer in an RBM.
Parameters
----------
h : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the one or several
minibatches on the hidden units, with the first dimension
indexing training examples and the second indexing data dimensions.
Returns
-------
a : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the input to each
visible unit for each row of h.
"""
if isinstance(h, tensor.Variable):
return self.bias_vis + self.transformer.lmul_T(h)
else:
return [self.input_to_v_from_h(hid) for hid in h]
def upward_pass(self, v):
"""
Wrapper around mean_h_given_v method. Called when RBM is accessed
by mlp.HiddenLayer.
"""
return self.mean_h_given_v(v)
def mean_h_given_v(self, v):
"""
Compute the mean activation of the hidden units given visible unit
configurations for a set of training examples.
Parameters
----------
v : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the hidden unit
states for a batch (or several) of training examples, with the
first dimension indexing training examples and the second
indexing data dimensions.
Returns
-------
h : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the mean
(deterministic) hidden unit activations given the visible units.
"""
if isinstance(v, tensor.Variable):
return nnet.sigmoid(self.input_to_h_from_v(v))
else:
return [self.mean_h_given_v(vis) for vis in v]
def mean_v_given_h(self, h):
"""
Compute the mean activation of the visibles given hidden unit
configurations for a set of training examples.
Parameters
----------
h : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the hidden unit
states for a batch (or several) of training examples, with the
first dimension indexing training examples and the second
indexing hidden units.
Returns
-------
vprime : tensor_like or list of tensor_likes
Theano symbolic (or list thereof) representing the mean
(deterministic) reconstruction of the visible units given the
hidden units.
"""
if isinstance(h, tensor.Variable):
return nnet.sigmoid(self.input_to_v_from_h(h))
else:
return [self.mean_v_given_h(hid) for hid in h]
def free_energy_given_v(self, v):
"""
Calculate the free energy of a visible unit configuration by
marginalizing over the hidden units.
Parameters
----------
v : tensor_like
Theano symbolic representing the hidden unit states for a batch
of training examples, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
f : tensor_like
1-dimensional tensor (vector) representing the free energy
associated with each row of v.
"""
sigmoid_arg = self.input_to_h_from_v(v)
return (-tensor.dot(v, self.bias_vis) -
nnet.softplus(sigmoid_arg).sum(axis=1))
def free_energy(self, V):
return self.free_energy_given_v(V)
def free_energy_given_h(self, h):
"""
Calculate the free energy of a hidden unit configuration by
marginalizing over the visible units.
Parameters
----------
h : tensor_like
Theano symbolic representing the hidden unit states, with the
first dimension indexing training examples and the second
indexing data dimensions.
Returns
-------
f : tensor_like
1-dimensional tensor (vector) representing the free energy
associated with each row of v.
"""
sigmoid_arg = self.input_to_v_from_h(h)
return (-tensor.dot(h, self.bias_hid) -
nnet.softplus(sigmoid_arg).sum(axis=1))
def __call__(self, v):
"""
Forward propagate (symbolic) input through this module, obtaining
a representation to pass on to layers above.
This just aliases the `mean_h_given_v()` function for syntactic
sugar/convenience.
"""
return self.mean_h_given_v(v)
def reconstruction_error(self, v, rng):
"""
Compute the mean-squared error (mean over examples, sum over units)
across a minibatch after a Gibbs step starting from the training data.
Parameters
----------
v : tensor_like
Theano symbolic representing the hidden unit states for a batch
of training examples, with the first dimension indexing training
examples and the second indexing data dimensions.
rng : RandomStreams object
Random number generator to use for sampling the hidden and
visible units.
Returns
-------
mse : tensor_like
0-dimensional tensor (essentially a scalar) indicating the mean
reconstruction error across the minibatch.
Notes
-----
The reconstruction used to assess error samples only the hidden
units. For the visible units, it uses the conditional mean. No sampling
of the visible units is done, to reduce noise in the estimate.
"""
sample, _locals = self.gibbs_step_for_v(v, rng)
return ((_locals['v_mean'] - v) ** 2).sum(axis=1).mean()
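# Illustrative sketch (not part of the original pylearn2 module): the binary
# RBM free energy F(v) = -v.b_vis - sum_j softplus(W_j.v + b_hid_j) and one
# block Gibbs step, written with plain NumPy so the Theano expressions above
# can be checked by hand. The shapes and the random seed are arbitrary choices.
def _numpy_rbm_demo():
    import numpy as np
    rng = np.random.RandomState(0)
    n_vis, n_hid, batch = 6, 4, 3
    W = rng.uniform(-0.5, 0.5, size=(n_vis, n_hid))
    b_vis = np.zeros(n_vis)
    b_hid = np.zeros(n_hid)
    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))
    def softplus(x):
        return np.log1p(np.exp(x))
    v = (rng.uniform(size=(batch, n_vis)) > 0.5).astype(float)
    # free energy, one value per example (cf. free_energy_given_v)
    free_energy = -v.dot(b_vis) - softplus(v.dot(W) + b_hid).sum(axis=1)
    # one Gibbs step (cf. gibbs_step_for_v): sample h, then sample v from its mean
    h_mean = sigmoid(v.dot(W) + b_hid)
    h_sample = (rng.uniform(size=h_mean.shape) < h_mean).astype(float)
    v_mean = sigmoid(h_sample.dot(W.T) + b_vis)
    v_sample = (rng.uniform(size=v_mean.shape) < v_mean).astype(float)
    return free_energy, v_sample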
class GaussianBinaryRBM(RBM):
"""
An RBM with Gaussian visible units and binary hidden units.
Parameters
----------
energy_function_class : WRITEME
nvis : int, optional
Number of visible units in the model.
nhid : int, optional
Number of hidden units in the model.
vis_space : WRITEME
hid_space : WRITEME
irange : float, optional
The size of the initial interval around 0 for weights.
rng : RandomState object or seed, optional
NumPy RandomState object to use when initializing parameters
of the model, or (integer) seed to use to create one.
mean_vis : bool, optional
Don't actually sample visibles; make sample method simply return
mean.
init_sigma : float or numpy.ndarray, optional
Initial value of the sigma variable. If init_sigma is a scalar
and sigma is not, will be broadcasted.
learn_sigma : bool, optional
WRITEME
sigma_lr_scale : float, optional
WRITEME
init_bias_hid : scalar or 1-d array of length `nhid`
Initial value for the biases on hidden units.
min_sigma, max_sigma : float, float, optional
Elements of sigma are clipped to this range during learning
"""
def __init__(self, energy_function_class,
nvis = None,
nhid = None,
vis_space = None,
hid_space = None,
transformer = None,
irange=0.5, rng=None,
mean_vis=False, init_sigma=2., learn_sigma=False,
sigma_lr_scale=1., init_bias_hid=0.0,
min_sigma = .1, max_sigma = 10.):
super(GaussianBinaryRBM, self).__init__(nvis = nvis, nhid = nhid,
transformer = transformer,
vis_space = vis_space,
hid_space = hid_space,
irange = irange, rng = rng,
init_bias_hid = init_bias_hid)
self.learn_sigma = learn_sigma
self.init_sigma = init_sigma
self.sigma_lr_scale = float(sigma_lr_scale)
if energy_function_class.supports_vector_sigma():
base = N.ones(nvis)
else:
base = 1
self.sigma_driver = sharedX(
base * init_sigma / self.sigma_lr_scale,
name='sigma_driver',
borrow=True
)
self.sigma = self.sigma_driver * self.sigma_lr_scale
self.min_sigma = min_sigma
self.max_sigma = max_sigma
if self.learn_sigma:
self._params.append(self.sigma_driver)
self.mean_vis = mean_vis
self.energy_function = energy_function_class(
transformer = self.transformer,
sigma=self.sigma,
bias_vis=self.bias_vis,
bias_hid=self.bias_hid
)
def _modify_updates(self, updates):
"""
.. todo::
WRITEME
"""
if self.sigma_driver in updates:
assert self.learn_sigma
updates[self.sigma_driver] = T.clip(
updates[self.sigma_driver],
self.min_sigma / self.sigma_lr_scale,
self.max_sigma / self.sigma_lr_scale
)
def score(self, V):
"""
.. todo::
WRITEME
"""
return self.energy_function.score(V)
def P_H_given_V(self, V):
"""
.. todo::
WRITEME
"""
return self.energy_function.mean_H_given_V(V)
def mean_h_given_v(self, v):
"""
.. todo::
WRITEME
"""
return self.P_H_given_V(v)
def mean_v_given_h(self, h):
"""
Compute the mean activation of the visibles given hidden unit
configurations for a set of training examples.
Parameters
----------
h : tensor_like
Theano symbolic representing the hidden unit states for a batch
of training examples, with the first dimension indexing training
examples and the second indexing hidden units.
Returns
-------
vprime : tensor_like
Theano symbolic representing the mean (deterministic)
reconstruction of the visible units given the hidden units.
"""
return self.energy_function.mean_V_given_H(h)
#return self.bias_vis + self.sigma * tensor.dot(h, self.weights.T)
def free_energy_given_v(self, V):
"""
Calculate the free energy of a visible unit configuration by
marginalizing over the hidden units.
Parameters
----------
v : tensor_like
Theano symbolic representing the hidden unit states for a batch
of training examples, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
f : tensor_like
1-dimensional tensor representing the free energy of the visible
unit configuration for each example in the batch
"""
"""hid_inp = self.input_to_h_from_v(v)
squared_term = ((self.bias_vis - v) ** 2.) / (2. * self.sigma)
rval = squared_term.sum(axis=1) - nnet.softplus(hid_inp).sum(axis=1)
assert len(rval.type.broadcastable) == 1"""
return self.energy_function.free_energy(V)
def free_energy(self, V):
"""
.. todo::
WRITEME
"""
return self.energy_function.free_energy(V)
def sample_visibles(self, params, shape, rng):
"""
Stochastically sample the visible units given hidden unit
configurations for a set of training examples.
Parameters
----------
params : list
List of the necessary parameters to sample :math:`p(v|h)`.
In the case of a Gaussian-binary RBM this is a single-element
list containing the conditional mean.
shape : WRITEME
rng : WRITEME
Returns
-------
vprime : tensor_like
Theano symbolic representing stochastic samples from
:math:`p(v|h)`
Notes
-----
If `mean_vis` is specified as `True` in the constructor, this is
equivalent to a call to `mean_v_given_h`.
"""
v_mean = params[0]
if self.mean_vis:
return v_mean
else:
# zero mean, std sigma noise
zero_mean = rng.normal(size=shape) * self.sigma
return zero_mean + v_mean
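# Illustrative sketch (not from the original module): how GaussianBinaryRBM's
# sample_visibles draws real-valued visibles as the conditional mean plus
# zero-mean Gaussian noise scaled by sigma. The shapes and sigma value here
# are made up for demonstration.
def _gaussian_visible_sampling_demo(mean_vis=False):
    import numpy as np
    rng = np.random.RandomState(1)
    v_mean = rng.randn(5, 3)          # conditional mean E[v | h]
    sigma = 2.0                       # visible standard deviation
    if mean_vis:
        return v_mean                 # deterministic: just return the mean
    noise = rng.normal(size=v_mean.shape) * sigma
    return v_mean + noise             # stochastic sample from p(v | h)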
class mu_pooled_ssRBM(RBM):
"""
.. todo::
WRITEME
Parameters
----------
alpha : WRITEME
Vector of length nslab, diagonal precision term on s.
b : WRITEME
Vector of length nhid, hidden unit bias.
B : WRITEME
Vector of length nvis, diagonal precision on v. Lambda in ICML2011
paper.
Lambda : WRITEME
Matrix of shape nvis x nhid, whose i-th column encodes a diagonal
precision on v, conditioned on h_i. phi in ICML2011 paper.
log_alpha : WRITEME
Vector of length nslab, precision on s.
mu : WRITEME
Vector of length nslab, mean parameter on s.
W : WRITEME
Matrix of shape nvis x nslab, weights of the nslab linear filters s.
"""
def __init__(self, nvis, nhid, n_s_per_h,
batch_size,
alpha0, alpha_irange,
b0,
B0,
Lambda0, Lambda_irange,
mu0,
W_irange=None,
rng=None):
rng = make_np_rng(rng, 1001, which_method="rand")
self.nhid = nhid
self.nslab = nhid * n_s_per_h
self.n_s_per_h = n_s_per_h
self.nvis = nvis
self.batch_size = batch_size
# configure \alpha: precision parameter on s
alpha_init = numpy.zeros(self.nslab) + alpha0
if alpha_irange > 0:
alpha_init += (2 * rng.rand(self.nslab) - 1) * alpha_irange
self.log_alpha = sharedX(numpy.log(alpha_init), name='log_alpha')
self.alpha = tensor.exp(self.log_alpha)
self.alpha.name = 'alpha'
self.mu = sharedX(
numpy.zeros(self.nslab) + mu0,
name='mu', borrow=True)
self.b = sharedX(
numpy.zeros(self.nhid) + b0,
name='b', borrow=True)
if W_irange is None:
            # Derived close to Xavier Glorot's magic formula
W_irange = 2 / numpy.sqrt(nvis * nhid)
self.W = sharedX(
(.5 - rng.rand(self.nvis, self.nslab)) * 2 * W_irange,
name='W', borrow=True)
# THE BETA IS IGNORED DURING TRAINING - FIXED AT MARGINAL DISTRIBUTION
self.B = sharedX(numpy.zeros(self.nvis) + B0, name='B', borrow=True)
if Lambda_irange > 0:
L = (rng.rand(self.nvis, self.nhid) * Lambda_irange
+ Lambda0)
else:
L = numpy.zeros((self.nvis, self.nhid)) + Lambda0
self.Lambda = sharedX(L, name='Lambda', borrow=True)
self._params = [
self.mu,
self.B,
self.Lambda,
self.W,
self.b,
self.log_alpha]
#def ml_gradients(self, pos_v, neg_v):
# inherited version is OK.
def gibbs_step_for_v(self, v, rng):
"""
.. todo::
WRITEME
"""
# Sometimes, the number of examples in the data set is not a
# multiple of self.batch_size.
batch_size = v.shape[0]
# sample h given v
h_mean = self.mean_h_given_v(v)
h_mean_shape = (batch_size, self.nhid)
h_sample = rng.binomial(size=h_mean_shape,
n = 1, p = h_mean, dtype = h_mean.dtype)
# sample s given (v,h)
s_mu, s_var = self.mean_var_s_given_v_h1(v)
s_mu_shape = (batch_size, self.nslab)
s_sample = s_mu + rng.normal(size=s_mu_shape) * tensor.sqrt(s_var)
#s_sample=(s_sample.reshape()*h_sample.dimshuffle(0,1,'x')).flatten(2)
# sample v given (s,h)
v_mean, v_var = self.mean_var_v_given_h_s(h_sample, s_sample)
v_mean_shape = (batch_size, self.nvis)
v_sample = rng.normal(size=v_mean_shape) * tensor.sqrt(v_var) + v_mean
del batch_size
return v_sample, locals()
## TODO?
def sample_visibles(self, params, shape, rng):
"""
.. todo::
WRITEME
"""
raise NotImplementedError('mu_pooled_ssRBM.sample_visibles')
def input_to_h_from_v(self, v):
"""
.. todo::
WRITEME
"""
D = self.Lambda
alpha = self.alpha
def sum_s(x):
return x.reshape((
-1,
self.nhid,
self.n_s_per_h)).sum(axis=2)
return tensor.add(
self.b,
-0.5 * tensor.dot(v * v, D),
sum_s(self.mu * tensor.dot(v, self.W)),
sum_s(0.5 * tensor.sqr(tensor.dot(v, self.W)) / alpha))
#def mean_h_given_v(self, v):
# inherited version is OK:
# return nnet.sigmoid(self.input_to_h_from_v(v))
def mean_var_v_given_h_s(self, h, s):
"""
.. todo::
WRITEME
"""
v_var = 1 / (self.B + tensor.dot(h, self.Lambda.T))
s3 = s.reshape((
-1,
self.nhid,
self.n_s_per_h))
hs = h.dimshuffle(0, 1, 'x') * s3
v_mu = tensor.dot(hs.flatten(2), self.W.T) * v_var
return v_mu, v_var
def mean_var_s_given_v_h1(self, v):
"""
.. todo::
WRITEME
"""
alpha = self.alpha
return (self.mu + tensor.dot(v, self.W) / alpha,
1.0 / alpha)
## TODO?
def mean_v_given_h(self, h):
"""
.. todo::
WRITEME
"""
raise NotImplementedError('mu_pooled_ssRBM.mean_v_given_h')
def free_energy_given_v(self, v):
"""
.. todo::
WRITEME
"""
sigmoid_arg = self.input_to_h_from_v(v)
return tensor.add(
0.5 * (self.B * (v ** 2)).sum(axis=1),
-tensor.nnet.softplus(sigmoid_arg).sum(axis=1))
#def __call__(self, v):
# inherited version is OK
#def reconstruction_error:
# inherited version should be OK
#def params(self):
# inherited version is OK.
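# Illustrative sketch (not from the original module): the "sum_s" pooling used
# in mu_pooled_ssRBM.input_to_h_from_v groups the nslab = nhid * n_s_per_h
# slab variables by hidden unit and sums within each group. NumPy version with
# made-up sizes:
def _slab_pooling_demo():
    import numpy as np
    batch, nhid, n_s_per_h = 2, 3, 4
    x = np.arange(batch * nhid * n_s_per_h,
                  dtype=float).reshape(batch, nhid * n_s_per_h)
    pooled = x.reshape(batch, nhid, n_s_per_h).sum(axis=2)  # shape (batch, nhid)
    return pooled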
def build_stacked_RBM(nvis, nhids, batch_size, vis_type='binary',
input_mean_vis=None, irange=1e-3, rng=None):
"""
.. todo::
WRITEME properly
Note from IG:
This method doesn't seem to work correctly with Gaussian RBMs.
In general, this is a difficult function to support, because it
    needs to pass the right arguments to the constructor of many kinds
of RBMs. It would probably be better to just construct an instance
of pylearn2.models.mlp.MLP with its hidden layers set to instances
of pylearn2.models.mlp.RBM_Layer. If anyone is working on this kind
of problem, a PR replacing this function with a helper function to
make such an MLP would be very welcome.
Allocate a StackedBlocks containing RBMs.
The visible units of the input RBM can be either binary or gaussian,
the other ones are all binary.
"""
#TODO: not sure this is the right way of dealing with mean_vis.
layers = []
assert vis_type in ['binary', 'gaussian']
if vis_type == 'binary':
assert input_mean_vis is None
elif vis_type == 'gaussian':
assert input_mean_vis in (True, False)
# The number of visible units in each layer is the initial input
# size and the first k-1 hidden unit sizes.
nviss = [nvis] + nhids[:-1]
seq = izip(
xrange(len(nhids)),
nhids,
nviss,
)
for k, nhid, nvis in seq:
if k == 0 and vis_type == 'gaussian':
rbm = GaussianBinaryRBM(nvis=nvis, nhid=nhid,
batch_size=batch_size,
irange=irange,
rng=rng,
mean_vis=input_mean_vis)
else:
            rbm = RBM(nvis=nvis, nhid=nhid,
batch_size=batch_size,
irange=irange,
rng=rng)
layers.append(rbm)
# Create the stack
return StackedBlocks(layers)
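# Illustrative sketch (not from the original module): how build_stacked_RBM
# pairs visible/hidden sizes across the stack. Each RBM's visible size is the
# previous layer's hidden size, starting from the data dimensionality. The
# default sizes below are arbitrary examples.
def _stack_sizes_demo(nvis=784, nhids=(500, 200, 50)):
    nviss = [nvis] + list(nhids[:-1])
    # e.g. [(500, 784), (200, 500), (50, 200)] for the defaults above
    return list(zip(nhids, nviss))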
class L1_ActivationCost(Cost):
"""
.. todo::
WRITEME
Parameters
----------
target : WRITEME
eps : WRITEME
coeff : WRITEME
"""
def __init__(self, target, eps, coeff):
self.__dict__.update(locals())
del self.self
def expr(self, model, data, ** kwargs):
"""
.. todo::
WRITEME
"""
self.get_data_specs(model)[0].validate(data)
X = data
H = model.P_H_given_V(X)
h = H.mean(axis=0)
err = abs(h - self.target)
dead = T.maximum(err - self.eps, 0.)
assert dead.ndim == 1
rval = self.coeff * dead.mean()
return rval
def get_data_specs(self, model):
"""
.. todo::
WRITEME
"""
return (model.get_input_space(), model.get_input_source())
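# Illustrative sketch (not from the original module): the quantity computed by
# L1_ActivationCost.expr, with NumPy in place of Theano. h_mean stands in for
# the mean hidden activations P(h|v) averaged over a minibatch; the target,
# eps and coeff values are made up.
def _l1_activation_cost_demo():
    import numpy as np
    h_mean = np.array([0.02, 0.10, 0.30, 0.05])  # per-unit mean activations
    target, eps, coeff = 0.05, 0.03, 1.0
    err = np.abs(h_mean - target)
    dead = np.maximum(err - eps, 0.0)             # only penalize beyond eps
    return coeff * dead.mean()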
# The following functionality was deprecated, but is evidently
# still needed to make the RBM work
class _Optimizer(object):
"""
Basic abstract class for computing parameter updates of a model.
"""
def updates(self):
"""Return symbolic updates to apply."""
raise NotImplementedError()
class _SGDOptimizer(_Optimizer):
"""
Compute updates by stochastic gradient descent on mini-batches.
Supports constant learning rates, or decreasing like 1/t after an initial
period.
Parameters
----------
params : object or list
Either a Model object with a .get_params() method, or a list of
parameters to be optimized.
base_lr : float
The base learning rate before annealing or parameter-specific
scaling.
anneal_start : int, optional
Number of steps after which to start annealing the learning
rate at a 1/t schedule, where t is the number of stochastic
gradient updates.
    use_adagrad : bool, optional
        If set to True, the 'adagrad' adaptive learning rate scheme is used,
        with base_lr acting as e0.
kwargs : dict
WRITEME
Notes
-----
The formula to compute the effective learning rate on a parameter is:
<paramname>_lr * max(0.0, min(base_lr, lr_anneal_start/(iteration+1)))
Parameter-specific learning rates can be set by passing keyword
arguments <name>_lr, where name is the .name attribute of a given
parameter.
Parameter-specific bounding values can be specified by passing
keyword arguments <param>_clip, which should be a (min, max) pair.
Adagrad is recommended with sparse inputs. It normalizes the base
learning rate of a parameter theta_i by the accumulated 2-norm of its
gradient: e{ti} = e0 / sqrt( sum_t (dL_t / dtheta_i)^2 )
"""
def __init__(self, params, base_lr, anneal_start=None, use_adagrad=False,
** kwargs):
if hasattr(params, '__iter__'):
self.params = params
elif hasattr(params, 'get_params') and hasattr(
params.get_params, '__call__'):
self.params = params.get_params()
else:
raise ValueError("SGDOptimizer couldn't figure out what to do "
"with first argument: '%s'" % str(params))
        if anneal_start is None:
self.anneal_start = None
else:
self.anneal_start = as_floatX(anneal_start)
# Create accumulators and epsilon0's
self.use_adagrad = use_adagrad
if self.use_adagrad:
self.accumulators = {}
self.e0s = {}
for param in self.params:
self.accumulators[param] = theano.shared(
value=as_floatX(0.), name='acc_%s' % param.name)
self.e0s[param] = as_floatX(base_lr)
# Set up the clipping values
self.clipping_values = {}
# Keep track of names already seen
clip_names_seen = set()
for parameter in self.params:
clip_name = '%s_clip' % parameter.name
if clip_name in kwargs:
if clip_name in clip_names_seen:
logger.warning('In SGDOptimizer, at least two parameters '
'have the same name. Both will be affected '
'by the keyword argument '
'{0}.'.format(clip_name))
clip_names_seen.add(clip_name)
p_min, p_max = kwargs[clip_name]
assert p_min <= p_max
self.clipping_values[parameter] = (p_min, p_max)
        # Remove the recognized ..._clip keywords from kwargs, so any
        # remaining ..._clip arguments can be reported as ignored below
        for clip_name in clip_names_seen:
            kwargs.pop(clip_name)
for kw in six.iterkeys(kwargs):
if kw[-5:] == '_clip':
logger.warning('In SGDOptimizer, keyword argument {0} '
'will be ignored, because no parameter '
'was found with name {1}.'.format(kw, kw[:-5]))
self.learning_rates_setup(base_lr, **kwargs)
def learning_rates_setup(self, base_lr, **kwargs):
"""
Initializes parameter-specific learning rate dictionary and shared
variables for the annealed base learning rate and iteration number.
Parameters
----------
base_lr : float
The base learning rate before annealing or parameter-specific
scaling.
kwargs : dict
WRITEME
Notes
-----
Parameter-specific learning rates can be set by passing keyword
arguments <name>_lr, where name is the .name attribute of a given
parameter.
"""
# Take care of learning rate scales for individual parameters
self.learning_rates = {}
# Base learning rate per example.
self.base_lr = theano._asarray(base_lr, dtype=theano.config.floatX)
# Keep track of names already seen
lr_names_seen = set()
for parameter in self.params:
lr_name = '%s_lr' % parameter.name
if lr_name in lr_names_seen:
logger.warning('In SGDOptimizer, '
'at least two parameters have the same name. '
'Both will be affected by the keyword argument '
'{0}.'.format(lr_name))
lr_names_seen.add(lr_name)
thislr = kwargs.get(lr_name, 1.)
self.learning_rates[parameter] = sharedX(thislr, lr_name)
        # Remove the recognized ..._lr keywords from kwargs, so any
        # remaining ..._lr arguments can be reported as ignored below
        for lr_name in lr_names_seen:
            if lr_name in kwargs:
                kwargs.pop(lr_name)
for kw in six.iterkeys(kwargs):
if kw[-3:] == '_lr':
logger.warning('In SGDOptimizer, keyword argument {0} '
'will be ignored, because no parameter '
'was found with name {1}.'.format(kw, kw[:-3]))
# A shared variable for storing the iteration number.
self.iteration = sharedX(theano._asarray(0, dtype='int32'),
name='iter')
# A shared variable for storing the annealed base learning rate, used
# to lower the learning rate gradually after a certain amount of time.
self.annealed = sharedX(base_lr, 'annealed')
def learning_rate_updates(self, gradients):
"""
Compute a dictionary of shared variable updates related to annealing
the learning rate.
Parameters
----------
gradients : WRITEME
Returns
-------
updates : dict
A dictionary with the shared variables representing SGD metadata
as keys and a symbolic expression of how they are to be updated as
values.
"""
ups = {}
if self.use_adagrad:
learn_rates = []
for param, gp in zip(self.params, gradients):
acc = self.accumulators[param]
ups[acc] = acc + (gp ** 2).sum()
learn_rates.append(self.e0s[param] / (ups[acc] ** .5))
else:
# Annealing coefficient. Here we're using a formula of
# min(base_lr, anneal_start / (iteration + 1))
if self.anneal_start is None:
annealed = sharedX(self.base_lr)
else:
frac = self.anneal_start / (self.iteration + 1.)
annealed = tensor.minimum(
as_floatX(frac),
self.base_lr # maximum learning rate
)
# Update the shared variable for the annealed learning rate.
ups[self.annealed] = annealed
ups[self.iteration] = self.iteration + 1
# Calculate the learning rates for each parameter, in the order
# they appear in self.params
learn_rates = [annealed * self.learning_rates[p] for p in
self.params]
return ups, learn_rates
def updates(self, gradients):
"""
Return symbolic updates to apply given a set of gradients
on the parameters being optimized.
Parameters
----------
gradients : list of tensor_likes
List of symbolic gradients for the parameters contained
in self.params, in the same order as in self.params.
Returns
-------
updates : dict
A dictionary with the shared variables in self.params as keys
and a symbolic expression of how they are to be updated each
SGD step as values.
Notes
-----
`cost_updates` is a convenient helper function that takes all
necessary gradients with respect to a given symbolic cost.
"""
ups = {}
# Add the learning rate/iteration updates
l_ups, learn_rates = self.learning_rate_updates(gradients)
safe_update(ups, l_ups)
        # Get the parameter updates from sgd_updates, defined later in this class.
p_up = dict(self.sgd_updates(self.params, gradients, learn_rates))
# Add the things in p_up to ups
safe_update(ups, p_up)
# Clip the values if needed.
# We do not want the clipping values to force an upcast
# of the update: updates should have the same type as params
for param, (p_min, p_max) in six.iteritems(self.clipping_values):
p_min = tensor.as_tensor(p_min)
p_max = tensor.as_tensor(p_max)
dtype = param.dtype
if p_min.dtype != dtype:
p_min = tensor.cast(p_min, dtype)
if p_max.dtype != dtype:
p_max = tensor.cast(p_max, dtype)
ups[param] = tensor.clip(ups[param], p_min, p_max)
# Return the updates dictionary.
return ups
def cost_updates(self, cost):
"""
Return symbolic updates to apply given a cost function.
Parameters
----------
cost : tensor_like
Symbolic cost with respect to which the gradients of
the parameters should be taken. Should be 0-dimensional
(scalar valued).
Returns
-------
updates : dict
A dictionary with the shared variables in self.params as keys
and a symbolic expression of how they are to be updated each
SGD step as values.
"""
grads = [tensor.grad(cost, p) for p in self.params]
return self.updates(gradients=grads)
def sgd_updates(self, params, grads, stepsizes):
"""
        Return a list of (parameter, update) pairs that can be used
as updates in theano.function to
implement stochastic gradient descent.
Parameters
----------
params : list of Variable
variables to adjust in order to minimize some cost
grads : list of Variable
the gradient on each param (with respect to some cost)
stepsizes : symbolic scalar or list of one symbolic scalar per param
step by this amount times the negative gradient on each iteration
"""
try:
iter(stepsizes)
except Exception:
stepsizes = [stepsizes for p in params]
if len(params) != len(grads):
raise ValueError('params and grads have different lens')
updates = [(p, p - step * gp) for (step, p, gp)
in zip(stepsizes, params, grads)]
return updates
def sgd_momentum_updates(self, params, grads, stepsizes, momentum=0.9):
"""
.. todo::
WRITEME
"""
# if stepsizes is just a scalar, expand it to match params
try:
iter(stepsizes)
except Exception:
stepsizes = [stepsizes for p in params]
try:
iter(momentum)
except Exception:
momentum = [momentum for p in params]
if len(params) != len(grads):
raise ValueError('params and grads have different lens')
headings = [theano.shared(numpy.zeros_like(p.get_value(borrow=True)))
for p in params]
updates = []
for s, p, gp, m, h in zip(stepsizes, params, grads, momentum,
headings):
updates.append((p, p + s * h))
updates.append((h, m * h - (1.0 - m) * gp))
return updates
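# Illustrative sketch (not from the original module): the learning-rate
# annealing and momentum rules used above, as a plain NumPy loop. The
# effective rate is min(base_lr, anneal_start / (iteration + 1)), and the
# momentum update follows h <- m*h - (1-m)*grad, p <- p + step*h. The toy
# quadratic objective and the constants are arbitrary.
def _sgd_momentum_demo(base_lr=0.5, anneal_start=10.0, momentum=0.9, steps=100):
    import numpy as np
    p = np.array([5.0, -3.0])          # parameter to optimize
    h = np.zeros_like(p)               # momentum "heading"
    for it in range(steps):
        grad = 2.0 * p                 # gradient of sum(p**2)
        annealed = min(base_lr, anneal_start / (it + 1.0))
        h = momentum * h - (1.0 - momentum) * grad
        p = p + annealed * h
    return p                           # approaches the minimum at the origin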
|
JazzeYoung/VeryDeepAutoEncoder
|
pylearn2/pylearn2/models/rbm.py
|
Python
|
bsd-3-clause
| 58,469
|
[
"Gaussian"
] |
b9824f781f382d08f9c7f21216c8c9a0518064fe1fc60613304133bff8889fae
|
# coding=utf-8
from __future__ import division
import codecs
import os
import platform
import re
from .analysisperiod import AnalysisPeriod
from .designday import ASHRAEClearSky
from .designday import ASHRAETau
from .designday import DesignDay
from .designday import DryBulbCondition
from .designday import HumidityCondition
from .designday import WindCondition
from .dt import Date
from .location import Location
try:
from itertools import izip as zip # python 2
except ImportError:
xrange = range # python 3
class STAT(object):
"""Import data from a local .stat file.
Args:
file_path: Address to a local .stat file.
Properties:
* location
* ashrae_climate_zone
* koppen_climate_zone
* extreme_cold_week
* extreme_hot_week
* typical_winter_week
* typical_spring_week
* typical_summer_week
* typical_autumn_week
* other_typical_weeks
* annual_heating_design_day_996
* annual_heating_design_day_990
* annual_cooling_design_day_004
* annual_cooling_design_day_010
* monthly_cooling_design_days_100
* monthly_cooling_design_days_050
* monthly_cooling_design_days_020
* monthly_cooling_design_days_004
* monthly_db_temp_050
* monthly_wb_temp_050
* monthly_db_temp_range_050
* monthly_wb_temp_range_050
* monthly_found
* standard_pressure_at_elev
* monthly_wind_conditions
* monthly_ws_avg
* monthly_wind_dirs
* monthly_clear_sky_conditions
* monthly_tau_beam
* monthly_tau_diffuse
* file_path
"""
# categories used for parsing text
_months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
_wind_dirs = (0, 45, 90, 135, 180, 225, 270, 315)
_wind_dir_names = ('North', 'NorthEast', 'East', 'SouthEast', 'South',
'SouthWest', 'West', 'NorthWest')
# compiled strings for identifying data in the file
_coord_pattern1 = re.compile(r"{([NSEW])(\s*\d*)deg(\s*\d*)")
_coord_pattern2 = re.compile(r"{([NSEW])(\s*\d*) (\s*\d*)")
_elev_pattern1 = re.compile(r"Elevation\s*[-]*\s*(\d*)m\s*(\S*)")
_elev_pattern2 = re.compile(r"Elevation\s*[-]*\s*(\d*)\s*m\s*(\S*)")
_timez_pattern = re.compile(r"{GMT\s*(\S*)\s*Hours}")
_press_pattern = re.compile(r"Elevation\s*[-]*\s*(\d*)")
_ashraecz_pattern = re.compile(r'Climate type\s"(\S*)"\s\(A')
_koppencz_pattern = re.compile(r'Climate type\s"(\S*)"\s\(K')
_hotweek_pattern = re.compile(r"Extreme Hot Week Period selected:"
r"\s*(\w{3})\s*(\d{1,2}):\s*(\w{3})\s*(\d{1,2}),")
_coldweek_pattern = re.compile(r"Extreme Cold Week Period selected:"
r"\s*(\w{3})\s*(\d{1,2}):\s*(\w{3})\s*(\d{1,2}),")
_typweek_pattern = re.compile(r"(\S*)\s*Typical Week Period selected:"
r"\s*(\w{3})\s*(\d{1,2}):\s*(\w{3})\s*(\d{1,2}),")
_heat_pattern = re.compile(r"Heating\s(\d.*)")
_cool_pattern = re.compile(r"Cooling\s(\d.*)")
_tau_beam_pattern = re.compile(r"taub \(beam\)(.*)")
_tau_diffuse_pattern = re.compile(r"taud \(diffuse\)(.*)")
_db_50_pattern = re.compile(r"Drybulb 5.0%(.*)")
_wb_50_pattern = re.compile(r"Coincident Wetbulb 5.0%(.*)")
_db_100_pattern = re.compile(r"Drybulb 10.%(.*)")
_wb_100_pattern = re.compile(r"Coincident Wetbulb 10.%(.*)")
_db_20_pattern = re.compile(r"Drybulb 2.0%(.*)")
_wb_20_pattern = re.compile(r"Coincident Wetbulb 2.0%(.*)")
_db_04_pattern = re.compile(r"Drybulb 0.4%(.*)")
_wb_04_pattern = re.compile(r"Coincident Wetbulb 0.4%(.*)")
_db_range_50_pattern = re.compile(r"Drybulb range - DB 5%(.*)")
_wb_range_50_pattern = re.compile(r"Wetbulb range - DB 5%(.*)")
_winds_pattern = re.compile(r"Monthly Statistics for Wind Speed[\s\S]*Daily Avg(.*)")
_windd_patterns = tuple(re.compile(
r"Monthly Wind Direction %[\s\S]*" + dir + r"\s(.*)") for dir in _wind_dir_names)
__slots__ = ('_file_path', '_winter_des_day_dict', '_summer_des_day_dict',
'_monthly_wind_dirs', '_location', '_ashrae_climate_zone',
'_koppen_climate_zone', '_extreme_cold_week', '_extreme_hot_week',
'_typical_weeks', '_monthly_db_50', '_monthly_wb_50', '_monthly_db_range_50',
'_monthly_wb_range_50', '_monthly_db_100', '_monthly_wb_100', '_monthly_db_20',
'_monthly_wb_20', '_monthly_db_04', '_monthly_wb_04', '_monthly_wind',
'_stand_press_at_elev', '_monthly_tau_beam', '_monthly_tau_diffuse',
'_header', '_body')
def __init__(self, file_path):
"""Initialize the class.
"""
if file_path is not None:
if not os.path.isfile(file_path):
raise ValueError(
'Cannot find an stat file at {}'.format(file_path))
if not file_path.lower().endswith('stat'):
raise TypeError('{} is not an .stat file.'.format(file_path))
self._file_path = os.path.normpath(file_path)
# defaults empty state for certain parameters
self._winter_des_day_dict = {}
self._summer_des_day_dict = {}
self._monthly_wind_dirs = []
# import the data from the file
if file_path is not None:
self._import_data()
@classmethod
def from_dict(cls, data):
""" Create Stat from a dictionary.
Args:
data: A python dictionary in the following format
.. code-block:: python
{
"location": {}, # ladybug location schema
"ashrae_climate_zone": ""5A, # str
"koppen_climate_zone": "Dfa", # str
"extreme_cold_week": {}, # ladybug analysis period schema
"extreme_hot_week": {}, # ladybug analysis period schema
"typical_weeks": {}, # dict of ladybug analysis period schemas
"heating_dict": {}, # dict containing heating design conditions
"cooling_dict": {}, # dict containing cooling design conditions
"monthly_db_50": [], # list of 12 float values for each month
"monthly_wb_50": [], # list of 12 float values for each month
"monthly_db_range_50": [], # list of 12 float values for each month
"monthly_wb_range_50": [], # list of 12 float values for each month
"monthly_db_100": [], # list of 12 float values for each month
"monthly_wb_100": [], # list of 12 float values for each month
"monthly_db_20": [], # list of 12 float values for each month
"monthly_wb_20": [], # list of 12 float values for each month
"monthly_db_04": [], # list of 12 float values for each month
"monthly_wb_04": [], # list of 12 float values for each month
"monthly_wind": [], # list of 12 float values for each month
"monthly_wind_dirs": [], # matrix with 12 cols for months of the year
                    # and 8 rows for the cardinal directions.
"standard_pressure_at_elev": 101325, # float value for pressure in Pa
"monthly_tau_beam":[], # list of 12 float values for each month
"monthly_tau_diffuse": [] # list of 12 float values for each month
}
"""
# Initialize the class with all data missing
stat_ob = cls(None)
# Check required and optional keys
option_keys_none = ('ashrae_climate_zone', 'koppen_climate_zone',
'extreme_cold_week', 'extreme_hot_week',
'standard_pressure_at_elev')
option_keys_list = ('monthly_db_50', 'monthly_wb_50',
'monthly_db_range_50', 'monthly_wb_range_50',
'monthly_db_100', 'monthly_wb_100', 'monthly_db_20',
'monthly_wb_20', 'monthly_db_04', 'monthly_wb_04',
'monthly_wind', 'monthly_wind_dirs',
'monthly_tau_beam', 'monthly_tau_diffuse')
option_keys_dict = ('typical_weeks', 'heating_dict', 'cooling_dict')
assert 'location' in data, 'Required key "location" is missing!'
for key in option_keys_none:
if key not in data:
data[key] = None
for key in option_keys_list:
if key not in data:
data[key] = []
for key in option_keys_dict:
if key not in data:
data[key] = {}
# assign the properties of the dictionary to the stat object.
stat_ob._location = Location.from_dict(data['location'])
stat_ob._ashrae_climate_zone = data['ashrae_climate_zone']
stat_ob._koppen_climate_zone = data['koppen_climate_zone']
stat_ob._extreme_cold_week = AnalysisPeriod.from_dict(data['extreme_cold_week'])\
if data['extreme_cold_week'] else None
stat_ob._extreme_hot_week = AnalysisPeriod.from_dict(data['extreme_hot_week'])\
if data['extreme_hot_week'] else None
stat_ob._typical_weeks = {}
for key, val in data['typical_weeks'].items():
if isinstance(val, list):
stat_ob._typical_weeks[key] = [AnalysisPeriod.from_dict(v) for v in val]
else:
stat_ob._typical_weeks[key] = AnalysisPeriod.from_dict(val)
stat_ob._winter_des_day_dict = data['heating_dict']
stat_ob._summer_des_day_dict = data['cooling_dict']
stat_ob._monthly_db_50 = data['monthly_db_50']
stat_ob._monthly_wb_50 = data['monthly_wb_50']
stat_ob._monthly_db_range_50 = data['monthly_db_range_50']
stat_ob._monthly_wb_range_50 = data['monthly_wb_range_50']
stat_ob._monthly_db_100 = data['monthly_db_100']
stat_ob._monthly_wb_100 = data['monthly_wb_100']
stat_ob._monthly_db_20 = data['monthly_db_20']
stat_ob._monthly_wb_20 = data['monthly_wb_20']
stat_ob._monthly_db_04 = data['monthly_db_04']
stat_ob._monthly_wb_04 = data['monthly_wb_04']
stat_ob._monthly_wind = data['monthly_wind']
stat_ob._monthly_wind_dirs = data['monthly_wind_dirs']
stat_ob._stand_press_at_elev = data['standard_pressure_at_elev']
stat_ob._monthly_tau_beam = data['monthly_tau_beam']
stat_ob._monthly_tau_diffuse = data['monthly_tau_diffuse']
return stat_ob
@property
def file_path(self):
"""Get the path to the stat file."""
return self._file_path
def _import_data(self):
"""Import data from a stat file.
"""
# set default state to ironpython for very old ironpython (2.7.0)
iron_python = True
try:
iron_python = True if platform.python_implementation() == 'IronPython' \
else False
except ValueError as e:
# older versions of IronPython fail to parse version correctly
# failed to parse IronPython sys.version: '2.7.5 (IronPython 2.7.5 (2.7.5.0)
# on .NET 4.0.30319.42000 (64-bit))'
if 'IronPython' in str(e):
iron_python = True
if iron_python:
statwin = codecs.open(self.file_path, 'r')
else:
statwin = codecs.open(self.file_path, 'r', encoding='utf-8', errors='ignore')
try:
line = statwin.readline()
# import header with location
self._header = [line] + [statwin.readline() for i in xrange(9)]
self._body = statwin.read()
except Exception as e:
import traceback
raise Exception('{}\n{}'.format(e, traceback.format_exc()))
else:
# import location data
loc_name = self._header[2].strip().replace('Location -- ', '')
if ' - ' in loc_name:
city = ' '.join(loc_name.split(' - ')[:-1])
else: # for US stat files it is full name separated by spaces
city = ' '.join(loc_name.split()[:-2])
country = loc_name.split(' ')[-1]
source = self._header[6].strip().replace('Data Source -- ', '')
station_id = self._header[8].strip().replace('WMO Station ', '')
if iron_python: # IronPython
matches = self._coord_pattern1.findall(
self._header[3].replace('\xb0', 'deg'))
else: # CPython
matches = self._coord_pattern2.findall(self._header[3])
lat_sign = -1 if matches[0][0] == 'S' else 1
latitude = lat_sign * (float(matches[0][1]) + (float(matches[0][2]) / 60))
lon_sign = -1 if matches[1][0] == 'W' else 1
longitude = lon_sign * (float(matches[1][1]) + (float(matches[1][2]) / 60))
time_zone = self._regex_check(self._timez_pattern, self._header[3])
elev_matches = self._elev_pattern1.findall(self._header[4])
if len(elev_matches) == 0:
elev_matches = self._elev_pattern2.findall(self._header[4])
elev_sign = -1 if elev_matches[0][-1].lower() == 'below' else 1
elevation = elev_sign * float(elev_matches[0][0])
self._location = Location()
self._location.city = city
self._location.country = country
self._location.source = source
self._location.station_id = station_id
self._location.latitude = latitude
self._location.longitude = longitude
self._location.time_zone = time_zone
self._location.elevation = elevation
# pull out individual properties
self._stand_press_at_elev = self._regex_check(
self._press_pattern, self._header[5])
self._ashrae_climate_zone = self._regex_check(
self._ashraecz_pattern, self._body)
self._koppen_climate_zone = self._regex_check(
self._koppencz_pattern, self._body)
# pull out extreme and seasonal weeks.
self._extreme_hot_week = self._regex_week_parse(self._hotweek_pattern)
self._extreme_cold_week = self._regex_week_parse(self._coldweek_pattern)
self._typical_weeks = self._regex_typical_week_parse()
# pull out annual design days
winter_vals = self._regex_parse(self._heat_pattern)
for key, val in zip(DesignDay.HEATING_KEYS, winter_vals):
self._winter_des_day_dict[key] = val
summer_vals = self._regex_parse(self._cool_pattern)
for key, val in zip(DesignDay.COOLING_KEYS, summer_vals):
self._summer_des_day_dict[key] = val
# Pull out relevant monthly information
self._monthly_tau_beam = self._regex_parse(self._tau_beam_pattern)
self._monthly_tau_diffuse = self._regex_parse(self._tau_diffuse_pattern)
self._monthly_db_50 = self._regex_parse(self._db_50_pattern)
self._monthly_wb_50 = self._regex_parse(self._wb_50_pattern)
self._monthly_db_100 = self._regex_parse(self._db_100_pattern)
self._monthly_wb_100 = self._regex_parse(self._wb_100_pattern)
self._monthly_db_20 = self._regex_parse(self._db_20_pattern)
self._monthly_wb_20 = self._regex_parse(self._wb_20_pattern)
self._monthly_db_04 = self._regex_parse(self._db_04_pattern)
self._monthly_wb_04 = self._regex_parse(self._wb_04_pattern)
self._monthly_db_range_50 = self._regex_parse(self._db_range_50_pattern)
self._monthly_wb_range_50 = self._regex_parse(self._wb_range_50_pattern)
self._monthly_wind = self._regex_parse(self._winds_pattern)
for direction in self._windd_patterns:
dirs = self._regex_parse(direction)
if dirs != []:
self._monthly_wind_dirs.append(dirs)
if self._monthly_wind_dirs == []:
self._monthly_wind_dirs = [[0] * 12 for i in xrange(8)]
finally:
statwin.close()
def _regex_check(self, regex_pattern, search_space):
matches = regex_pattern.findall(search_space)
if len(matches) > 0:
try:
return float(matches[0])
except ValueError:
return matches[0]
else:
return None
def _regex_week(self, match):
if len(match) == 4:
try:
st_mon = int(self._months.index(match[0])) + 1
end_mon = int(self._months.index(match[2])) + 1
st_day = int(match[1])
end_day = int(match[3])
except ValueError:
return None
return AnalysisPeriod(st_mon, st_day, 0, end_mon, end_day, 23)
else:
return None
def _regex_week_parse(self, regex_pattern):
matches = regex_pattern.findall(self._body)
if len(matches) > 0:
return self._regex_week(matches[0])
else:
return None
def _regex_typical_week_parse(self):
typ_weeks = {'other': []}
matches = self._typweek_pattern.findall(self._body)
for match in matches:
a_per = self._regex_week(match[1:])
if 'winter' in match[0]:
typ_weeks['winter'] = a_per
elif 'spring' in match[0]:
typ_weeks['spring'] = a_per
elif 'summer' in match[0]:
typ_weeks['summer'] = a_per
elif 'autumn' in match[0]:
typ_weeks['autumn'] = a_per
else:
typ_weeks['other'].append(a_per)
return typ_weeks
def _regex_parse(self, regex_pattern):
matches = regex_pattern.findall(self._body)
if len(matches) > 0:
raw_txt = matches[0].strip().split('\t')
try:
return [float(i) if i != 'N' else None for i in raw_txt]
except ValueError:
return [str(i) for i in raw_txt]
else:
return []
@property
def monthly_found(self):
if self._monthly_db_range_50 != [] and self._monthly_wb_range_50 != [] \
and self._monthly_wind != [] \
and self._stand_press_at_elev is not None:
return True
else:
return False
@property
def location(self):
"""Return ladybug location object."""
return self._location
@property
def ashrae_climate_zone(self):
"""Return a text string indicating the ASHRAE climate zone.
Numbers in the zone denote average temperature (0 = Hottest; 8 = Coldest)
Letters in the zone denote wetness (A = Humid; B = Dry; C = Marine)
"""
return self._ashrae_climate_zone
@property
def koppen_climate_zone(self):
"""Return a text string indicating the Koppen climate zone.
The Koppen climate classification is the most widely used climate
classification system and combines average annual and monthly
temperatures, precipitation, and the seasonality of precipitation.
"""
return self._koppen_climate_zone
@property
def extreme_cold_week(self):
"""AnalysisPeriod for the coldest week within the corresponding EPW."""
return self._extreme_cold_week
@property
def extreme_hot_week(self):
"""AnalysisPeriod for the hottest week within the corresponding EPW."""
return self._extreme_hot_week
@property
def typical_winter_week(self):
"""AnalysisPeriod for a typical winter week within the corresponding EPW."""
try:
return self._typical_weeks['winter']
except KeyError:
return None
@property
def typical_spring_week(self):
"""AnalysisPeriod for a typical spring week within the corresponding EPW."""
try:
return self._typical_weeks['spring']
except KeyError:
return None
@property
def typical_summer_week(self):
"""AnalysisPeriod for a typical summer week within the corresponding EPW."""
try:
return self._typical_weeks['summer']
except KeyError:
return None
@property
def typical_autumn_week(self):
"""AnalysisPeriod for a typical autumn week within the corresponding EPW."""
try:
return self._typical_weeks['autumn']
except KeyError:
return None
@property
def other_typical_weeks(self):
"""List of AnalysisPeriods for typical weeks outside of the seasonal weeks."""
return self._typical_weeks['other']
@property
def annual_heating_design_day_996(self):
"""A design day object representing the annual 99.6% heating design day."""
if bool(self._winter_des_day_dict):
return DesignDay.from_ashrae_dict_heating(
self._winter_des_day_dict, self.location, False,
self._stand_press_at_elev)
else:
return None
@property
def annual_heating_design_day_990(self):
"""A design day object representing the annual 99.0% heating design day."""
if bool(self._winter_des_day_dict):
return DesignDay.from_ashrae_dict_heating(
self._winter_des_day_dict, self.location, True,
self._stand_press_at_elev)
else:
return None
@property
def annual_cooling_design_day_004(self):
"""A design day object representing the annual 0.4% cooling design day."""
if bool(self._summer_des_day_dict):
tau = None
month_num = int(self._summer_des_day_dict['Month'])
if self._monthly_tau_beam != [] and self._monthly_tau_diffuse != [] \
and self._monthly_tau_beam[month_num - 1] is not None and \
self._monthly_tau_diffuse[month_num - 1] is not None:
tau = (self._monthly_tau_beam[month_num - 1],
self._monthly_tau_diffuse[month_num - 1])
return DesignDay.from_ashrae_dict_cooling(
self._summer_des_day_dict, self.location, False,
self._stand_press_at_elev, tau)
else:
return None
@property
def annual_cooling_design_day_010(self):
"""A design day object representing the annual 1.0% cooling design day."""
if bool(self._summer_des_day_dict):
tau = None
month_num = int(self._summer_des_day_dict['Month'])
if self._monthly_tau_beam != [] and self._monthly_tau_diffuse != [] \
and self._monthly_tau_beam[month_num - 1] is not None and \
self._monthly_tau_diffuse[month_num - 1] is not None:
tau = (self._monthly_tau_beam[month_num - 1],
self._monthly_tau_diffuse[month_num - 1])
return DesignDay.from_ashrae_dict_cooling(
self._summer_des_day_dict, self.location, True,
self._stand_press_at_elev, tau)
else:
return None
@property
def monthly_cooling_design_days_050(self):
"""A list of 12 objects representing monthly 5.0% cooling design days."""
if not self.monthly_found or self._monthly_db_50 == [] \
or self._monthly_wb_50 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_50, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_50]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'5% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)]
@property
def monthly_cooling_design_days_100(self):
"""A list of 12 objects representing monthly 10.0% cooling design days."""
if not self.monthly_found or self._monthly_db_100 == [] \
or self._monthly_wb_100 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_100, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_100]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'10% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)]
@property
def monthly_cooling_design_days_020(self):
"""A list of 12 objects representing monthly 2.0% cooling design days."""
if not self.monthly_found or self._monthly_db_20 == [] \
or self._monthly_wb_20 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_20, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_20]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'2% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)]
@property
def monthly_cooling_design_days_004(self):
"""A list of 12 objects representing monthly 0.4% cooling design days."""
if not self.monthly_found or self._monthly_db_04 == [] \
or self._monthly_wb_04 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_04, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_04]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'0.4% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)]
@property
def monthly_db_temp_050(self):
"""A list of 12 float values for monthly 5.0% dry bulb temperature."""
return self._monthly_db_50
@property
def monthly_wb_temp_050(self):
"""A list of 12 float values for monthly 5.0% wet bulb temperature."""
return self._monthly_wb_50
@property
def monthly_db_temp_range_050(self):
"""A list of 12 values for monthly ranges of dry bulb temperatures at 5.0%."""
return self._monthly_db_range_50
@property
def monthly_wb_temp_range_050(self):
"""A list of 12 values for monthly ranges of wet bulb temperatures at 5.0%."""
return self._monthly_wb_range_50
@property
def standard_pressure_at_elev(self):
"""The standard pressure on pascals at the elevation of the location."""
return self._stand_press_at_elev
@property
def monthly_wind_conditions(self):
"""A list of 12 monthly wind conditions that are used on the design days."""
return [WindCondition(x, y) for x, y in zip(
self._monthly_wind, self.monthly_wind_dirs)]
@property
def monthly_ws_avg(self):
"""A list of 12 float values for monthly average wind speeds."""
return self._monthly_wind
@property
def monthly_wind_dirs(self):
"""A list of prevailing wind directions for each month."""
mwd = zip(*self._monthly_wind_dirs)
return [self._wind_dirs[mon.index(max(mon))] for mon in mwd]
@property
def monthly_clear_sky_conditions(self):
"""A list of 12 monthly clear sky conditions that are used on the design days."""
        if self._monthly_tau_diffuse == [] or self._monthly_tau_beam == []:
return [ASHRAEClearSky(Date(i, 21)) for i in xrange(1, 13)]
return [ASHRAETau(Date(i, 21), x, y) for i, x, y in zip(
list(xrange(1, 13)), self._monthly_tau_beam, self._monthly_tau_diffuse)]
@property
def monthly_tau_beam(self):
"""A list of 12 float values for monthly beam optical depth.
These values can be used to generate ASHRAE Revised Clear Skies, which
        are intended to determine peak solar load and sizing parameters for
HVAC systems.
"""
return self._monthly_tau_beam
@property
def monthly_tau_diffuse(self):
"""Return a list of 12 float values for monthly diffuse optical depth.
These values can be used to generate ASHRAE Revised Clear Skies, which
        are intended to determine peak solar load and sizing parameters for
HVAC systems.
"""
return self._monthly_tau_diffuse
def to_dict(self):
"""Convert the stat object to a dictionary."""
def dictify_dict(base_dict):
new_dict = {}
for key, val in base_dict.items():
if isinstance(val, list):
new_dict[key] = [v.to_dict() for v in val]
else:
new_dict[key] = val.to_dict()
return new_dict
return {
'location': self.location.to_dict(),
'ashrae_climate_zone': self.ashrae_climate_zone,
'koppen_climate_zone': self.koppen_climate_zone,
'extreme_cold_week': self.extreme_cold_week.to_dict()
if self.extreme_cold_week else None,
'extreme_hot_week': self.extreme_hot_week.to_dict()
            if self.extreme_hot_week else None,
'typical_weeks': dictify_dict(self._typical_weeks),
'heating_dict': self._winter_des_day_dict,
'cooling_dict': self._summer_des_day_dict,
"monthly_db_50": self._monthly_db_50,
"monthly_wb_50": self._monthly_wb_50,
"monthly_db_range_50": self._monthly_db_range_50,
"monthly_wb_range_50": self._monthly_wb_range_50,
"monthly_db_100": self._monthly_db_100,
"monthly_wb_100": self._monthly_wb_100,
"monthly_db_20": self._monthly_db_20,
"monthly_wb_20": self._monthly_wb_20,
"monthly_db_04": self._monthly_db_04,
"monthly_wb_04": self._monthly_wb_04,
"monthly_wind": self._monthly_wind,
"monthly_wind_dirs": self._monthly_wind_dirs,
"standard_pressure_at_elev": self.standard_pressure_at_elev,
"monthly_tau_beam": self.monthly_tau_beam,
"monthly_tau_diffuse": self.monthly_tau_diffuse,
"type": 'STAT'
}
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def __repr__(self):
"""stat file representation."""
return "STAT [%s]" % self.location.city
|
ladybug-analysis-tools/ladybug-core
|
ladybug/stat.py
|
Python
|
gpl-3.0
| 31,851
|
[
"EPW"
] |
569c8bf340894a97d73df645d74a0f44f3adf5a7f41108085aa857f519e54be7
|
# bs.po
val = {"" : "Project-Id-Version: sheltermanager\nReport-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\nPOT-Creation-Date: 2013-01-24 10:55+0000\nPO-Revision-Date: 2012-06-23 14:23+0000\nLast-Translator: Nevena <snevenas@gmail.com>\nLanguage-Team: Bosnian <bs@li.org>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Launchpad-Export-Date: 2014-01-23 05:31+0000\nX-Generator: Launchpad (build 16901)\n",
"{plural3} people with active reservations have not been homechecked" : "",
"Donation Type" : "",
"Use animal comments if photo notes are blank" : "",
"Half-Yearly" : "Polugodisnje",
"Select recommended" : "",
"At least the last name should be completed." : "",
"Chinese Crested Dog" : "Kineski cubasti pas",
"New template" : "",
"Include incomplete medical and vaccination records when generating document templates" : "",
"Due today" : "",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "Možete dodati prefiks ispred traženog termina u polje za pretraživanje: a: pretraga samo zivotinja; wl: pretraga liste cekanja za prijem u skloniste, la: pretraga nestalih zivotinja i fa; pretraga pronadjenih zivotinja.",
"Warnings" : "",
"Corded" : "",
"Edit diet" : "",
"Stolen {0}" : "",
"Domestic Long Hair" : "Domaca dugodlaka",
"{plural2} year" : "",
"Export this database in various formats" : "",
"Person - Name and Address" : "",
"Not For Adoption" : "",
"The date the animal was tattooed" : "",
"Entered From" : "",
"Base Color" : "",
"TT = first and second letter of animal type" : "",
"Reconcile" : "",
"Default Species" : "",
"View Manual" : "",
"Search Results for '{0}'" : "",
"Lost Animal - Details" : "",
"Remove the heartworm test fields from animal health details" : "",
"Income::Adoption" : "Prihod: Udomljavanje",
"{plural0} animal was euthanized" : "",
"If this person is a member, their membership number." : "",
"The size of this animal" : "",
"Use Automatic Insurance Numbers" : "",
"Cane Corso Mastiff" : "Italijanski mastif",
"Negative" : "Negativan",
"View Found Animal" : "",
"More diary notes" : "",
"Show animal thumbnails in movement and medical books" : "",
"Install the selected reports to your database" : "",
"Reservation date cannot be after cancellation date." : "",
"American Staffordshire Terrier" : "Amersicki staford terijer",
"Publish to folder" : "",
"New Diary" : "",
"This person has been banned from adopting animals." : "",
"Amazon" : "Amazonska",
"Email person" : "",
"Default destination account for donations" : "",
"Affenpinscher" : "Affenpinscher - Majmunski pinc",
"Homecheck History" : "",
"Dosage" : "",
"Urgency" : "",
"Bank::Savings" : "Banka::Stednja",
"Last Name" : "",
"Tuesday" : "",
"Ginger" : "Narandzasta / djumbir",
"New Regimen" : "",
"Australian Cattle Dog/Blue Heeler" : "Australijski stocarski pas",
"Boarding cost type" : "",
"Owner" : "Vlasnik",
"Medical Book" : "Zdravstveni karton",
"Date lost cannot be blank." : "",
"Irish Terrier" : "Irski terijer",
"Mark selected donations received" : "",
"Found Animal: {0}" : "Pronadjena zivotinja: {0}",
"Edit cost" : "Uredjivanje troskova",
"Jump to donations" : "",
"Successfully posted to Facebook" : "",
"Adoption Number" : "",
"McNab" : "",
"Munsterlander" : "",
"Recently deceased shelter animals (last 30 days)." : "",
"Staff record" : "",
"Add a log entry" : "Dodaj zapis",
"Generate document from this donation" : "",
"Create waiting list records from the selected forms" : "",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "",
"June" : "",
"The secondary breed of this animal" : "",
"Stay" : "",
"Lost to" : "",
"Removed" : "",
"Reservation Book" : "Knjiga rezervacija",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "",
"Points for matching features" : "",
"Credit Card" : "Kreditna kartica",
"Cockatoo" : "Kakadu",
"Perform Homecheck" : "",
"Person" : "Osoba",
"Debit Card" : "",
"View Report" : "",
"Generate a new animal code" : "",
"Oriental Tabby" : "",
"Address Contains" : "",
"Financial" : "Finansije",
"Appaloosa" : "",
"Text" : "Tekst",
"Test book" : "",
"Header" : "",
"Heartworm Test Date" : "",
"English Coonhound" : "Engleski gonic rakuna",
"Owner Vet" : "",
"Add movement" : "",
"Tibetan Spaniel" : "Tibetanski spanijel",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "Pojedini preglednici dozvoljavaju skracene komande, za brzi prelaz na ekran sa podacima za udomljavnje u Crome ili Firefox pregledniku pritisnite SHIFT+ALT+A.",
"Exclude this image when publishing" : "",
"Chocolate Labrador Retriever" : "Cokoladni labrador retriver",
"This animal has been FIV/L tested" : "",
"Don't scale" : "",
"Allergies" : "Alergija",
"Chart (Bar)" : "",
"Keep table headers visible when scrolling" : "",
"Tooltip" : "",
"Animal food costs" : "Troskovi zivotinjske hrane",
"{plural2} urgent entries on the waiting list" : "",
"U (Unwanted Cat)" : "N (Nezeljena macka)",
"MeetAPet Publisher" : "",
"Add a medical regimen" : "",
"Alaskan Malamute" : "Aljaski malamut",
"Wheaten Terrier" : "",
"Glen of Imaal Terrier" : "Glen of Imaal terijer",
"Irish Water Spaniel" : "",
"{plural3} shelter animals have people looking for them" : "",
"Mountain Dog" : "Planinski pas",
"Silky Terrier" : "Svilenasti terijer",
"Peacock/Pea fowl" : "Paun",
"White German Shepherd" : "Bijeli njemacki ovcar",
"Create a new animal from this waiting list entry" : "",
"To continue using ASM, please renew {0}" : "",
"Please select a PDF, HTML or JPG image file to attach" : "",
"Patterdale Terrier (Fell Terrier)" : "",
"Old Password" : "",
"Pixie-Bob" : "",
"Great Dane" : "Njemacka doga",
"Executing..." : "",
"New Log" : "",
"Added by {0} on {1}" : "Added by {0} on {1}",
"Sloughi" : "Marokanski hrt, Slugi",
"Expenses::Electricity" : "Troskovi::Struja",
"Species to use when publishing to third party services and adoption sites" : "",
"Add found animal" : "Dodaj pronadjenu zivotinju",
"Show codes on the shelter view screen" : "",
"Rotate image 90 degrees anticlockwise" : "",
"FTP username" : "",
"Make this the default video link when publishing to the web" : "",
"Test marked as performed for {0} - {1}" : "",
"Rough" : "Gruba",
"Use a single breed field" : "",
"Blue" : "Plava",
"{0} treatments every {1} months" : "{0} treatments svakog {1} mejseca",
"Flemish Giant" : "",
"Edit my diary notes" : "",
"Removal Reason" : "",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "",
"Add {0}" : "",
"Scottish Terrier Scottie" : "Skotski terijer",
"Found animals reported in the last 30 days." : "",
"Create Log" : "",
"This animal is a crossbreed" : "",
"On Foster (in figures)" : "",
"{plural0} shelter animal has people looking for them" : "",
"Quarterhorse" : "",
"Housetrained" : "Naucen na zivot u kuci",
"Name and Address" : "",
"Remove the good with fields from animal notes" : "",
"Donation" : "Donacija",
"(none)" : "",
"Path" : "",
"weeks" : "sedmica",
"Flat-coated Retriever" : "Ravnodlaki retriver",
"Mobile" : "",
"Address" : "Adresa",
"{plural3} unaltered animals have been adopted in the last month" : "",
"Positive/Negative" : "",
"These are the default values for these fields when creating new records." : "",
"{plural0} test needs to be performed today" : "",
"Black and White" : "Crno-bijela",
"Fawn" : "",
"Reference" : "",
"Lancashire Heeler" : "",
"Ocicat" : "",
"Goose" : "Guska",
"Default image for this record and the web" : "",
"{plural1} weeks" : "",
"Mouse" : "Mis",
"The date this animal was reserved" : "",
"Change Investigation" : "",
"Default daily boarding cost" : "",
"Enable accounts functionality" : "",
"Lost Animal Contact" : "",
"Diary note cannot be blank" : "",
"Accountant" : "",
"Investigation" : "",
"Animal Name" : "",
"Day Pivot" : "",
"Type" : "Tip",
"Area where the animal was lost" : "",
"Message successfully sent" : "",
"Username" : "",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Costs: {3}, Total Costs: {4} Total Donations: {5}, Balance: {6}" : "",
"Add Found Animal" : "",
"You will need to upgrade to iOS 6 or higher to upload files." : "",
"Owners Vet" : "",
"Heartworm Tested" : "Testirnje na dirofilariozu (srcanog crva)",
"Rabbit" : "Kunic",
"Manchester Terrier" : "Mancesterski terijer",
"Hold" : "",
"{plural0} medical treatment needs to be administered today" : "",
"Health Problems" : "Zdravstveni problemi",
"This person has been banned from adopting animals" : "Ovoj osobi je zabranjeno udomljavanje zivotinja",
"Bank::Deposit" : "Banka::Depozit",
"Adopt" : "",
"{plural3} animals died" : "",
"{plural0} day." : "",
"{plural3} animals were transferred to other shelters" : "",
"Found Animal {0}" : "Pronadjena zivotinja {0}",
"Enable sharing animals via Facebook" : "",
"Enable FTP uploading" : "",
"Add report" : "",
"New password and confirmation password don't match." : "",
"Add Diets" : "",
"Email users their diary notes each day" : "",
"September" : "",
"When posting an animal to Facebook, make a note of it in the log with this type" : "",
"Investigations" : "",
"Not eligible for gift aid" : "",
"days" : "dana",
"Urgent" : "Hitno",
"Litter" : "Leglo",
"Bank current account" : "Tekuci racun",
"The date the animal was altered" : "",
"Include CSV header line" : "",
"Found Animal - Details" : "",
"Longest On Shelter" : "",
"Update system options" : "",
"Liver and White" : "Boja cokolade i bijela",
"UUUUUUUUUU or UUUU = unique number" : "",
"Lookup Values" : "",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "",
"Create diary notes from a task" : "",
"Due" : "",
"Syntax check this SQL" : "",
"Default Vaccination Type" : "",
"Additional date field '{0}' contains an invalid date." : "",
"Electricity Bills" : "Racuni za struju",
"Quarterly" : "Kvartalno",
"Set this to 0 to never automatically remove." : "",
"Tests" : "",
"Points for matching species" : "",
"Voucher Types" : "",
"Welcome!" : "",
"Liability" : "Obaveze",
"Message from {0}" : "",
"Publishing Logs" : "",
"Search" : "Pretrazivanje",
"Email a copy of the selected documents" : "",
"Remove the investigation tab from person records" : "",
"Contact Contains" : "",
"Find a lost animal" : "Nadji izgubljenu zivotinju",
"{plural2} medical treatments need to be administered today" : "",
"Creme DArgent" : "",
"Neapolitan Mastiff" : "Napuljski mastif",
"Removal reason" : "",
"Code" : "Šifra",
"Features" : "Karakteristike",
"Sep" : "",
"Dove" : "Golub",
"The microchip number" : "",
"Sex" : "Spol",
"Akita" : "Akita",
"View Donation" : "",
"Frequency" : "",
"Generated document '{0}'" : "",
"Softbill (Other)" : "",
"Trial adoption" : "",
"Movements" : "",
"Date lost cannot be blank" : "",
"Code format tokens:" : "",
"Generate image thumbnails as tn_$$IMAGE$$" : "",
"Daily Boarding Cost" : "",
"Waiting list urgency update period in days" : "",
"Add donation" : "",
"Creating..." : "Kreiranje",
"The litter this animal belongs to" : "",
"Additional fields need a name, label and type." : "",
"Delete Cost" : "",
"Clone" : "",
"Retailer book" : "",
"Shelter animal {0} '{1}'" : "",
"White and Black" : "",
"Rabies Tag" : "",
"Bank" : "Banka",
"Find a found animal" : "Nadji pronadjenu zivotinju",
"Bulk Complete Diary" : "",
"Bull Terrier" : "Bulterijer",
"Reports" : "Izvjestaji",
"Sorry. ASM will not work without Javascript." : "",
"Login" : "",
"Vaccination marked as given for {0} - {1}" : "",
"AdoptAPet Publisher" : "",
"Location and Species" : "",
"Date reported cannot be blank." : "",
"{plural0} person with an active reservation has not been homechecked" : "",
"When a message is created, email it to each matching user" : "",
"Allow overriding of the movement number on the Move menu screens" : "",
"FoundLost animal entry {0} successfully created." : "",
"Start Of Day" : "",
"Prefill new media notes for animal images with animal comments if left blank" : "",
"Shelter Details" : "",
"HelpingLostPets Publisher" : "",
"Date put on cannot be blank" : "",
"Curly" : "",
"Tabby and White" : "",
"Template" : "",
"Mark an animal deceased" : "Oznacite uginule zivotinje",
"New Owner" : "",
"Start date must be a valid date" : "",
"SQL Interface" : "SQL interfejs/sucelje",
"Time On List" : "",
"Norwegian Lundehund" : "Norveski lovacki pas",
"Shelter stats (this year)" : "",
"Vaccinate Animal" : "",
"Cocker Spaniel" : "Koker spanijel",
"View Lost Animal" : "",
"Returned to Owner {0}" : "",
"Edit diary notes" : "Uredi zapise u dnevniku",
"FTP password" : "",
"Waiting list entries matching '{0}'." : "",
"Account code '{0}' is not valid." : "",
"Mark treatments given today" : "",
"Any markings or distinguishing features the animal has" : "",
"Account" : "",
"Havana" : "Havana",
"Black and Tan" : "Crno-zuta",
"You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "",
"Find animal columns" : "",
"Belgian Hare" : "Belgijski zec",
"Accounts need a code." : "",
"Death Reasons" : "",
"Add litter" : "",
"Add Person" : "",
"Leave" : "",
"Sorrel Tortoiseshell" : "",
"Default Cost" : "",
"Organization" : "",
"Reason for entry" : "",
"Belgian Shepherd Malinois" : "Belgijski ovcar malinoa",
"Peruvian Paso" : "",
"Date of birth is not valid" : "Datum rodjenja nije validan",
"Expenses::Phone" : "Troskovi::Telefon",
"Tricolour" : "Trobojna",
"Movement numbers must be unique." : "",
"Change Movement" : "",
"Quicklinks" : "",
"Norwich Terrier" : "Norvicki terijer",
"Find person" : "Nadji osobu",
"Delete Found Animal" : "",
"Abyssinian" : "Abisinac",
"The date the animal was adopted" : "",
"Access System Menu" : "",
"Show the full diary (instead of just my notes) on the home page" : "",
"Include quarantined animals" : "",
"This person is not flagged as a retailer and cannot handle retailer movements." : "",
"Jack Russell Terrier" : "Jack Russel terijer",
"Priority" : "",
"Foster" : "",
"Sick/Injured" : "Bolesna/Povrijedjena",
"View Animals" : "",
"Save this record" : "",
"Animal code format" : "",
"Microchip" : "Mikrocip",
"Dogs" : "",
"Bunny Rabbit" : "",
"Dwarf" : "",
"New Cost" : "",
"Blue Tortie" : "",
"Foster book" : "Registar privremenih udomitelja",
"Select person to merge" : "",
"Retailer movement successfully created." : "",
"Terrier" : "Terijer",
"Advanced" : "",
"Newfoundland Dog" : "Njufaunlender",
"How urgent is it that we take this animal?" : "",
"Settings" : "Podesavanja",
"Warn when creating multiple reservations on the same animal" : "",
"These fields determine which columns are shown on the find animal and find person screens." : "",
"The date this animal was found" : "",
"Return an animal from transfer" : "",
"New Test" : "",
"{plural0} trial adoption has ended" : "",
"RabiesTag" : "Privjesak sa podacima o vakcinama bjesnila",
"Illyrian Sheepdog" : "Ilirski ovcarski pas",
"Found Animal Contact" : "",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "",
"Remove the document repository functionality from menus" : "",
"Generate a document from this person" : "",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "",
"Date found cannot be blank." : "",
"Transferred In" : "",
"A short version of the reference number" : "",
"Given" : "",
"Paso Fino" : "",
"Scottish Fold" : "",
"Log successfully added." : "",
"Add Users" : "",
"All animals who are flagged as not for adoption." : "",
"Cell Phone" : "",
"Columns" : "",
"Movement" : "Kretanje",
"Visual Theme" : "",
"Attach Link" : "",
"The date the animal was microchipped" : "",
"New Password" : "",
"Boxer" : "Bokser",
"Cheque" : "",
"Eskimo Dog" : "Eskimski pas",
"{0} treatments every {1} weeks" : "{0} treatments svake {1} sedmice",
"A publish job is already running." : "",
"Black Labrador Retriever" : "Crni labrador retriver",
"Tonkinese" : "",
"CC" : "",
"You can bookmark search results, animals, people and most data entry screens." : "Mozete oznaciti rezultate pretrazivanja, zivotinje, osobe i vecinu prozora za unos podataka",
"Publishing complete." : "",
"Create Animal" : "",
"Unit within the location, eg: pen or cage number" : "",
"Javanese" : "Javanska",
"Hidden comments about the animal" : "",
"Income::OpeningBalances" : "Prihod:PocetnoStanje",
"{plural2} shelter animals have people looking for them" : "",
"Whippet" : "",
"Lop Eared" : "",
"{plural1} vaccinations need to be administered today" : "",
"Escaped" : "Pobjegla",
"Error contacting server." : "",
"ASM 3 is compatible with your iPad and other tablets." : "ASM 3 je kompatabilan sa iPadom i drugim tablet racunarima.",
"Can't reserve an animal that has an active movement." : "",
"Health and Identification" : "",
"The date this animal was removed from the waiting list" : "",
"Current Vet" : "",
"Reservation" : "Rezervacija",
"Delete Movement" : "",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "",
"Add vaccination" : "",
"Match lost and found animals" : "",
"Exclude animals who are aged under" : "",
"Shelter stats (this month)" : "",
"IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "",
"Donations of type" : "",
"Movements require an animal." : "Kretanje zahtjeva zivotinju",
"Create a cost record" : "",
"Ragdoll" : "",
"Selkirk Rex" : "",
"Toucan" : "Tukan",
"Border Terrier" : "Granicarski terijer",
"Update animals with SmartTag Pet ID" : "Azuriraj zivotinje sa SmartTag Pet ID",
"Retriever" : "Retriver",
"Email Address" : "",
"Add cost" : "Dodavanje troskova",
"Animal - Entry" : "",
"The SmartTag PETID number" : "",
"This animal was euthanized" : "",
"Edit litter" : "",
"Home" : "Poceta stranica",
"{plural3} months" : "",
"Yellow Labrador Retriever" : "Zlatni labrador retriver",
"Delete Medical Records" : "",
"The result of the heartworm test" : "",
"Income::Shop" : "Prihod:Prodavnica",
"Settings, Options" : "",
"Find person columns" : "",
"City contains" : "",
"All staff on file." : "",
"Duration" : "",
"Table" : "",
"F (Stray Dog)" : "PL (Pas lutalica)",
"Heartworm Test Result" : "",
"{plural1} animals were reclaimed by their owners" : "",
"Homechecked By" : "",
"Errors" : "",
"The person record to merge must be different from the original." : "",
"Guinea Pig" : "Gvinejsko prase",
"Smooth Fox Terrier" : "",
"Unsaved Changes" : "",
"Default Location" : "",
"Arabian" : "",
"West Highland White Terrier Westie" : "Zapadnoskotski bijeli terijer",
"SubTotal" : "",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "",
"Add diet" : "",
"Looking for" : "",
"{plural0} animal was adopted" : "",
"In SubTotal" : "",
"View Shelter Animals" : "",
"Good with Children" : "Dobara odnos s djecom",
"Silver Marten" : "",
"Donations require a received date" : "",
"Deceased Date" : "",
"Messages" : "",
"Insurance No" : "",
"Lookup data" : "Pretraga podatka",
"Edit form field" : "",
"Create a new animal by copying this one" : "",
"Add diary" : "Dodavanje dnevnika",
"Add user" : "",
"From retailer is only valid on adoption movements." : "",
"Edit media notes" : "",
"Document templates" : "Preslosci dokumenta",
"Shih Tzu" : "",
"White and Brindle" : "",
"Produce a PDF of printable labels" : "",
"When adding animals" : "",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "Mozete rafinirati rezultate pretrage dodavanjem iza vaseg termina po zelji: sort:az, sort: za; sort:mr; sort:lr",
"{plural2} weeks" : "",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "",
"Species" : "Vrsta",
"Norwegian Forest Cat" : "Norveska sumska",
"Edit HTML publishing templates" : "Uredjivanje HTML predlozaka za objavljivanje",
"Change Accounts" : "",
"System" : "Sistem",
"SQL is syntactically correct." : "",
"Movements require an animal" : "",
"Hold until {0}" : "",
"Great Pyrenees" : "Pirinejski planinski pas",
"Online form fields need a name and label." : "",
"Mastiff" : "Mastif",
"Change Donation" : "",
"View Movement" : "",
"Facebook" : "",
"Kerry Blue Terrier" : "Irski plavi terijer",
"Only publish a set number of animals" : "",
"Lost Animal - Additional" : "",
"Staffordshire Bull Terrier" : "Stafordski bulterijer",
"Postage costs" : "Postanski troskovi",
"New online form" : "",
"Return an animal from adoption" : "Povratak zivotinje zbog neuspjelog udomljavanja",
"Log date must be a valid date" : "",
"Boarding Cost" : "",
"Deposit" : "",
"Found to" : "",
"Amber" : "Boje cilibara",
"Subject" : "",
"Image" : "Slika",
"months" : "mjeseca",
"Entry Reason Category" : "",
"Role is in use and cannot be deleted." : "",
"Similar Person" : "",
"Animal - Health and Identification" : "",
"{plural3} animals were euthanized" : "",
">>" : "",
"Tattoo" : "Tetovaza",
"Feb" : "",
"{plural2} days." : "",
"Required date must be a valid date" : "",
"Lovebird" : "Agapornis papaji, ljubavna ptica",
"New Field" : "",
"Draft" : "",
"Animals matching '{0}'." : "",
"Add Movement" : "",
"Vizsla" : "Vizla",
"Pug" : "Mops",
"Add voucher" : "",
"Marketer" : "",
"Aged From" : "",
"The default username is 'user' with the password 'letmein'" : "",
"Publisher Species" : "",
"Breed to use when publishing to third party services and adoption sites" : "",
"Link to an external web resource" : "",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"Spaniel" : "Spanijel",
"The tattoo number" : "",
"Missouri Foxtrotter" : "",
"Use SQL Interface" : "",
"S (Stray Cat)" : "ML (Macka lutalica)",
"Multiple Treatments" : "",
"Bouvier des Flanders" : "Flandrijski buvije, fladrijski govedar",
"These are the HTML headers and footers used when displaying online forms." : "",
"Paper Size" : "",
"Column" : "",
"January" : "",
"White and Torti" : "",
"Hedgehog" : "Jez",
"Golden" : "Zlatna",
"Document Link" : "",
"Vaccination Book" : "Knjizica vakcinacija",
"The date the animal was born" : "",
"Transfer To" : "",
"Add Report" : "",
"Settings, Lookup data" : "",
"This animal is not on the shelter." : "",
"Generate Report" : "",
"Cream" : "Krem",
"Points for matching breed" : "",
"Give and Reschedule" : "",
"Budgie/Budgerigar" : "",
"Kittens (under {0} months)" : "",
"Japanese Chin" : "Japanski cin",
"Reason the owner did not bring in the animal themselves" : "",
"{plural2} animals are not available for adoption" : "",
"Other Shelter" : "",
"This income account is the source for donations received of this type" : "",
"Mark this owner homechecked" : "",
"Description" : "Opis",
"Good with Dogs" : "Dobar odos sa psima",
"Kennel Cough" : "",
"Korat" : "Korat",
"Costs" : "Troškovi",
"Title Initials Last" : "",
"Only PDF, HTML and JPG image files can be attached." : "",
"Percheron" : "",
"{0} {1} {2} aged {3}" : "",
"Color" : "Boja",
"Creating donations and donation types creates matching accounts and transactions" : "",
"Go the options screen and set your shelter's contact details and other settings." : "",
"Test Book" : "",
"Person looking for report" : "",
"Add a new log" : "",
"Mo" : "",
"Chicken" : "Pile",
"Unreserved" : "",
"Send mass emails and perform mail merges" : "",
"Ragamuffin" : "",
"Title" : "",
"Returned" : "Vraceno",
"Light Amber" : "Svijetli cilbarbar",
"Delete this animal" : "",
"All donors on file." : "",
"Upload all available images for animals" : "",
"Adopted" : "Usvojen",
"View media" : "",
"The date the retailer movement is effective from" : "",
"Diary notes for: {0}" : "",
"Change date given on selected treatments" : "",
"Canaan Dog" : "Izraelski spic, kananski pas",
"Remove the size field from animal details" : "",
"Stationary costs" : "Troskovi kancelarijskog materijala ???",
"Today" : "",
"Schipperke" : "Njemacki spic",
"South Russian Ovcharka" : "Juznoruski ovcar",
"Premises" : "Objekti",
"Entered (oldest first)" : "",
"Donations require a person" : "",
"Scottish Deerhound" : "Skotski hrt",
"Released To Wild" : "Pustena u divljinu",
"Create this person" : "",
"Rhodesian Ridgeback" : "Rodezijski gonic",
"FIV" : "Maciji virus imunodeficijencije",
"Roles" : "",
"Include animals who don't have a picture" : "",
"Diets need a start date." : "",
"Ruddy" : "",
"Delete Regimen" : "",
"Welsh Springer Spaniel" : "Velski springer spanijel",
"{plural0} person has an overdue donation" : "",
"Telephone" : "Telefon",
"Animal - Notes" : "",
"Donation of {0} successfully received ({1})." : "",
"A description or other information about the animal" : "",
"There is not enough information in the form to create a waiting list record (need a description)." : "",
"Treat foster animals as part of the shelter inventory" : "",
"Rank" : "",
"View Investigations" : "",
"Accounts" : "Racuni / Nalozi ??'",
"Presa Canario" : "Kanarski pas",
"Tan" : "Zuta",
"From Other" : "",
"Rex" : "",
"Britannia Petite" : "Patuljasti britanski",
"Add to log" : "",
"New Movement" : "",
"White and Tabby" : "",
"Date Found" : "datum nalazenja",
"Import complete with {plural1} errors." : "",
"Internal Location" : "",
"Label" : "",
"Find lost animal returned {0} results." : "Pretraga izgubljenih zivotinja dala je {0} rezultata",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "",
"{plural0} week" : "",
"Default Breed" : "",
"French-Lop" : "",
"Post to Facebook as" : "",
"IP Restriction" : "",
"This will permanently remove the selected records, are you sure?" : "",
"(all)" : "(sve)",
"The shelter reference number" : "",
"Default Coat Type" : "",
"{plural1} urgent entries on the waiting list" : "",
"Remove the microchip fields from animal identification details" : "",
"Ferret" : "",
"Diary date is not valid" : "",
"Return a transferred animal" : "",
"Chocolate Tortie" : "",
"Doberman Pinscher" : "Doberman",
"Dalmatian" : "Dalamtinac",
"Add a photo" : "",
"FTP hostname" : "",
"(unknown)" : "(nepoznato)",
"This type of movement requires a date." : "Ova vrsta kretanja zahtjeva datum",
"Age" : "Godine",
"Change System Options" : "",
"Show the breed fields" : "",
"{plural1} animals are not available for adoption" : "",
"This date of birth is an estimate" : "",
"Hamster" : "Hrcak",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "",
"Days On Shelter" : "",
"Mustang" : "Mustang",
"Super user" : "",
"{plural3} trial adoptions have ended" : "",
"Labrador Retriever" : "Labrador retriver",
"Automatically remove" : "",
"Brindle and Black" : "",
"Save this person" : "",
"Entering 'deceased' in the search box will show you recently deceased animals." : "",
"Transferred Out" : "",
"DOB" : "",
"Member" : "Clan",
"Show short shelter codes on screens" : "",
"Good With Dogs" : "",
"Found from" : "",
"Donation?" : "",
"Import" : "",
"Not Reconciled" : "",
"Karelian Bear Dog" : "Karelijski gonic medvjeda",
"Complaint" : "Žalbe",
"August" : "",
"Due in next week" : "",
"Animal Types" : "",
"SQL dump (ASM2 HSQLDB Format)" : "",
"Diet" : "Dijeta",
"Create lost animal records from the selected forms" : "",
"Income::Sponsorship" : "Prihod: Sponzorstvo",
"Show transactions from" : "",
"Positive" : "Pozitivan",
"Remove the bonded with fields from animal entry details" : "",
"Died" : "",
"Delete Log" : "",
"Diary Task" : "",
"October" : "",
"Up for adoption" : "Za udomljavanje",
"Document Templates" : "Predlosci dokumenta",
"Required" : "",
"Remove retailer functionality from the movement screens and menus" : "",
"Edit template" : "",
"Columns displayed" : "",
"Donations" : "Donacije",
"The animal sex" : "",
"Time" : "",
"Biting" : "",
"Transferred In {0}" : "",
"All animals who are currently quarantined." : "",
"Transactions need a date and description." : "",
"Good With Cats" : "",
"Children" : "",
"Out" : "Izlaz",
"Media" : "Mediji",
"Gaited" : "",
"American Water Spaniel" : "Americki vodni spanijel",
"Time On Shelter" : "",
"Dutch" : "Holandski",
"Warn when adopting to a person who has been banned from adopting animals" : "",
"The primary breed of this animal" : "",
"Thoroughbred" : "",
"Add an animal to the waiting list" : "Dodavanje zivotinje na listu cekanja",
"Altered" : "Sterizacija / Kastracija",
"My diary notes" : "",
"Persian" : "Persijska",
"Medical Profiles" : "",
"No Locations" : "",
"New Profile" : "",
"Vaccinations need an animal and at least a required date." : "",
"Chartreux" : "",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "Kada unosite datum, pritisnite CTRL i koristite kursor za kretanje u kalendaru. Pritisnite t za danasnji datum.",
"Reservation Cancelled" : "",
"Tabby" : "Karakteristicne sare - M mace",
"Account code '{0}' has already been used." : "",
"Rhinelander" : "",
"ASM News" : "",
"Delete Incoming Forms" : "",
"Email this message to all matching users" : "",
"This code has already been used." : "Ova sifra je vec u upotrebi",
"Document file" : "",
"View" : "",
"People matching '{0}'." : "",
"Reason Not From Owner" : "Razlog naveden od strane osobe koja nije vlasnik",
"Locale" : "",
"Mini-Lop" : "",
"Add Waiting List" : "",
"Parvovirus" : "Parvo virus",
"This animal is currently fostered, it will be automatically returned first." : "",
"Cinnamon Tortoiseshell" : "",
"Warn when adopting to a person who lives in the same area as the original owner" : "",
"This can take some time and generate a large file, are you sure?" : "",
"Add Medical Records" : "",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "",
"Sheep" : "Ovca",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "",
"Template for Facebook posts" : "",
"Sponsorship donations" : "Sponzorske donacije",
"Add template" : "",
"Removal" : "",
"Jan" : "",
"Waiting List Donation" : "Donacija za listu cekanja",
"Create boarding cost record when animal is adopted" : "",
"Induct a new animal" : "",
"White" : "Bijela",
"Additional fields" : "Dodatna polja",
"Brussels Griffon" : "Briselski grifon",
"{plural2} unaltered animals have been adopted in the last month" : "",
"Use TLS" : "",
"Add investigation" : "",
"Transactions" : "",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "",
"Email media" : "",
"Change User Settings" : "",
"Updating..." : "",
"Edit transaction" : "",
"Browse sheltermanager.com" : "",
"Expenses::Stationary" : "Troskovi: Kancelarijskog materijala",
"Auto removed due to lack of owner contact." : "",
"Animal (optional)" : "",
"Edit Roles" : "",
"Add Log to Animal" : "",
"System user accounts" : "",
"Attach a link to a web resource" : "",
"View Diary" : "",
"Profile name cannot be blank" : "",
"Person - Type" : "",
"Initials" : "",
"Simple" : "",
"N (Non Shelter Animal)" : "N (Zivotinje koje se ne nalaze u sklonistu)",
"Animal - Details" : "",
"System Admin" : "",
"Lost animal entry {0} successfully created." : "",
"Include deceased" : "",
"New Litter" : "",
"Cattle Dog" : "Stocarski pas",
"Samoyed" : "Samojed",
"Organisation" : "Organizacija",
"Generate a javascript database for the search page" : "",
"Change Log" : "",
"Chinchilla" : "Cincila",
"Start date" : "",
"Roles need a name." : "",
"Appenzell Mountain Dog" : "Apencelski planinski pas",
"Shepherd" : "Ovcar",
"Added" : "",
"Boston Terrier" : "Bostonski terijer",
"Change Found Animal" : "",
"Add Message" : "",
"All animal shelters on file." : "",
"A unique number to identify this movement" : "",
"Booster" : "",
"Found Animal" : "Pronadjena zovotinja",
"Transfers must have a valid transfer date." : "",
"Time on list" : "",
"Siamese" : "",
"If this person is a member, the date that membership expires." : "",
"Cockapoo" : "",
"treatments" : "",
"Black and Tan Coonhound" : "Americki palez-crni rakunski gonic",
"All fields should be completed." : "",
"This year" : "",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "",
"Options" : "Opcije",
"Incoming donations (misc)" : "Prispjevajuce donacije",
"Aged To" : "",
"Only show special needs" : "",
"Apr" : "",
"Money" : "Novac",
"Shar Pei" : "",
"Microchip Date" : "",
"Change Medical Records" : "",
"Transfer?" : "",
"Dachshund" : "Jazavicar",
"Sexes" : "",
"Return Category" : "",
"Next>" : "",
"View Accounts" : "",
"Fawn Tortoiseshell" : "",
"Black" : "Crna",
"View Tests" : "",
"Edit diary tasks" : "Uredjivanje zapisa u dnevniku",
"View the animals in this litter" : "",
"All homecheckers on file." : "",
"Yes/No" : "Da/ne",
"Most relevant" : "",
"Change Transactions" : "",
"Remove previously published files before uploading" : "",
"Litter Reference" : "",
"Monday" : "",
"Find Lost Animal" : "Pretraga izgubljenih zivotinja",
"Split baby/adult age at" : "",
"Add Donation" : "",
"Aged Between" : "",
"Use fancy tooltips" : "",
"Delete Treatments" : "",
"Date" : "Datum",
"View Litter" : "",
"Data" : "",
"Find Animal" : "Nadji zivotinju",
"Superuser" : "",
"All time" : "",
"All fosterers on file." : "",
"Enabled" : "",
"Find found animal returned {0} results." : "Pretraga pronadjenih zivotinja dala je {0} rezultata",
"Default to advanced find person screen" : "",
"Payment Types" : "",
"Cinnamon" : "",
"Install" : "",
"Corgi" : "Velski korgi",
"Omit header/footer" : "",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "",
"History" : "Istorijat",
"Attach File" : "",
"(both)" : "",
"Publish now" : "Objavi sada",
"{plural3} years." : "",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "",
"SmartTag PETID" : "",
"Due in next year" : "",
"Create Waiting List" : "",
"Title First Last" : "",
"Various" : "",
"Password is incorrect." : "",
"Welsh Corgi" : "Velski kori",
"Dogue de Bordeaux" : "Bordoska doga",
"{plural1} months" : "",
"Movement dates clash with an existing movement." : "",
"Hovawart" : "Hovavart",
"Conure" : "Papagaj",
"{plural1} days." : "",
"Mail" : "",
"Balinese" : "Balijska",
"Vaccination book" : "Registar vakcinacija",
"Save this animal" : "",
"Important" : "",
"{plural3} animals were adopted" : "",
"Maltese" : "Maltezer",
"New Waiting List Entry" : "",
"1 treatment" : "",
"Name" : "Naziv",
"This animal was transferred from another shelter" : "",
"Healthy" : "Zdrava",
"Seal" : "",
"Crossbreed" : "",
"treatments, every" : "",
"Publishing template" : "",
"{plural3} tests need to be performed today" : "",
"The result of the FIV test" : "",
"Invalid email address" : "",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "",
"Jindo" : "Korejski jindo pas",
"Work" : "",
"All animal care officers on file." : "",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "",
"Name contains" : "",
"Add Cost" : "",
"after connecting, chdir to" : "",
"Is this a permanent foster?" : "",
"Wed" : "",
"Norwegian Buhund" : "Norveski ovcar",
"Comments" : "Napomene",
"Turkish Angora" : "Turska angora",
"Movement Date" : "",
"UK Giftaid" : "",
"Test" : "",
"PetLink Publisher" : "",
"We" : "",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "",
"American Bulldog" : "Americki buldog",
"Wk" : "",
"Account disabled." : "",
"Lost animals reported in the last 30 days." : "",
"Order published animals by" : "",
"Diary" : "Dnevnik",
"{0} incurred in costs" : "",
"The SmartTag type" : "",
"Get more reports from sheltermanager.com" : "",
"Default Donation Type" : "",
"Show the location unit field" : "",
"Jump to diary" : "",
"Show quick links on all pages" : "",
"Brotogeris" : "",
"Annual" : "Godisnje",
"Welsh Terrier" : "Velski terijer",
"Large" : "Velika",
"Add Accounts" : "",
"Recently Changed" : "",
"Points for matching lost/found area" : "",
"Template Name" : "",
"Animal shortcode format" : "",
"Diary note {0} rediarised for {1}" : "",
"Find this address on a map" : "",
"Grey and White" : "Sivo-bijela",
"Delete Media" : "",
"Shiba Inu" : "",
"Hound" : "Lovacki pas",
"First Last" : "",
"Edit test" : "",
"Californian" : "Kalifornijski",
"Add a found animal" : "Dodaj pronadjenu zivotinju",
"Vaccination Types" : "",
"Bite" : "Grize",
"Income" : "Prihod",
"Zipcode" : "",
"Jump to media" : "",
"Facebook page" : "",
"Horizontal Pitch" : "",
"Location" : "Mjesto",
"Chinese Foo Dog" : "Kineski 'fu' pas, Pas srece",
"View Person" : "",
"Carolina Dog" : "Americki dingo",
"Pig (Farm)" : "Svinja",
"Saint Bernard St. Bernard" : "Bernandinac",
"Dead On Arrival" : "",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "U sklonistu {0} dana, dnevni troskovi {1}, evidencija ukupnih troskova <b>{2}</b>",
"Saddlebred" : "",
"This person has not passed a homecheck" : "Ova osoba nema adekvatan prostor za udomljavanje",
"Diary subject cannot be blank" : "",
"Edit vaccination" : "",
"Bank::Current" : "Banka::Tekuci",
"All homechecked owners on file." : "",
"weeks after last contact." : "",
"Paint/Pinto" : "",
"Burmese" : "Burmanska",
"African Grey" : "",
"Himalayan" : "Himelajska",
"Points for being found within 2 weeks of being lost" : "",
"User Roles" : "",
"This animal is quarantined" : "",
"Report Title" : "",
"Attach a file" : "",
"Give" : "",
"Standardbred" : "",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "",
"Reset Password" : "",
"Users need a username, password and at least one role or the superuser flag setting." : "",
"Clydesdale" : "",
"Basset Hound" : "Baset",
"Field Spaniel" : "Poljski spanijel",
"Last Month" : "",
"Vouchers" : "",
"Domestic Medium Hair" : "Domaca srednje duzine dlake",
"Remove the FIV/L test fields from animal health details" : "",
"Default to advanced find animal screen" : "",
"Add log" : "",
"Log requires an animal." : "",
"{plural2} animals were transferred to other shelters" : "",
"Yes/No/Unknown" : "",
"Animal Shelter Manager Login" : "Prijava na ASM",
"Date brought in cannot be blank" : "Datum prijema je obavezno polje",
"Publishing" : "",
"Create a new template by copying the selected template" : "",
"This animal was dead on arrival to the shelter" : "",
"Unaltered Adopted Animals" : "",
"Foster movements must have a valid foster date." : "",
"Logout" : "Odjava",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "",
"Zipcode contains" : "",
"Receive a donation" : "Primanje donacija",
"Remove unwanted functionality" : "",
"{plural1} animals were transferred to other shelters" : "",
"English Shepherd" : "Engleski ovcar",
"Available for adoption" : "Dostupna za udomljavanje",
"Australian Kelpie" : "Australisjki kelpi",
"The date this person was homechecked." : "",
"Species A-Z" : "",
"Highlight" : "",
"Birman" : "Birmanska",
"Any information about the animal" : "",
"Area Found" : "Mjesto pronalaska",
"Available sheltermanager.com reports" : "",
"Mandatory" : "",
"To" : "",
"Date Reported" : "Datum prijave",
"Animal Emblems" : "",
"{plural0} animal was transferred to another shelter" : "",
"{plural1} trial adoptions have ended" : "",
"Colors" : "",
"All animals matching current publishing options." : "",
"Lhasa Apso" : "",
"This animal has a SmartTag PETID" : "",
"Edit online form" : "",
"Bulk Complete Waiting List" : "",
"Size" : "Velicina",
"Additional" : "",
"Document Repository" : "",
"FIV Result" : "",
"Transfer" : "Transfer",
"Akbash" : "Akbas",
"Palomino" : "",
"Somali" : "Somalijska",
"Find Found Animal" : "Pretraga pronadjenih zivotinja",
"Waiting List - Removal" : "",
"Profile" : "",
"The result of the FLV test" : "",
"Complete" : "",
"Litters" : "Legla",
"Chart" : "",
"Lost animals must have a contact" : "Mora postojati kontakt osoba za izgubljenu zivotinju",
"URL" : "",
"First Vaccination" : "Prva vakcinacija",
"Silver" : "Srebrna",
"When creating donations from the Move menu screens, mark them due instead of received" : "",
"Culling" : "Lucenje nezeljenih jedinki",
"Comments Contain" : "",
"Tosa Inu" : "",
"When ASM should stop showing this message" : "",
"Clear" : "",
"{plural0} result found in {1} seconds. Order: {2}" : "",
"Create missing lookup values" : "",
"Donation book" : "",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "",
"Brittany Spaniel" : "Britanski spanijel",
"Defaults" : "",
"Owl" : "Sova",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "",
"No view permission for this report" : "",
"Add Litter" : "",
"Message Board" : "",
"In" : "Ulaz",
"Old English Sheepdog" : "Staroengleski ovcar",
"Date found cannot be blank" : "Polje datum pronalska ne moze biti prazno",
"Holland Lop" : "",
"{plural2} trial adoptions have ended" : "",
"Vaccination" : "Vakcinacija",
"Pumi" : "Pumi",
"Points for matching age group" : "",
"Form URL" : "",
"Death" : "",
"Lookup" : "Pregledaj",
"Eclectus" : "Elektus papagaj",
"Microchip Number" : "",
"Peruvian Inca Orchid" : "Peruanski Inka pas",
"{plural2} people with active reservations have not been homechecked" : "",
"Completed" : "Zavrsen",
"View Diets" : "",
"Donation Types" : "",
"State contains" : "",
"When I generate a document, make a note of it in the log with this type" : "",
"Change" : "",
"Default Color" : "",
"To Retailer" : "",
"Adoptions {0}" : "",
"Adoption fee donations" : "Donacije troška udomljavanja",
"Manx" : "Manx",
"Starts" : "",
"Boykin Spaniel" : "Bojkin spanijel",
"Dandi Dinmont Terrier" : "Dendi dinmnont terijer",
"This type of movement requires a person." : "Ova vrsta kretanja zahtjeva osobu",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "",
"<Prev" : "",
"Create a new animal from this found animal record" : "",
"SQL dump (without media)" : "",
"Note" : "",
"Publishing Options" : "Opcije objavljivaja",
"Split pages with a species name prefix" : "",
"Show" : "",
"Greyhound" : "Engleski kratkodlaki hrt",
"9 months" : "",
"Pig" : "Svinja",
"Publish to PetFinder.com" : "Objavi na PetFinder.com",
"Brought In" : "Datum dolaska",
"Animal '{0}' successfully marked deceased." : "",
"When displaying person names in lists, use the format" : "",
"Payment Type" : "",
"Add message" : "",
"FLV" : "Macija leukemija",
"SQL interface" : "SQL interfejs",
"Animal Type" : "",
"New" : "",
"Default Test Type" : "",
"Publish to MeetAPet.com" : "",
"Online Forms" : "",
"Match this animal with the lost and found database" : "",
"Homechecked by" : "",
"Edit document" : "",
"Create found animal records from the selected forms" : "",
"{plural2} animals were euthanized" : "",
"Access them via the url 'image?mode=dbfs&id=/reports/NAME'" : "",
"{plural3} animals are not available for adoption" : "",
"Oriental Long Hair" : "Orjentalna dugodlaka",
"Completed notes upto today" : "",
"SmartTag Publisher" : "",
"Configuration" : "Postavke",
"Cash" : "",
"Reservation Date" : "",
"Husky" : "Haski",
"Norfolk Terrier" : "Norfolski terijer",
"Description Contains" : "",
"Create a new template" : "",
"View Person Links" : "",
"on" : "",
"Polish Lowland Sheepdog" : "Poljski nizijski ovcar",
"New Task" : "",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "Vecina preglednika omogucava pristup evidenciji ove sesije ukucavanjem djela njenog naziva u polje za adresu",
"System Options" : "",
"White and Tan" : "Bijelo-zuta",
"Message" : "",
"or" : "",
"Include this information on animals shared via Facebook" : "",
"Young Adult" : "Mlada osoba",
"No" : "Ne",
"Can afford donation?" : "",
"Add" : "Dodajte...",
"Retailer movements must have a valid movement date." : "",
"{plural1} weeks." : "",
"Black and Brown" : "Crno-smedja",
"Original Owner" : "",
"This animal is part of a cruelty case against an owner" : "",
"Active users: {0}" : "Aktivnih korisnika: {0}",
"More Medications" : "",
"Add Log" : "",
"Letter" : "",
"Italian Spinone" : "",
"Expiry date" : "",
"You can drag and drop animals in shelter view to change their locations." : "",
"Kishu" : "Kisu",
"If you don't select any locations, publishers will include animals in all locations." : "",
"Error" : "",
"Small" : "Mala",
"Add Tests" : "",
"Irish Setter" : "Irski seter",
"Skye Terrier" : "Skaj terijer",
"Set the email content-type header to text/html" : "",
"Complete Tasks" : "",
"Surname" : "",
"Bulk Complete Vaccinations" : "",
"Remove the tattoo fields from animal identification details" : "",
"Add details of this email to the log after sending" : "",
"Close" : "Zatvori",
"Entering 'os' in the search box will show you all shelter animals." : "",
"Change Date Required" : "",
"Gecko" : "",
"This person lives in the same area as the person who brought the animal to the shelter." : "",
"Alphabetically Z-A" : "",
"{plural1} months." : "",
"{plural3} medical treatments need to be administered today" : "",
"Plott Hound" : "",
"Thursday" : "",
"Not reconciled" : "",
"Waiting list entry successfully added." : "",
"Costs need a date and amount." : "",
"Change Report" : "",
"Shelter animals" : "",
"Hold until" : "",
"{plural3} animals were reclaimed by their owners" : "",
"Treatment marked as given for {0} - {1}" : "",
"Sizes" : "",
"Import a CSV file" : "",
"Criteria:" : "",
"NNN or NN = number unique for this type of animal for this year" : "",
"A movement must have a reservation date or type." : "",
"You have unsaved changes, are you sure you want to leave this page?" : "",
"Black and Brindle" : "",
"Vertical Pitch" : "",
"Password" : "",
"Transfer In" : "",
"Forenames" : "",
"(master user, not editable)" : "",
"Edit {0}" : "",
"Warn if the name of the new animal is similar to one entered recently" : "",
"Leukaemia" : "Leukemija",
"Start at" : "",
"All vets on file." : "",
"Entry" : "",
"Once assigned, codes cannot be changed" : "",
"This will permanently remove this person, are you sure?" : "",
"View littermates" : "",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "",
"Breeds" : "",
"Forms need a name." : "",
"The date this animal was put on the waiting list" : "",
"First Names" : "",
"Import complete with {plural3} errors." : "",
"This animal has movements and cannot be removed." : "",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "",
"Find Animal/Person" : "",
"Donations need at least one date, an amount and a person." : "",
"Update publishing options" : "",
"Deceased" : "Uginula",
"Image file" : "",
"Only show cruelty cases" : "",
"Urgencies" : "",
"Default transaction view" : "",
"Fox Terrier" : "Foksterijer",
"Checkered Giant" : "",
"Unknown" : "Nije poznato",
"Breed" : "Rasa",
"{plural1} unaltered animals have been adopted in the last month" : "",
"Black Mouth Cur" : "",
"I've finished, Don't show me this popup again." : "",
"SM Account" : "",
"Points required to appear on match report" : "",
"Cymric" : "Cymric",
"Beauceron" : "Boseron",
"Top Margin" : "",
"Brown" : "Smedja",
"Fostered" : "",
"Adoption Fee" : "",
"Due in next month" : "",
"Your CSV file should have a header row with field names ASM recognises. Please see the manual for more information." : "",
"Not Available for Adoption" : "Nije dostupan za udomljavanje",
"to" : "",
"Fosterer" : "",
"Moving..." : "",
"Add extra images for use in reports and documents" : "",
"American Eskimo Dog" : "Americki eskimski pas",
"<<" : "",
"Schnauzer" : "Snaucer",
"Tattoo Number" : "",
"Animal Selection" : "",
"Parrot (Other)" : "Papagaj (Ostali)",
"Refresh" : "",
"Lost and Found" : "",
"Amount" : "",
"Edit donation" : "",
"Other Organisation" : "",
"Edit All Diary Notes" : "",
"The species of this animal" : "",
"Homecheck areas" : "",
"Expenses::Postage" : "Troskovi::Postarina",
"Home Phone" : "",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"The movement number '{0}' is not unique." : "",
"and" : "",
"Macaw" : "Ara papagaji",
"Jump to movements" : "",
"Canary" : "Kanarinac",
"Temporary Vaccination" : "Privremena vakcinacija",
"Medical book" : "Zdravstveni registar",
"Or upload a script" : "",
"[None]" : "Nema",
"Lost animal entries matching '{0}'." : "",
"English Toy Spaniel" : "Engleski patuljasti spanijel",
"Airedale Terrier" : "Erdel terijer",
"All members on file." : "",
"Log" : "Dnevnik",
"HTML/FTP Publisher" : "",
"{plural1} shelter animals have people looking for them" : "",
"Date Of Birth" : "Datum rodjenja",
"None" : "Ništa",
"Points for matching zipcode" : "",
"Publish HTML via FTP" : "Objavit HTML putem FTP",
"This person has been under investigation" : "",
"A list of areas this person will homecheck - eg: S60 S61" : "",
"Parrotlet" : "",
"Vaccinate" : "",
"All volunteers on file." : "",
"Vaccination Given" : "",
"Finnish Spitz" : "Finski spic",
"Abuse" : "Zlostvaljana",
"Date Removed" : "",
"Vaccinations" : "Vakcinacije",
"People with active reservations, but no homecheck has been done." : "",
"Cancel unadopted reservations after" : "",
"{plural2} results found in {1} seconds. Order: {2}" : "",
"Performed" : "",
"Default image for documents" : "",
"No matches found." : "",
"Log entries need a date and text." : "",
"Short" : "Kratka",
"Puli" : "Puli",
"{0} received in donations" : "",
"Future notes" : "",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "Srednjim klikom misa mozete otvoriti lik u novom prozoru preglednika (pritiskom tockica kod vecine modernih miseva).",
"Recently Adopted" : "",
"Pass Homecheck" : "",
"Contact" : "Kontakt",
"Include cruelty case animals" : "",
"GiftAid" : "",
"No data to show on the report." : "",
"Go the system users screen and add user accounts for your staff." : "",
"Thai Ridgeback" : "Tajlandski",
"Shelter stats (all time)" : "",
"Add Media" : "",
"Prairie Dog" : "Prerijski pas",
"Return date cannot be before the movement date." : "",
"FLV Result" : "",
"Tennessee Walker" : "",
"Date and notes are mandatory." : "",
"This screen allows you to add extra images to your database, for use in reports and documents." : "",
"Logged in Facebook user" : "",
"Pension" : "Penzija",
"Aug" : "",
"View Staff Person Records" : "",
"Generate a document from this animal" : "",
"Unspecified" : "Nije navedeno",
"Edit voucher" : "",
"Animal" : "Zivotinja",
"Standard" : "Standardno",
"estimate" : "",
"Male" : "",
"Edit Online Forms" : "",
"Pot Bellied" : "",
"Cost Type" : "",
"Liver" : "",
"Display Index" : "",
"Died {0}" : "",
"Medical profiles" : "",
"more" : "",
"Change Vaccinations" : "",
"When matching lost animals, include shelter animals" : "",
"Multi-Lookup" : "",
"Add Animals" : "",
"Su" : "",
"Wire-haired Pointing Griffon" : "",
"American" : "Americka",
"CSV of person data" : "",
"Found" : "Pronadjena",
"Sa" : "",
"Miniature Pinscher" : "Patuljasti pticar",
"Jersey Wooly" : "",
"Produce a CSV File" : "",
"New Voucher" : "",
"If this is the web preferred image, web publishers will use these notes as the animal description" : "",
"Manually enter codes (do not generate)" : "",
"{plural2} weeks." : "",
"The date the donation was received" : "",
"Open reports in a new browser tab" : "",
"Include fostered animals" : "",
"Edit diary task" : "",
"Person - Additional" : "",
"Edit the current waiting list" : "Uredjivanje trenutne liste cekanja",
"RescueGroups Publisher" : "",
"Area Lost" : "",
"Bulk Complete Medical Records" : "",
"Remove short shelter code box from the animal details screen" : "",
"Add this text to all animal descriptions" : "",
"Portuguese Water Dog" : "Portugalski plivajuci pas",
"{0} cannot be blank" : "{0} ne može biti prazno",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "",
"Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "",
"Incoming" : "",
"Recently Entered Shelter" : "",
"German Wirehaired Pointer" : "Njemacki ostrodlaki pticcar",
"Hidden" : "",
"All diary notes" : "",
"Animal - Death" : "",
"Shelter code {0} has already been allocated to another animal." : "Sifra skloništa je vec dodjeljena drugoj zivotinji",
"Expires" : "",
"English Bulldog" : "Engleski buldog",
"New Diet" : "",
"Recently Fostered" : "",
"All animals on the shelter." : "",
"Dog" : "Pas",
"Flags" : "",
"Withdrawal" : "",
"Dutch Shepherd" : "Njemacki ovcar",
"Password for '{0}' has been reset to default of 'password'" : "",
"Brought In By" : "",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "",
"Show the internal location field" : "",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "",
"Edit Lookups" : "",
"Code contains" : "",
"Save this waiting list entry" : "",
"Entry Category" : "",
"File" : "",
"Mail Merge" : "",
"Change Diets" : "",
"View Document Repository" : "",
"Horse" : "Konj",
"Kyi Leo" : "",
"{plural2} reservations have been active over a week without adoption" : "",
"Enable the waiting list functionality" : "",
"Diary for {0}" : "",
"Default urgency" : "",
"The date this animal was lost" : "",
"Oct" : "",
"Movement Number" : "",
"Thumbnail size" : "",
"Silver Fox" : "Srebrena lisica",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "",
"March" : "",
"Nova Scotia Duck-Tolling Retriever" : "Novoskotski retriver",
"Priority Floor" : "",
"Wirehaired Terrier" : "Kovrdzavi terijer",
"Estimate" : "",
"Treatment name cannot be blank" : "",
"All animals who are currently held in case of reclaim." : "",
"Edit report" : "",
"Last name" : "",
"Select a person to attach this form to." : "",
"Retailer Book" : "",
"Senior" : "Starija osoba",
"Include this image when publishing" : "",
"Mark new animals as not for adoption" : "",
"Cell" : "",
"Match Lost and Found" : "",
"Generate documentation" : "",
"This person is very similar to another person on file, carry on creating this record?" : "",
"Waiting List: {0}" : "Lista cekanja: {0}",
"Generate Documents" : "",
"Terrapin" : "Vodena kornjaca",
"Rat" : "Pacov",
"Chihuahua" : "Civava",
"Diary task items need a pivot, subject and note." : "",
"Delete Animals" : "",
"Warn when adopting to a person who has not been homechecked" : "",
"Updated." : "",
"Facebook Sharing" : "",
"Delete Vaccinations" : "",
"Search sort order" : "",
"Rhea" : "",
"Donor" : "Donator",
"Treatment Given" : "",
"Reservation book" : "Registar rezervacija",
"Expenses::Water" : "Troskovi::Voda",
"Lowest" : "",
"Medium" : "Srednje",
"New Role" : "",
"{plural2} tests need to be performed today" : "",
"Default Entry Reason" : "",
"This animal should be held in case it is reclaimed" : "",
"Flag" : "",
"This person is linked to an animal and cannot be removed." : "",
"Create a new document" : "",
"Report" : "",
"Add diary task" : "",
"Move an animal to a retailer" : "Premjestite zivotinju kod preprodavca",
"Weekly" : "Sedmicno",
"Open records in a new browser tab" : "",
"Lookups" : "",
"New Zealand" : "",
"Receive" : "",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "",
"{plural1} tests need to be performed today" : "",
"Sealyham Terrier" : "Silhem terijer",
"Age Group 3" : "",
"Age Group 2" : "",
"Age Group 1" : "",
"Age Group 7" : "",
"Age Group 6" : "",
"Age Group 5" : "",
"Age Group 4" : "",
"Age Group 8" : "",
"Default Brought In By" : "",
"Neutered" : "",
"Delete" : "Brisanje",
"You can set a default amount for different donation types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "",
"Video Link" : "",
"Waiting list entry for {0} ({1})" : "Unos na listu cekanja za {0} ({1})",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "",
"Returned To Owner" : "Vracen vlasniku",
"Generate a random name for this animal" : "",
"Clear tables before importing" : "",
"Turkish Van" : "Turski van",
"Upload Document" : "",
"Make this the default image when viewing this record and publishing to the web" : "",
"Diary task: {0}" : "Zadatak u dnevniku: {0}",
"Leonberger" : "",
"{plural1} animals were euthanized" : "",
"{plural2} animals were adopted" : "",
"This Week" : "",
"Fri" : "",
"Siberian" : "Siberijska",
"Saluki" : "Perzijski hrt",
"The selected file is not an image." : "",
"Bluetick Coonhound" : "Plavi gonic rakuna",
"{plural1} medical treatments need to be administered today" : "",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "",
"Advanced find animal screen defaults to on shelter" : "",
"Add Vouchers" : "",
"Foxhound" : "Lisicar",
"Attach this form to an existing person" : "",
"Issue a new insurance number for this animal/adoption" : "",
"This animal has a tattoo" : "",
"Toy Fox Terrier" : "Patuljasti foksterijer",
"Started" : "",
"{plural2} vaccinations need to be administered today" : "",
"Italian Greyhound" : "Italijanski kratkodlaki hrt",
"View Animal Vet" : "",
"Reservations must have a valid reservation date." : "",
"Date put on" : "",
"Puppies (under {0} months)" : "",
"This animal has been heartworm tested" : "",
"Gift Aid" : "",
"Edit Header/Footer" : "",
"8 weeks" : "",
"Tattoo Date" : "",
"People Looking For" : "",
"The date the trial adoption is over" : "",
"Add medical regimen" : "",
"Create a new waiting list entry from this found animal record" : "",
"Did you know?" : "",
"Show tips on the home page" : "",
"(blank)" : "(prazno)",
"Reason not from Owner" : "",
"Edit medical regimen" : "",
"British Shorthair" : "Britanka kratkodlaka",
"Delete Litter" : "",
"Income::WaitingList" : "Prihod:ListaCekanja",
"Cancel" : "Otkazi",
"Devon Rex" : "Devon Rex",
"Save and leave" : "",
"Loading..." : "Ucitavanje",
"Reason for Entry" : "",
"Default Size" : "",
"Current" : "",
"Received in last year" : "",
"{0} {1} aged {2}" : "{0} {1} starosti {2}",
"The coat type of this animal" : "",
"Users" : "",
"Shelter code cannot be blank" : "Sifra slonista je obavezno polje",
"Parakeet (Other)" : "Mali papagaj (Ostali)",
"Add a test" : "",
"Treeing Walker Coonhound" : "",
"{plural0} month." : "",
"Media notes contain" : "",
"Calico" : "Calico - domaca macka bijelo-crno-narandzasta",
"American Curl" : "Americki kovrdzavi",
"Thu" : "",
"Chart (Steps)" : "",
"Entry Reasons" : "",
"{plural1} years." : "",
"Change Tests" : "",
"Add Document to Repository" : "",
"New Vaccination" : "",
"{plural1} results found in {1} seconds. Order: {2}" : "",
"Bernese Mountain Dog" : "Bernski planinski pas",
"Cost" : "",
"Publish to HelpingLostPets.com" : "",
"Edit additional field" : "Uredite dodatno polje",
"Address contains" : "",
"Clumber Spaniel" : "Klumber spanijel",
"The date the owner last contacted the shelter" : "",
"Total donations" : "",
"T = first letter of animal type" : "",
"Unsuitable Accomodation" : "Neprikladan smjestaj",
"Show the color field" : "",
"Extra images" : "Dodatne slike",
"Rediarised" : "",
"The date the foster is effective from" : "",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "",
"Vietnamese Pot Bellied" : "",
"Medicate Animal" : "",
"Delete Donation" : "",
"Field names should not contain spaces." : "",
"Reconciled" : "",
"less" : "",
"A unique reference for this litter" : "",
"Link" : "",
"Generate" : "",
"Select date for diary task" : "",
"Add lost animal" : "Dodaj nestalu zivotinju",
"Messages successfully sent" : "",
"Rottweiler" : "Rotvajler",
"(everyone)" : "(svi)",
"Search returned {0} results." : "Pretragom pronadeno {0} rezultata",
"MM = current month" : "",
"Found Animal - Additional" : "",
"Quaker Parakeet" : "",
"Display" : "",
"You can use incoming forms to create new records or attach them to existing people." : "",
"Chart (Point)" : "",
"(any)" : "",
"{0} is running ({1}% complete)." : "",
"Held" : "",
"The date the animal was heartworm tested" : "",
"Female" : "Ženka",
"Mini Rex" : "",
"Hairless" : "",
"Bombay" : "Bombay",
"Dogo Argentino" : "Argentinska doga",
"Confirm Password" : "",
"Do not show" : "",
"Lost animal - {0} {1} [{2}]" : "Izgubljena zivotinja- {0} {1} [{2}]",
"For" : "",
"Create" : "Kreiraj",
"First name(s)" : "",
"Work Phone" : "",
"CSV of animal/adopter data" : "",
"Kai Dog" : "",
"This animal has been altered" : "",
"Good With Children" : "",
"Execute the SQL in the box below" : "",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "",
"Belgian Shepherd Tervuren" : "Belgijski ovcar tervuren",
"Change Date Given" : "",
"Phone" : "",
"Norwegian Elkhound" : "Norveski gonic (loseva)",
"Extra Images" : "",
"Select all" : "",
"Entered shelter" : "Datum prijema u azil",
"Expenses::Food" : "Troskovi:Hrana",
"1 year" : "1 godina",
"Least recently changed" : "",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "",
"Delete Report" : "",
"Manual" : "",
"{plural2} months." : "",
"Income::EntryDonation" : "Prihod:PrijemneDonacije",
"{plural3} animals entered the shelter" : "",
"Not for adoption" : "Nije za udomljavanje",
"{plural2} animals died" : "",
"Ostrich" : "Noj",
"Maremma Sheepdog" : "",
"German Pinscher" : "Njemacki gonic",
"Brindle and White" : "",
"Publish Animals to the Internet" : "",
"Special Needs" : "Posebne potrebe",
"Diary notes need a date and subject." : "",
"FIV/L Test Date" : "",
"Death Reason" : "",
"Date of birth cannot be blank" : "Datum rođenja je obavezno polje",
"Medicate" : "",
"Unit" : "",
"Stop Publishing" : "",
"Brindle" : "",
"Yes" : "Da",
"Setter" : "Seter",
"Keeshond" : "Njemacki spic, vuciji spic",
"Edit notes" : "",
"Edit log" : "",
"No publishers are running." : "",
"Harlequin" : "",
"End at" : "",
"Florida White" : "",
"Diary tasks need a name." : "",
"6 months" : "",
"Delete Accounts" : "",
"This animal has active reservations, they will be cancelled." : "",
"This month" : "",
"Eligible for gift aid" : "",
"Homechecker" : "",
"Waiting list entries must have a contact" : "",
"On shelter for {0} days. Total cost: {1}" : "U azilu {0} dana. Ukupni troskovi: {1}",
"Euthanized" : "Eutaniziran",
"Homechecked" : "",
"Copy from animal comments" : "",
"Permanent Foster" : "",
"{plural3} weeks." : "",
"Donkey" : "Magarac",
"Location and Unit" : "",
"Emu" : "Emu",
"Account code cannot be blank." : "",
"Cavalier King Charles Spaniel" : "Kavaljerski spanijel kralja Carlsa",
"{plural1} animals entered the shelter" : "",
"Th" : "",
"Remove the declawed box from animal health details" : "",
"All Publishers" : "",
"Issued" : "",
"Delete Investigation" : "",
"YY or YYYY = current year" : "",
"German Shorthaired Pointer" : "Njemacki kratkodlaki pticar",
"Very Large" : "Veoma velika",
"Done" : "",
"People with overdue donations." : "",
"Unaltered" : "",
"Non-Shelter" : "",
"This person has not passed a homecheck." : "",
"Applehead Siamese" : "Sijamska",
"City" : "",
"Bullmastiff" : "Bulmastif",
"Name Contains" : "",
"Australian Shepherd" : "Australijski ovcar",
"{plural0} reservation has been active over a week without adoption" : "",
"Number of animal links to show" : "",
"Successfully attached to {0}" : "",
"Share this animal on Facebook" : "",
"Shetland Sheepdog Sheltie" : "Setlandski ovcarski pas",
"View Vaccinations" : "",
"Abandoned" : "Napustena",
"{plural1} people have overdue donations" : "",
"The date this animal was returned to its owner" : "",
"The period in days before waiting list urgency is increased" : "",
"Sorrel" : "",
"Add a new person" : "Dodaj novu osobu",
"{plural2} animals were reclaimed by their owners" : "",
"White and Brown" : "Bijelo-smedja",
"Import complete with {plural0} error." : "",
"Last Location" : "",
"Extra-Toes Cat (Hemingway Polydactyl)" : "",
"Canadian Hairless" : "Kanadska bezdlaka",
"Bearded Collie" : "Bradati koli",
"New Report" : "",
"{0} results." : "",
"Cow" : "Krava",
"Investigator" : "",
"Expenses::Gas" : "Trsokovi::Plin",
"You can sort tables by clicking on the column headings." : "Tabele mozete sortirati klikom na zaglavlje kolone",
"Belgian Shepherd Laekenois" : "Belgijski ovcar lekenoa",
"Execute Script" : "",
"Include Removed" : "",
"Auto log users out after this many minutes of inactivity" : "",
"Bedlington Terrier" : "Bedlingtonski terijer",
"Camel" : "Kamila",
"Cost record" : "",
"Telephone Bills" : "Telefonski racuni",
"Select a person" : "",
"Reupload animal images every time" : "",
"Publisher Logs" : "Zapisi o objavljivanju",
"Points for matching color" : "",
"Date Brought In" : "",
"Animals per page" : "",
"Vet" : "Veterinar",
"Found animals must have a contact" : "Mora postojati kontakt osoba za pronadjene zivotinje",
"Password successfully changed." : "Lozinka je uspjesno promjenjena.",
"Tortie" : "Pošpricane pjegama i šarama",
"Checked By" : "",
"Publish to AdoptAPet.com" : "Objavi na AdoptPet.com",
"Bobtail" : "Bobtejl",
"The date the transfer is effective from" : "",
"New Fosterer" : "",
"New task detail" : "",
"(use system)" : "",
"Japanese Bobtail" : "Japanska bobtejl",
"Delete Diets" : "",
"The date the animal was FIV/L tested" : "",
"Chocolate" : "Cokolandna",
"The name of the page you want to post to (eg: Your Humane Society). Leave blank to post to your wall." : "",
"Snake" : "Zmija",
"Both" : "",
"Membership Number" : "",
"Tortoise" : "Kornjaca",
"English Setter" : "Engleski seter",
"This animal has special needs" : "",
"Delete Diary" : "",
"Swedish Vallhund" : "Svedski govedar",
"Yellow and Grey" : "Zuto-siva",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "Ova baza podataka je zakljucana. Zapisi se ne mogu se dodavati, mijenjati ili brisati zapisi, omogucen je samo pregled.",
"Movement Type" : "",
"Pheasant" : "Fazan",
"Links" : "Linkovi",
"Points for matching sex" : "",
"Tortie and White" : "",
"Update the daily boarding cost for this animal" : "",
"{plural2} years." : "",
"Species Z-A" : "",
"Littermates" : "",
"Add online form" : "",
"Delete Waiting List" : "",
"{plural2} animals entered the shelter" : "",
"Lost Animal: {0}" : "Izgubljena zivotinja: {0}",
"Pekingese" : "Pekinezer",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "Kada koristite Premjestaj > Privremeno udomite zivotinju, ASM vas automatski vraca na svaki otvorni premjestaj privremenog udomljavanja prije kreiranja privremenog udomljavanja.",
"Confirm" : "",
"Single Treatment" : "",
"One-Off" : "",
"Gas Bills" : "Racuni za plin",
"Entlebucher" : "Entlebuski planinski pas",
"Stolen" : "Ukradena",
"Spitz" : "Spic",
"Email" : "",
"Crossbreed animal should have different breeds" : "",
"Age Groups" : "",
"{0} - {1} {2}" : "{0} - {1} {2}",
"English Lop" : "",
"Return an animal from another movement" : "Povratak zivotinje nakon boravka na drugom mjestu",
"4 weeks" : "",
"Test Types" : "",
"You can change how ASM looks by choosing a new theme under Settings-Options- Shelter Details-Visual Theme." : "",
"Chart (Pie)" : "",
"Add a new animal" : "Dodajte novu zivotinju",
"Invalid username or password." : "",
"User Accounts" : "",
"Portugese Podengo" : "Portugalski gonic",
"PetFinder Publisher" : "",
"Status" : "",
"Released To Wild {0}" : "",
"Euthanized {0}" : "",
"Foster successfully created." : "",
"Show the size field" : "",
"Mail Merge - {0}" : "",
"View Log" : "",
"Ringneck/Psittacula" : "",
"Turtle" : "Kornjača",
"Pony" : "Poni",
"{plural3} people have overdue donations" : "",
"Inactive - do not include" : "",
"Forbidden" : "",
"Copy of {0}" : "",
"{plural0} animal entered the shelter" : "",
"Death Comments" : "",
"Normal user" : "",
"Briard" : "Brijeski ovcar",
"Row" : "",
"Finnish Lapphund" : "Laponski pas",
"Siberian Husky" : "Sibirski haski",
"Egyptian Mau" : "Egipatski Mau",
"Test Animal" : "",
"Show the litter ID field" : "",
"ACO" : "",
"Shares" : "Dionice",
"Quick Links" : "",
"This will permanently remove this waiting list entry, are you sure?" : "",
"Irish Wolfhound" : "",
"Tiger" : "",
"Dead on arrival" : "",
"Remember me on this computer" : "",
"Forgotten password?" : "",
"Add a diary note" : "Dodavanje zapisa u dnevnik",
"Invalid time, times should be in HH:MM format" : "",
"This person is not flagged as a fosterer and cannot foster animals." : "",
"{0} treatments every {1} days" : "{0} treatments svakog {1} dana",
"Send via email" : "",
"Homecheck Areas" : "",
"All notes upto today" : "",
"Animals" : "",
"{0} {1}: posted to Facebook page {2} by {3}" : "",
"inches" : "",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "",
"Time on shelter" : "Vrijeme provedeno u azilu",
"The reason the owner wants to part with the animal" : "",
"Edit account" : "",
"{plural0} vaccination needs to be administered today" : "",
"Tests need an animal and at least a required date." : "",
"{plural0} unaltered animal has been adopted in the last month" : "",
"Lilac" : "",
"Loan" : "Zajam",
"Munchkin" : "",
"Books" : "",
"American Wirehair" : "Americki ostrodlaki",
"Type of animal links to show" : "",
"Log Types" : "",
"Iguana" : "Iguana",
"English Springer Spaniel" : "Engleskispringer spanijel",
"Papillon" : "",
"Belgian Shepherd Dog Sheepdog" : "Belgijski ovcar -ovcarski pas",
"Tu" : "",
"Attach" : "",
"Good with children" : "",
"Reserve an animal" : "Rezervirajte zivotinju",
"Adopt an animal" : "Udomite zivotinju",
"{0}: {1} {2} - {3} {4}" : "{0}: {1} {2} - {3} {4}",
"Find animal" : "Nadji zivotinju",
"Map" : "",
"Mar" : "",
"May" : "",
"Default Return Reason" : "",
"{plural3} urgent entries on the waiting list" : "",
"Waiting List" : "Lista cekanja",
"A4" : "",
"View Vouchers" : "",
"White and Liver" : "Bijela i boja cokolade",
"Publisher Breed" : "",
"Diary date cannot be blank" : "",
"Quarantine" : "",
"Last Week" : "",
"The date reported to the shelter" : "",
"Skunk" : "Tvor",
"Dialog title" : "",
"The reason this animal was removed from the waiting list" : "",
"Is this a trial adoption?" : "",
"Tan and White" : "Zuto-bijela",
"Comments contain" : "",
"Set publishing options" : "Postvake opcija objavljivanja",
"Reservation successfully created." : "",
"Satin" : "",
"Unable to Afford" : "",
"Ibizan Hound" : "",
"Edit litters" : "",
"{plural0} year" : "",
"{0} {1} ({2} treatments)" : "",
"Monthly" : "Mjesecno",
"Bengal" : "Benglaska",
"Delete this record" : "",
"This animal has the same name as another animal recently added to the system." : "",
"Asset" : "Imovina",
"Last changed by {0} on {1}" : "",
"Homecheck Date" : "",
"Person Flags" : "",
"The shelter category for this animal" : "",
"Parent" : "",
"Returning" : "",
"{plural1} reservations have been active over a week without adoption" : "",
"SMTP server" : "",
"Remove the online form functionality from menus" : "",
"Goldfish" : "Zlatna ribica",
"Nov" : "",
"The date the reservation is effective from" : "",
"Include held animals" : "",
"Lakeland Terrier" : "Lakeland terijer, jezerski terijer",
"Diary and Messages" : "",
"Domestic Short Hair" : "Domaca kratkodlaka",
"End Of Day" : "",
"Retailer" : "",
"Bank savings account" : "Stedni racun",
"Select" : "Odaberi",
"Make this the default image when creating documents" : "",
"Turkey" : "Ćurka",
"Index" : "",
"Diary note {0} marked completed" : "",
"Stats" : "",
"Weimaraner" : "Vajmarski pticar",
"Long" : "Dugacka",
"State" : "",
"American Fuzzy Lop" : "",
"The date the foster animal will be returned if known" : "",
"Animal '{0}' created with code {1}" : "",
"This animal already has an active reservation." : "",
"Passwords cannot be blank." : "Lozinke nemogu biti prazne",
"{plural2} people have overdue donations" : "",
"Email address" : "",
"Create this message" : "",
"Cornish Rex" : "Cornish Rex",
"This week" : "",
"HTML" : "",
"Markings" : "",
"Internal Locations" : "",
"Remove holds after" : "",
"{plural1} year" : "",
"Remove the neutered fields from animal health details" : "",
"Only show account totals for the current period, which starts on " : "",
"Publisher" : "",
"Champagne DArgent" : "",
"Redbone Coonhound" : "",
"Borzoi" : "Ruski hrt",
"Russian Blue" : "Ruksa plava",
"Log Text" : "",
"Poicephalus/Senegal" : "",
"Edit medical profile" : "",
"Green" : "Zelena",
"More Vaccinations" : "",
"Default video for publishing" : "",
"Change Password" : "Promjeni lozinku",
"Water Bills" : "Racuni za vodu",
"Back" : "",
"Date Put On" : "",
"Download" : "",
"Good with kids" : "",
"Prefill new media notes with the filename if left blank" : "",
"Staff" : "Osoblje",
"Category" : "Kategorija",
"Change Vouchers" : "",
"{plural0} year." : "",
"Lory/Lorikeet" : "Lori papagaji",
"Opening balances" : "Pocetno stanje",
"Pigeon" : "",
"Bichon Frise" : "Kovrdzavi bison",
"Anatolian Shepherd" : "Anatolijski ovcar",
"Treatment" : "",
"American Shorthair" : "Americki kratkodlaki",
"Donations for animals entering the shelter" : "Donacije za prijem zivotinja u skloniste",
"New diary task" : "",
"Media Notes" : "",
"New Document" : "",
"Fr" : "",
"Pomeranian" : "Pomeranac",
"An animal cannot have multiple open movements." : "",
"Income::" : "",
"Add waiting list" : "Dodajte listu cekanja",
"Change Litter" : "",
"Transferred Out {0}" : "",
"The animal name" : "",
"Date put on list" : "Datum stavljanja na listu",
"3 months" : "",
"Shelter stats (this week)" : "",
"Wednesday" : "",
"Remove the insurance number field from the movement screens" : "",
"Merge" : "",
"View Medical Records" : "",
"Animal Codes" : "",
"Generate HTML from this SQL" : "",
"Show the date brought in field" : "",
"Change Cost" : "",
"Create and edit" : "",
"Litter Ref" : "Leglo",
"Animal Shelter Manager" : "",
"Tibetan Terrier" : "Tibetanski terijer",
"Email signature" : "",
"{plural3} year" : "",
"Donkey/Mule" : "Magarac / Mula",
"Clone Animals" : "",
"Entry Donation" : "Prijemna donacija",
"Diets" : "",
"Split pages with a baby/adult prefix" : "",
"Not Available For Adoption" : "",
"Beagle" : "Bigl",
"Cats" : "",
"Details" : "Pojedinosti",
"Edit movement" : "",
"Out Between" : "",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "",
"Mynah" : "",
"Delete Document from Repository" : "",
"Treat trial adoptions as part of the shelter inventory" : "",
"Add additional field" : "Dodajte dodatno polje",
"Komondor" : "Komondor",
"Date brought in is not valid" : "Datum rodjenja nije validan",
"Email successfully sent to {0}" : "",
"Notes" : "Napomene",
"M (Miscellaneous)" : "O (Ostalo)",
"Received in last month" : "",
"Transferred" : "",
"Comments copied to web preferred media." : "",
"Lizard" : "Gušter",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "",
"Change Animals" : "",
"Australian Terrier" : "Australijski terijer",
"Waiting List Contact" : "",
"Remove the Litter ID field from animal details" : "",
"Include non-shelter" : "",
"Pharaoh Hound" : "Faraonski pas",
"Reserved" : "Rezervisano",
"Reclaimed" : "",
"New Account" : "",
"Results" : "Rezultati",
"Brown and White" : "Smedje-siva",
"Test Results" : "",
"Account Types" : "",
"FIV/L Tested" : "FIV/L testirana (test na macji virus imunodeficijencije)",
"Read the manual for more information about Animal Shelter Manager." : "",
"Non-Shelter Animal" : "",
"This animal should not be shown in figures and is not in the custody of the shelter" : "",
"Otterhound" : "",
"Edit Users" : "",
"{plural1} people with active reservations have not been homechecked" : "",
"Basenji" : "Basenji",
"You can't have a return without a movement." : "",
"To Other" : "",
"Bird" : "Ptica",
"Cockatiel" : "Ninfa",
"Add Lost Animal" : "",
"Lowchen" : "",
"Here are some things you should do before you start adding animals and people to your database." : "",
"Add a lost animal" : "Dodaj izgubljenu zivotinju",
"The date the litter entered the shelter" : "",
"Found animal entries matching '{0}'." : "",
"Bank deposit account" : "Depozitni racun",
"Transfer successfully created." : "",
"Havanese" : "Gavanski bison",
"Volunteer" : "Volonteri",
"Modify Lookups" : "",
"Execute" : "",
"Alerts" : "",
"Log requires a date." : "",
"Add Investigation" : "",
"{plural1} animals died" : "",
"Cat" : "Macka",
"Edit role" : "",
"Mother" : "",
"{plural3} days." : "",
"Cost date must be a valid date" : "",
"Intakes {0}" : "",
"View Roles" : "",
"Create person records from the selected forms" : "",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "",
"Delete Tests" : "",
"Add Diary" : "",
"Sat" : "",
"From" : "",
"Bonded With" : "",
"Poodle" : "Pudlica",
"Send" : "",
"Next" : "",
"{plural3} reservations have been active over a week without adoption" : "",
"These options change the behaviour of the search box at the top of the page." : "",
"Entry Reason" : "Razlog dolaska",
"Send Emails" : "",
"Edit Reports" : "Uredjivaje izvjestaja",
"Mark Deceased" : "",
"{plural2} months" : "",
"Delete Person" : "",
"Number in litter" : "",
"Select an animal" : "",
"Baby" : "Beba",
"Reservation For" : "",
"Tue" : "",
"Vouchers need an issue and expiry date." : "",
"Coat Type" : "Vrsta dlake",
"Delete this waiting list entry" : "",
"{0} - {1} ({2} {3} aged {4})" : "",
"Copy animal comments to the notes field of the web preferred media for this animal" : "",
"Edit Diary Tasks" : "",
"Gordon Setter" : "Gordonski seter",
"Lilac Tortie" : "",
"Expense" : "Trošak",
"Sphynx (hairless cat)" : "",
"Grade" : "",
"Edit system users" : "Uredjivanje korsnika sistema",
"Any health problems the animal has" : "",
"Online Form: {0}" : "",
"DD = current day" : "",
"Send emails" : "",
"Income::Interest" : "Prihod: Kamate",
"This will permanently remove this record, are you sure?" : "",
"Only show transfers" : "",
"Add Vaccinations" : "",
"No results found." : "",
"Litters need at least a required date and number." : "",
"D (Dog)" : "P (Pas)",
"Foster an animal" : "Privremeno udomite zivotinju",
"This animal is microchipped" : "",
"Page extension" : "",
"Remove the Rabies Tag field from animal health details" : "",
"Sheep Dog" : "Ovcarski pas",
"Adult" : "Odrasla osoba",
"Default view" : "",
"ASM" : "ASM",
"Catahoula Leopard Dog" : "Catahoula leopard",
"Coat Types" : "",
"Entered To" : "",
"Database" : "",
"Edit investigation" : "",
"Edit report template HTML header/footer" : "",
"Settings, System user accounts" : "",
"R" : "",
"This database is locked." : "",
"Swan" : "Labud",
"Caique" : "",
"Update animals with PetLink Microchips" : "",
"Enable visual effects" : "",
"Lost/Found" : "Izgubljeno/Pronadjeno",
"Animal - Additional" : "",
"Moved to animal record {0}" : "",
"Escaped {0}" : "",
"Declawed" : "Odrezanih kandzi",
"Duck" : "Patka",
"Information" : "",
"Rows" : "",
"This person has donations and cannot be removed." : "",
"Log Type" : "",
"SMTP username" : "",
"Edit my diary notes" : "Uredi moje zapise u dnevniku",
"Add account" : "",
"1 week" : "",
"Real name" : "",
"Requested" : "Zahtjevano",
"Your password is currently set to 'password'. This is highly insecure and we strongly suggest you choose a new password." : "",
"Stats period" : "",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "",
"Found animal - {0} {1} [{2}]" : "Pronadjena zivotinja - {0} {1} [{2}]",
"Recently deceased" : "",
"Hidden Comments" : "",
"Return Date" : "",
"This animal is currently at a retailer, it will be automatically returned first." : "",
"Donation From" : "",
"Fila Brasileiro" : "Brazilski pas",
"November" : "",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "{0} - {1} {2} ({3}), kontakt {4} ({5}) - izgubljena u {6},postanski broj{7}, dana {8}",
"SMTP password" : "",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "",
"Default Type" : "",
"Trial Adoption" : "",
"Reserve" : "",
"Don't create a cost record" : "",
"Adoption successfully created." : "",
"Animal Sponsorship" : "Spozorstvo zivotinja",
"Netherland Dwarf" : "",
"High" : "Veoma",
"Shelter" : "Azil",
"Export" : "",
"SQL dump" : "",
"Home page" : "",
"Saturday" : "",
"Include reserved animals" : "",
"Allow entry of two donations on the Move menu screens" : "",
"Show alerts on the home page" : "",
"This will permanently remove this animal, are you sure?" : "",
"Dwarf Eared" : "",
"No data." : "",
"Microchipped" : "",
"Good with dogs" : "",
"Date reported cannot be blank" : "",
"Document" : "Isprava",
"Goat" : "Koza",
"Waiting List - Details" : "",
"Dec" : "",
"Redirect to URL after POST" : "",
"New form field" : "",
"These are the HTML headers and footers used when generating reports." : "",
"Second Vaccination" : "Druga vakcinacija",
"Add form field" : "",
"Trial adoption book" : "",
"Mon" : "",
"This Year" : "",
"Heartworm" : "Srcani crv (Dirofilaria immitis)",
"Rotate image 90 degrees clockwise" : "",
"Areas" : "",
"Age Group" : "",
"All people on file." : "",
"This person has movements and cannot be removed." : "",
"Coat" : "",
"Create note this many days from today, or 9999 to ask" : "",
"Add medical profile" : "",
"From Fostering" : "",
"View animals matching publishing options" : "Pregled zivotinja koje zadovoljavaju opcije objavljivanja",
"Diary Task: {0}" : "",
"The date the animal was brought into the shelter" : "",
"This will permanently remove the selected roles, are you sure?" : "",
"Date of last owner contact" : "",
"years" : "godina",
"Exotic Shorthair" : "Egzoticna kratkodlaka",
"Test Performed" : "",
"Move" : "Premjestanje",
"Include animals in the following locations" : "",
"Edit online form HTML header/footer" : "",
"Cancelled Reservation" : "Okazana rezervacija",
"Guinea fowl" : "Biserke",
"Good with cats" : "",
"Ginger and White" : "",
"Use the icon in the lower right of notes fields to view them in a separate window." : "Za pregled napomena u odvojenom prozoru koristite ikonu u donjem desnom dijelu polja za napomene",
"Area" : "",
"Cost Types" : "",
"Adoption" : "",
"Other Account" : "",
"Units" : "",
"Add a vaccination" : "Dodavanje vakcinacija",
"Coonhound" : "Gonic rakuna",
"Name cannot be blank" : "Ime je obavezno polje",
"Hotot" : "",
"Pit Bull Terrier" : "Pitbul terijer",
"Some info text" : "",
"Add person" : "",
"Scale published animal images to" : "",
"Change Person" : "",
"Match" : "",
"July" : "",
"Sugar Glider" : "",
"{plural0} animal is not available for adoption" : "",
"Body" : "",
"American Sable" : "",
"Lost Animal" : "Izgubljena zivotinja",
"New Template" : "",
"Chesapeake Bay Retriever" : "Cesapik retriver",
"View Waiting List" : "",
"Border Collie" : "Granicarski koli",
"Left shelter" : "Datum napustanja azila",
"German Shepherd Dog" : "Njemacki ovcarski pas",
"Or move this diary on to" : "",
"Movement Types" : "",
"Oriental Short Hair" : "Orjentalna kratkodlaka",
"Notes about the death of the animal" : "",
"Result" : "",
"Results for '{0}'." : "",
"Singapura" : "Singapurska",
"Start Date" : "",
"Add role" : "",
"Delete Vouchers" : "",
"Warmblood" : "",
"Delete Lost Animal" : "",
"cm" : "",
"{0} rows affected." : "",
"Treatments" : "Tretmani",
"Low" : "",
"Cairn Terrier" : "Kern terijer",
"Show PDF files inline instead of sending them as attachments" : "",
"Returns {0}" : "",
"New Guinea Singing Dog" : "",
"Where this animal is located within the shelter" : "",
"Jun" : "",
"{plural3} months." : "",
"Jul" : "",
"Animal cannot be deceased before it was brought to the shelter" : "Zivotinja ne moze uginuti prije nego sto je dospjela u skloniste",
"Llama" : "Lama",
"Active" : "",
"Last, First" : "",
"Petit Basset Griffon Vendeen" : "Mali vendeski grifon baset",
"Bloodhound" : "Pas Sv. Huberta",
"Feral" : "",
"Footer" : "",
"Change Waiting List" : "",
"2 weeks" : "",
"Income::Donation" : "Prihod:Donacija",
"Waiting List - Additional" : "",
"Valid tokens for the subject and text" : "",
"An age in years, eg: 1, 0.5" : "",
"Number" : "Broj",
"Add a person" : "",
"Fish" : "Riba",
"Upload Photo" : "",
"{0} treatments every {1} years" : "",
"Received" : "",
"View Media" : "",
"Membership Expiry" : "",
"Afghan Hound" : "Avganistanski pas",
"Coton de Tulear" : "Tulearski pas",
"View publishing logs" : "Pregled zapisa objavljivanja",
"Shelter view" : "",
"Maine Coon" : "Maine Coon",
"Brown and Black" : "Smedje-crna",
"Match against other lost/found animals" : "",
"Diary Tasks" : "Zadaci u dnevniku",
"Lost and found entries must have a contact" : "",
"Snowshoe" : "",
"Additional Fields" : "Dodatna polja",
"This Month" : "",
"Neuter/Spay" : "Kastrirana",
"Medical" : "",
"Grey" : "Siva",
"Email this person" : "",
"Out SubTotal" : "",
"Log requires a person." : "",
"No adjustment" : "",
"{0} record(s) match the mail merge." : "",
"Stray" : "Lutalica",
"Altered Date" : "",
"A person is required for this movement type." : "",
"Burmilla" : "Burmila",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "Kada koristite Premjestaj > Udomljavanje zivotinja, ASM vas automatski vraca na svaki otvorni premjestaj kod privremenog udomitelja ili prodavca prije kreiranja udomljavanja.",
"{plural3} results found in {1} seconds. Order: {2}" : "",
"{plural3} weeks" : "",
"Include retailer animals" : "",
"User roles" : "Vrsta korisnika",
"On Shelter" : "",
"Similar Animal" : "",
"English Pointer" : "Engleski kratkodlaki pticar",
"This animal died outside the care of the shelter, and the death should be kept out of reports" : "",
"Collie" : "Koli",
"Sunday" : "",
"Xoloitzcuintle/Mexican Hairless" : "Xolo/Meksicki golokozi psi",
"Good with Cats" : "Dobar odons s mackama",
"Remaining" : "",
"Settings, Reports" : "",
"Income from an on-site shop" : "Prihod od prodaje u prodavnici",
"Angora Rabbit" : "Angorski zec",
"Add test" : "",
"Died off shelter" : "",
"Change Media" : "",
"Friday" : "",
"Pionus" : "",
"Ends after" : "",
"Expiry" : "",
"No results." : "",
"Reason" : "Razlog",
"Omit criteria" : "",
"Transfer an animal" : "Premjestite zivotinju",
"Separate waiting list rank by species" : "",
"By" : "",
"Unable to Cope" : "",
"Adoptable" : "",
"Merge another person into this one" : "",
"English Cocker Spaniel" : "Engleski koker spanijel",
"Change Lost Animal" : "",
"{plural0} urgent entry on the waiting list" : "",
"This animal should not be included when publishing animals for adoption" : "",
"Date Lost" : "datum gubljenja",
"To Fostering" : "",
"Number of Tasks" : "",
"The date the animal died" : "",
"Import complete with {plural2} errors." : "",
"Hold the animal until this date or blank to hold indefinitely" : "",
"View PDF" : "",
"Edit roles" : "Uredjivanje ovasti odgovornih osoba",
"Facebook page name" : "",
"You must supply a code." : "Potrebno je unijeti sifru",
"SQL" : "",
"Polish" : "Poljski",
"Remove the coat type field from animal details" : "",
"Adoption movements must have a valid adoption date." : "",
"Waiting List {0}" : "",
"Adopted Transferred In {0}" : "",
"Gerbil" : "",
"Sun" : "",
"Greater Swiss Mountain Dog" : "Veliki svajcarski planiski pas",
"Date removed" : "",
"Photo successfully uploaded." : "",
"Finch" : "Zeba",
"Most recently changed" : "",
"Yorkshire Terrier Yorkie" : "Joksirski terijer",
"Preview" : "",
"The base color of this animal" : "",
"Date of Birth" : "",
"Tan and Black" : "Zuto-crna",
"People" : "",
"Area Postcode" : "Postanski broj mjesta",
"Edit user" : "",
"Incomplete notes upto today" : "",
"Edit diary" : "Uredjivanje dnevnika",
"Receipt No" : "",
"{plural0} month" : "",
"Server clock adjustment" : "",
"This animal has been declawed" : "",
"Show a minimap of the address on person screens" : "",
"All existing animals, people, movements and donations in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "",
"Mountain Cur" : "",
"Tibetan Mastiff" : "Tibetanski mastif",
"Default Log Filter" : "",
"Rat Terrier" : "",
"Weight" : "Tezina",
"Set to 0 to never update urgencies." : "",
"December" : "",
"Edit My Diary Notes" : "",
"Generate letters" : "",
"All retailers on file." : "",
"Lifetime" : "Dozivotno",
"Shelter Animals" : "",
"Pointer" : "Gonic",
"Rosella" : "",
"Marriage/Relationship split" : "Razvod/ prekid relacije",
"Set to 0 for no limit." : "",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "",
"Foster Book" : "Knjiga privremenih udomitelja",
"Kuvasz" : "",
"Find Person" : "Pretraga osoba",
"Trial ends on" : "",
"Bank account interest" : "",
"Shelter stats (today)" : "",
"Number of fields" : "",
"Waiting list donations" : "Donacije za listu cekanja",
"Not available for adoption" : "Nije dostupana za udomljavanje",
"Remove the location unit field from animal details" : "",
"Golden Retriever" : "Zlatni retriver",
"Remove the city/state fields from person details" : "",
"HTML Publishing Templates" : "HTML predlozak za objavu",
"Entry reason" : "",
"Will this owner give a donation?" : "",
"Chow Chow" : "Cau Cau",
"Publish to RescueGroups.org" : "Objavi na Rescuegroups.org",
"New User" : "",
"Receipt/Invoice" : "",
"Use HTML5 client side image scaling where available to speed up image uploads" : "",
"or estimated age in years" : "",
"Balance" : "",
"Alphabetically A-Z" : "",
"Lost from" : "",
"More Tests" : "",
"Show quick links on the home page" : "",
" days." : "",
"Delete this person" : "",
"{plural0} week." : "",
"Entered (newest first)" : "",
"English Spot" : "",
"Default Death Reason" : "",
"Insurance" : "",
"Chart (Line)" : "",
"Saving..." : "",
"Updated database to version {0}" : "Baza podataka azurirana u verziju {0}",
"Attach link" : "",
"One Off" : "",
"Cruelty Case" : "",
"Received in last week" : "",
"The entry reason for this animal" : "",
"5 Year" : "Petogodisnje",
"{plural1} animals were adopted" : "",
"Save" : "Sacuvaj",
"French Bulldog" : "Francuski buldog",
"Banned" : "",
"{plural0} animal died" : "",
"Board and Food" : "",
"Asset::Premises" : "Imovina::Objekti",
"View Cost" : "",
"Enable lost and found functionality" : "",
"February" : "",
"Kakariki" : "Kakariki papagaj",
"are sent to" : "",
"There is not enough information in the form to create a person record (need a surname)." : "",
"Lost" : "Izgubljena",
"White and Grey" : "Bijelo-siva",
"Left Margin" : "",
"Remove" : "",
"Annually" : "Godisnje",
"April" : "",
"New Donation" : "",
"Area where the animal was found" : "",
"Morgan" : "",
"Incoming Forms" : "",
"Display a search button at the right side of the search box" : "",
"{plural0} animal was reclaimed by its owner" : "",
"{plural3} vaccinations need to be administered today" : "",
"Overdue" : "",
"View Incoming Forms" : "",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "",
"Beveren" : "",
"Black Tortie" : "",
"Description cannot be blank" : "",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : ""
}
|
aubzen/sheltermanager
|
src/locale/locale_bs.py
|
Python
|
gpl-3.0
| 92,062
|
[
"Amber",
"VisIt"
] |
2c1db226f68d8b2b9fa501b876f267625dfedb7ed07d9db56539aa9cbbed0391
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import sys
from tabulate import tabulate
from pymatgen import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher, \
ElementComparator
"""
A master convenience script with many tools for VASP and structure analysis.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Aug 13 2016"
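# Hedged usage sketch (editor addition): the helpers below expect an argparse-style namespace that
# pymatgen's `pmg` command line wires up elsewhere. A minimal programmatic stand-in for the format
# conversion path, with placeholder filenames, could look like:
#   from argparse import Namespace
#   convert_fmt(Namespace(filenames=["POSCAR", "converted.cif"]))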
def convert_fmt(args):
    if len(args.filenames) != 2:
        print("File format conversion takes in only two filenames.")
        sys.exit(-1)
s = Structure.from_file(args.filenames[0],
primitive="prim" in args.filenames[1].lower())
s.to(filename=args.filenames[1])
def analyze_symmetry(args):
tolerance = args.symmetry
t = []
for filename in args.filenames:
s = Structure.from_file(filename, primitive=False)
finder = SpacegroupAnalyzer(s, tolerance)
dataset = finder.get_symmetry_dataset()
t.append([filename, dataset["international"], dataset["number"],
dataset["hall"]])
print(tabulate(t, headers=["Filename", "Int Symbol", "Int number", "Hall"]))
def analyze_localenv(args):
bonds = {}
for bond in args.localenv:
toks = bond.split("=")
species = toks[0].split("-")
bonds[(species[0], species[1])] = float(toks[1])
for filename in args.filenames:
print("Analyzing %s..." % filename)
data = []
s = Structure.from_file(filename)
for i, site in enumerate(s):
for species, dist in bonds.items():
if species[0] in [sp.symbol
for sp in site.species_and_occu.keys()]:
dists = [d for nn, d in s.get_neighbors(site, dist)
if species[1] in
[sp.symbol for sp in nn.species_and_occu.keys()]]
dists = ", ".join(["%.3f" % d for d in sorted(dists)])
data.append([i, species[0], species[1], dists])
print(tabulate(data, headers=["#", "Center", "Ligand", "Dists"]))
def compare_structures(args):
filenames = args.filenames
if len(filenames) < 2:
print("You need more than one structure to compare!")
sys.exit(-1)
try:
structures = [Structure.from_file(fn) for fn in filenames]
except Exception as ex:
print("Error converting file. Are they in the right format?")
print(str(ex))
sys.exit(-1)
m = StructureMatcher() if args.group == "species" \
else StructureMatcher(comparator=ElementComparator())
for i, grp in enumerate(m.group_structures(structures)):
print("Group {}: ".format(i))
for s in grp:
print("- {} ({})".format(filenames[structures.index(s)],
s.formula))
print()
def analyze_structures(args):
if args.convert:
convert_fmt(args)
elif args.symmetry:
analyze_symmetry(args)
elif args.group:
compare_structures(args)
elif args.localenv:
analyze_localenv(args)
|
johnson1228/pymatgen
|
pymatgen/cli/pmg_structure.py
|
Python
|
mit
| 3,397
|
[
"VASP",
"pymatgen"
] |
f9d32fa97fca00fc4c01dff999d80a091625c3d3a27096d50abd2056203c6fda
|
from setuptools import setup
setup(
name='pynnotator',
version='2.0',
    description='A Python Annotation Framework for VCFs using multiple tools (Ex. VEP, SnpEff and SnpSift) and databases (Ex. 1000genomes, dbSNP and dbnfsp).',
url='http://github.com/raonyguimaraes/pynnotator',
author='Raony Guimaraes',
author_email='raony@torchmed.com',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
keywords='genome exome annotation rare diseases',
license='BSD-3',
packages=['pynnotator', 'pynnotator.helpers', 'pynnotator.tests'],
install_requires=[
'wheel',
'pysam',
'cython',
'distro',
],
test_suite='nose.collector',
tests_require=['nose'],
#scripts=['bin/pynnotator'],
entry_points={ # Optional
'console_scripts': [
'pynnotator=pynnotator.main:main',
],
},
include_package_data=True,
zip_safe=False)
|
raonyguimaraes/pynnotator
|
setup.py
|
Python
|
bsd-3-clause
| 2,215
|
[
"pysam"
] |
5217573d1d16b30d39b422400b289da96c1800b42ac45ae68dcd03fb578f4d94
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from preggy import expect
from octopus import TornadoOctopus
from octopus.limiter.redis.per_domain import Limiter as PerDomainRedisLimiter
from octopus.limiter.in_memory.per_domain import Limiter as PerDomainInMemoryLimiter
from tests import TestCase
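# Descriptive note (editor addition): the dicts handed to the per-domain limiters below map a domain
# prefix to the maximum number of requests octopus may have in flight for that domain (1 here); the
# redis-backed limiter additionally reports lock misses through subscribe_to_lock_miss.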
class TestTornadoCoreLimited(TestCase):
def setUp(self):
super(TestTornadoCoreLimited, self).setUp()
self.response = None
self.url = None
self.responses = {}
self.cache_miss = set()
self.redis.flushall()
def handle_url_response(self, url, response):
self.responses[url] = response
def handle_limiter_miss(self, url):
self.cache_miss.add(url)
def test_should_not_get_more_than_one_url_for_same_domain_concurrently(self):
limiter = PerDomainInMemoryLimiter(
{'http://g1.globo.com': 1},
{'http://globoesporte.globo.com': 1}
)
otto = TornadoOctopus(concurrency=10, auto_start=True, limiter=limiter)
otto.enqueue('http://globoesporte.globo.com', self.handle_url_response)
otto.enqueue('http://globoesporte.globo.com/futebol/times/flamengo/', self.handle_url_response)
otto.enqueue('http://g1.globo.com', self.handle_url_response)
otto.enqueue('http://g1.globo.com/economia', self.handle_url_response)
otto.wait(2)
expect(self.responses).to_length(4)
expect(list(limiter.domain_count.keys())).to_be_like(['http://g1.globo.com', 'http://globoesporte.globo.com'])
def test_should_call_limiter_miss_twice(self):
limiter = PerDomainRedisLimiter(
{'http://g1.globo.com': 1},
{'http://globoesporte.globo.com': 1},
redis=self.redis
)
limiter.subscribe_to_lock_miss(self.handle_limiter_miss)
otto = TornadoOctopus(concurrency=10, auto_start=True, limiter=limiter)
otto.enqueue('http://globoesporte.globo.com/', self.handle_url_response)
otto.enqueue('http://globoesporte.globo.com/futebol/times/flamengo/', self.handle_url_response)
otto.enqueue('http://g1.globo.com/', self.handle_url_response)
otto.enqueue('http://g1.globo.com/economia/', self.handle_url_response)
otto.wait()
expect(self.cache_miss).to_length(2)
|
heynemann/octopus
|
tests/test_tornado_octopus_limited.py
|
Python
|
mit
| 2,319
|
[
"Octopus"
] |
bf50a76dd02679bac3316102bccc3ac1599dc6638156292c81d28a984a1b2681
|
import constants as c
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy
import os
import qcio
import qcplot
import qcutils
import statsmodels.api as sm
import sys
import time
nfig = 0
plotwidth = 10.9
plotheight = 7.5
# load the control file
cf = qcio.load_controlfile(path='../controlfiles')
if len(cf)==0: sys.exit()
min_n = int(cf["General"]["minimum_number"])
min_r = float(cf["General"]["minimum_correlation"])
# get the input file name
fname = qcio.get_infilenamefromcf(cf)
if not os.path.exists(fname):
print " compare_ah: Input netCDF file "+fname+" doesn't exist"
sys.exit()
# read the input file and return the data structure
ds = qcio.nc_read_series(fname)
if len(ds.series.keys())==0: print time.strftime('%X')+' netCDF file '+fname+' not found'; sys.exit()
# get the site name
SiteName = ds.globalattributes['site_name']
# get the time step
ts = int(ds.globalattributes['time_step'])
# get the datetime series
DateTime = ds.series['DateTime']['Data']
# get the initial start and end dates
# find the start index of the first whole day (time=00:30)
si = qcutils.GetDateIndex(DateTime,str(DateTime[0]),ts=ts,default=0,match='startnextday')
# find the end index of the last whole day (time=00:00)
ei = qcutils.GetDateIndex(DateTime,str(DateTime[-1]),ts=ts,default=-1,match='endpreviousday')
# clip the datetime series to a whole number of days
DateTime = DateTime[si:ei+1]
StartDate = DateTime[0]
EndDate = DateTime[-1]
print time.strftime('%X')+' Start date; '+str(StartDate)+' End date; '+str(EndDate)
Hdh = ds.series['Hdh']['Data'][si:ei+1]
Month = ds.series['Month']['Data'][si:ei+1]
nrecs = len(DateTime)
nperhr = int(float(60)/ts+0.5)
nperday = int(float(24)*nperhr+0.5)
ndays = nrecs/nperday
nrecs=ndays*nperday
Ah_7500_name = str(cf['Variables']['Ah_7500'])
Ah_HMP_name = str(cf['Variables']['Ah_HMP'])
# get local data series from the data structure
ah_7500_30min_1d,flag,attr = qcutils.GetSeriesasMA(ds,Ah_7500_name,si=si,ei=ei)
ah_HMP1_30min_1d,flag,attr = qcutils.GetSeriesasMA(ds,Ah_HMP_name,si=si,ei=ei)
month_30min_1d,flag,attr = qcutils.GetSeriesasMA(ds,'Month',si=si,ei=ei)
# mask data points unless both 7500 and HMP present
mask = numpy.ma.mask_or(ah_7500_30min_1d.mask,ah_HMP1_30min_1d.mask)
ah_7500_30min_1d = numpy.ma.array(ah_7500_30min_1d,mask=mask)
ah_HMP1_30min_1d = numpy.ma.array(ah_HMP1_30min_1d,mask=mask)
month_30min_1d = numpy.ma.array(month_30min_1d,mask=mask)
# reshape the 1D time series into 2D arrays
ah_7500_30min_2d = numpy.ma.reshape(ah_7500_30min_1d,[ndays,nperday])
ah_HMP1_30min_2d = numpy.ma.reshape(ah_HMP1_30min_1d,[ndays,nperday])
month_30min_2d = numpy.ma.reshape(month_30min_1d,[ndays,nperday])
# get the daily statistics
month_daily_avg = numpy.ma.average(month_30min_2d,axis=1)
ah_7500_daily_avg = numpy.ma.average(ah_7500_30min_2d,axis=1)
ah_HMP1_daily_avg = numpy.ma.average(ah_HMP1_30min_2d,axis=1)
ah_7500_daily_std = numpy.ma.std(ah_7500_30min_2d,axis=1)
ah_HMP1_daily_std = numpy.ma.std(ah_HMP1_30min_2d,axis=1)
ah_7500_daily_max = numpy.ma.max(ah_7500_30min_2d,axis=1)
ah_HMP1_daily_max = numpy.ma.max(ah_HMP1_30min_2d,axis=1)
ah_7500_daily_min = numpy.ma.min(ah_7500_30min_2d,axis=1)
ah_HMP1_daily_min = numpy.ma.min(ah_HMP1_30min_2d,axis=1)
ah_avgdiff_daily = ah_7500_daily_avg - ah_HMP1_daily_avg
ah_stdratio_daily = ah_HMP1_daily_std/ah_7500_daily_std
ah_7500range_daily = ah_7500_daily_max - ah_7500_daily_min
ah_HMP1range_daily = ah_HMP1_daily_max - ah_HMP1_daily_min
ah_rangeratio_daily = (ah_HMP1_daily_max - ah_HMP1_daily_min)/(ah_7500_daily_max - ah_7500_daily_min)
DT_daily = DateTime[0:nrecs:nperday]
# time series plot of daily averaged absolute humidities and differences
nfig = nfig + 1
fig = plt.figure(nfig,figsize=(plotwidth,plotheight))
plt.figtext(0.5,0.95,SiteName,horizontalalignment='center',size=16)
qcplot.tsplot(DT_daily,ah_7500_daily_avg,sub=[3,1,1],ylabel='Ah_7500')
qcplot.tsplot(DT_daily,ah_HMP1_daily_avg,sub=[3,1,2],ylabel='Ah_HMP_01')
qcplot.tsplot(DT_daily,ah_avgdiff_daily,sub=[3,1,3],ylabel='7500-HMP')
# scatter plots of absolute humidities by month
MnthList = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
nfig = nfig + 1
fig = plt.figure(nfig,figsize=(plotwidth,plotheight))
plt.figtext(0.5,0.95,SiteName,horizontalalignment='center',size=16)
j = 0
for i in [1,2,3,4,5,6,7,8,9,10,11,12]:
j = j + 1
index = numpy.where(month_30min_1d==i)[0]
if len(index)!=0:
y = ah_HMP1_30min_1d[index]
x = ah_7500_30min_1d[index]
if j in [1,2,3,4,5,6,7,8,9]:
xlabel = None
else:
xlabel = '7500 (g/m3)'
if j in [2,3,5,6,8,9,11,12]:
ylabel = None
else:
ylabel = 'HMP (g/m3)'
qcplot.xyplot(x,y,sub=[4,3,j],regr=2,title=MnthList[i-1],xlabel=xlabel,ylabel=ylabel)
plt.tight_layout()
# daily regressions
slope = numpy.ones(ndays)
offset = numpy.zeros(ndays)
correl = numpy.ones(ndays)
number = numpy.zeros(ndays)
for i in range(0,ndays-1):
x = ah_7500_30min_2d[i,:]
y = ah_HMP1_30min_2d[i,:]
x_nm = numpy.ma.compressed(x)
x_nm = sm.add_constant(x_nm,prepend=False)
y_nm = numpy.ma.compressed(y)
if len(y_nm)>1:
resrlm = sm.RLM(y_nm,x_nm,M=sm.robust.norms.TukeyBiweight()).fit()
coefs = resrlm.params
r = numpy.ma.corrcoef(x,y)
number[i] = numpy.ma.count(x)
slope[i] = coefs[0]
offset[i] = coefs[1]
correl[i] = r[0][1]
correl2 = numpy.ma.masked_where((correl<min_r)|(number<min_n),correl)
number2 = numpy.ma.masked_where((correl<min_r)|(number<min_n),number)
slope2 = numpy.ma.masked_where((correl<min_r)|(number<min_n),slope)
offset2 = numpy.ma.masked_where((correl<min_r)|(number<min_n),offset)
sdratio2 = numpy.ma.masked_where((correl<min_r)|(number<min_n),ah_stdratio_daily)
nfig = nfig + 1
figts = plt.figure(nfig,figsize=(plotwidth,plotheight))
plt.figtext(0.5,0.95,SiteName,horizontalalignment='center',size=16)
qcplot.tsplot(DT_daily,correl2,sub=[5,1,1],ylabel='Correl',colours=number)
qcplot.tsplot(DT_daily,number2,sub=[5,1,2],ylabel='Number',colours=correl)
qcplot.tsplot(DT_daily,slope2,sub=[5,1,3],ylabel='Slope',colours=correl)
qcplot.tsplot(DT_daily,offset2,sub=[5,1,4],ylabel='Offset',colours=correl)
qcplot.tsplot(DT_daily,sdratio2,sub=[5,1,5],ylabel='Sd(HMP)/Sd(7500)',colours=correl)
for i in range(0,ndays-1):
x = ah_7500_30min_2d[i,:]
y = ah_HMP1_30min_2d[i,:]
x_nm = numpy.ma.compressed(x)
y_nm = numpy.ma.compressed(y)
nx = numpy.ma.count(x_nm)
ny = numpy.ma.count(y_nm)
r = numpy.ma.corrcoef(x_nm,y_nm)
if (nx<min_n) or (r[0][1]<min_r):
ah_7500_30min_2d[i,:].mask = True
ah_HMP1_30min_2d[i,:].mask = True
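# Interactive key bindings for the figure below (descriptive note, editor addition):
#   'n' starts a new XY comparison period, 'f' extends the current period forward one day,
#   'b' steps it back one day, and 'q' quits and prints the fitted slope/offset/correlation
#   summaries used to correct Ah_7500_Av and the UxA/UyA/UzA covariances.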
class PointBrowser:
def __init__(self):
self.si = 0
self.ei = 0
self.start_ind_day = 0
self.ind_30min = 0
self.start_ind_30min = 0
self.end_ind = 0
self.nfig = nfig
self.slope = []
self.offset = []
self.correl = []
self.start_date = []
self.end_date = []
self.stdratio = []
self.rangeratio = []
self.last_index = []
def onpress(self, event):
#if self.ind_day is None: return
if event.key=='n': self.new()
if event.key=='f': self.forward()
if event.key=='b': self.backward()
if event.key=='q': self.quitprog()
if event.key not in ('n', 'f', 'b', 'q'): return
def new(self):
print 'Creating new XY plot ...'
# save the summary results from the last period
if self.ei!=0:
self.start_date.append(DT_daily[self.si])
self.end_date.append(DT_daily[self.ei])
self.slope.append(self.coefs[0])
self.offset.append(self.coefs[1])
self.correl.append(self.r[0][1])
self.stdratio.append(self.sd)
self.rangeratio.append(self.rr)
self.si = self.ei
# put up the new XY plot
self.nfig += 1
self.figxy = plt.figure(self.nfig,figsize=(5,4))
self.figxy.subplots_adjust(bottom=0.15,left=0.15)
self.axxy = self.figxy.add_subplot(111)
self.axxy.set_xlabel('Ah_7500 (g/m3)')
self.axxy.set_ylabel('Ah_HMP (g/m3)')
plt.show()
def forward(self):
self.ei += 1
self.update()
def backward(self):
self.ei += -1
self.update()
def update(self):
self.ei = numpy.clip(self.ei,self.si,len(DT_daily)-1)
x = ah_7500_30min_2d[self.ei,:]
y = ah_HMP1_30min_2d[self.ei,:]
if min([numpy.ma.count(x),numpy.ma.count(y)])<=0:
print DT_daily[self.ei],'%g'%(numpy.ma.count(x))
else:
print DT_daily[self.ei],'%g %.3f %.3f %.3f'%(numpy.ma.count(x),numpy.ma.corrcoef(x,y)[0][1],
numpy.ma.polyfit(numpy.ma.copy(x),numpy.ma.copy(y),1)[0],
numpy.ma.polyfit(numpy.ma.copy(x),numpy.ma.copy(y),1)[1])
x = ah_7500_30min_2d[self.si:self.ei+1,:]
y = ah_HMP1_30min_2d[self.si:self.ei+1,:]
x_nm = numpy.ma.compressed(x)
y_nm = numpy.ma.compressed(y)
if len(x_nm)!=0:
self.r = numpy.corrcoef(x_nm,y_nm)
self.sd = numpy.std(y_nm)/numpy.std(x_nm)
self.rr = (numpy.max(y_nm)-numpy.min(y_nm))/(numpy.max(x_nm)-numpy.min(x_nm))
resrlm = sm.RLM(y_nm,sm.add_constant(x_nm,prepend=False),M=sm.robust.norms.TukeyBiweight()).fit()
self.coefs = resrlm.params
m = self.coefs[0]; b = self.coefs[1]
self.axxy.cla()
self.axxy.plot(x_nm,y_nm,'b.')
self.axxy.set_xlabel('Ah_7500 (g/m3)')
self.axxy.set_ylabel('Ah_HMP (g/m3)')
self.axxy.plot(x_nm,self.coefs[0]*x_nm+self.coefs[1],'r--',linewidth=3)
eqnstr = 'y = %.3fx + %.3f, r = %.3f'%(self.coefs[0],self.coefs[1],self.r[0][1])
self.axxy.text(0.5,0.875,eqnstr,fontsize=8,horizontalalignment='center',transform=self.axxy.transAxes)
#print DT_daily[self.ei],'%g %.3f %.3f %.3f'%(numpy.ma.count(x),numpy.ma.corrcoef(x,y)[0][1],m,b)
#else:
#print DT_daily[self.ei],numpy.ma.count(x),numpy.ma.corrcoef(x,y)[0][1]
#print str(DT_daily[self.ei])+'%g %.3f'%(numpy.ma.count(x),numpy.ma.corrcoef(x,y)[0][1])
#m = numpy.ma.zeros(1); m.mask = True; m=m[0]
#b = numpy.ma.zeros(1); b.mask = True; b=b[0]
dtstr = str(DT_daily[self.si]) + ' to ' + str(DT_daily[self.ei])
self.axxy.text(0.5,0.925,dtstr,fontsize=8,horizontalalignment='center',transform=self.axxy.transAxes)
self.figxy.canvas.draw()
def quitprog(self):
self.start_date.append(DT_daily[self.si])
self.end_date.append(DT_daily[self.ei])
self.slope.append(self.coefs[0])
self.offset.append(self.coefs[1])
self.correl.append(self.r[0][1])
self.stdratio.append(self.sd)
self.rangeratio.append(self.rr)
# print everything
print '*** all results ***'
for i in range(len(self.slope)):
eqnstr = '%.3f, %.3f, %.3f, %.3f, %.3f'%(self.slope[i],self.offset[i],self.correl[i],self.stdratio[i],self.rangeratio[i])
print self.start_date[i], self.end_date[i], eqnstr
# print the linear fit for correcting Ah_7500_Av
print '*** corrections for Ah_7500_Av ***'
for i in range(len(self.slope)):
eqnstr = '%.3f,%.3f'%(self.slope[i],self.offset[i])
print str(i)+'='+'"['+"'"+self.start_date[i].strftime('%Y-%m-%d %H:%M')+"'"+','\
+"'"+self.end_date[i].strftime('%Y-%m-%d %H:%M')+"'"+","+eqnstr+']"'
# print the ratio of the standard deviations for correcting the covariances
print '*** corrections for covariances UxA, UyA and UzA'
for i in range(len(self.slope)):
eqnstr = '%.3f'%(self.stdratio[i])
print str(i)+'='+'"['+"'"+self.start_date[i].strftime('%Y-%m-%d %H:%M')+"'"+','\
+"'"+self.end_date[i].strftime('%Y-%m-%d %H:%M')+"'"+","+eqnstr\
+',0.0]"'
plt.close('all')
browser = PointBrowser()
figts.canvas.mpl_connect('key_press_event', browser.onpress)
plt.show()
|
OzFlux/OzFluxQC
|
scripts/compare_ah.py
|
Python
|
gpl-3.0
| 12,363
|
[
"NetCDF"
] |
d306f11e4829b3bf7c39b318d5c7407c214448b7631332b6cda3d9c673d13396
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Testmodule for the Wang-Landau Reaction Ensemble.
"""
from __future__ import print_function
import numpy as np
import unittest as ut
import espressomd
from espressomd import code_info
from espressomd import analyze
from espressomd import integrate
from espressomd.interactions import *
from espressomd import reaction_ensemble
from espressomd import system
class ReactionEnsembleTest(ut.TestCase):
"""Test the core implementation of the wang_landau reaction ensemble.
Create a harmonic bond between the two reacting particles. Therefore the
potential energy is quadratic in the elongation of the bond and
therefore the density of states is known as the one of the harmonic
oscillator
"""
# System parameters
#
box_l = 6 * np.sqrt(2)
temperature = 1.0
# Integration parameters
#
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0
system.cell_system.set_n_square(use_verlet_lists=False)
#
# Setup System
#
N0 = 1 # number of titratable units
K_diss = 0.0088
system.part.add(id=0, pos=[0, 0, 0] * system.box_l, type=3)
system.part.add(id=1, pos=[1.0, 1.0, 1.0] * system.box_l / 2.0, type=1)
system.part.add(id=2, pos=np.random.random() * system.box_l, type=2)
system.part.add(id=3, pos=np.random.random() * system.box_l, type=2)
h = HarmonicBond(r_0=0, k=1)
system.bonded_inter[0] = h
system.part[0].add_bond((h, 1))
RE = reaction_ensemble.WangLandauReactionEnsemble(
temperature=temperature, exclusion_radius=0)
RE.add_reaction(gamma=K_diss, reactant_types=[0], reactant_coefficients=[
1], product_types=[1, 2], product_coefficients=[1, 1], default_charges={0: 0, 1: -1, 2: +1})
system.setup_type_map([0, 1, 2, 3])
# initialize wang_landau
# generate preliminary_energy_run_results here, this should be done in a
    # separate simulation without energy reweighting using the update energy
# functions
np.savetxt("energy_boundaries.dat", np.c_[
[0, 1], [0, 0], [9, 9]], delimiter='\t', header="nbar E_potmin E_potmax")
RE.add_collective_variable_degree_of_association(
associated_type=0, min=0, max=1, corresponding_acid_types=[0, 1])
RE.add_collective_variable_potential_energy(
filename="energy_boundaries.dat", delta=0.05)
RE.set_wang_landau_parameters(
final_wang_landau_parameter=1e-2, do_not_sample_reaction_partition_function=True, full_path_to_output_filename="WL_potential_out.dat")
def test_wang_landau_output(self):
while True:
try:
self.RE.reaction()
for i in range(2):
self.RE.displacement_mc_move_for_particles_of_type(3)
except reaction_ensemble.WangLandauHasConverged: # only catch my exception
break
# test as soon as wang_landau has converged (throws exception then)
nbars, Epots, WL_potentials = np.loadtxt(
"WL_potential_out.dat", unpack=True)
mask_nbar_0 = np.where(np.abs(nbars - 1.0) < 0.0001)
Epots = Epots[mask_nbar_0]
Epots = Epots[1:]
WL_potentials = WL_potentials[mask_nbar_0]
WL_potentials = WL_potentials[1:]
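        # Hedged note (editor addition): exp(WL_potentials) estimates the density of states g(E), so the
        # canonical averages below are reweighted as
        #   <A> = sum_E g(E) A(E) exp(-E/T) / sum_E g(E) exp(-E/T)
        # evaluated at T = self.temperature.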
expected_canonical_potential_energy = np.sum(np.exp(WL_potentials) * Epots * np.exp(
-Epots / self.temperature)) / np.sum(np.exp(WL_potentials) * np.exp(-Epots / self.temperature))
expected_canonical_squared_potential_energy = np.sum(np.exp(WL_potentials) * Epots**2 * np.exp(
-Epots / self.temperature)) / np.sum(np.exp(WL_potentials) * np.exp(-Epots / self.temperature))
expected_canonical_configurational_heat_capacity = expected_canonical_squared_potential_energy - \
expected_canonical_potential_energy**2
print(expected_canonical_potential_energy,
expected_canonical_configurational_heat_capacity)
# for the calculation regarding the analytical results which are
# compared here, see Master Thesis Jonas Landsgesell p. 72
self.assertAlmostEqual(
expected_canonical_potential_energy - 1.5, 0.00, places=1,
msg="difference to analytical expected canonical potential energy too big")
self.assertAlmostEqual(
expected_canonical_configurational_heat_capacity - 1.5, 0.00, places=1,
msg="difference to analytical expected canonical configurational heat capacity too big")
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
hmenke/espresso
|
testsuite/python/wang_landau_reaction_ensemble.py
|
Python
|
gpl-3.0
| 5,481
|
[
"ESPResSo"
] |
86bc7b7fe274a8300f8385fb61a3f3de43eb50d9fef7f1220239d9b26edc8366
|
# # Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
# $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" EState fingerprinting
"""
import numpy
from rdkit.Chem.EState import EStateIndices
from rdkit.Chem.EState import AtomTypes
def FingerprintMol(mol):
""" generates the EState fingerprints for the molecule
Concept from the paper: Hall and Kier JCICS _35_ 1039-1045 (1995)
two numeric arrays are returned:
The first (of ints) contains the number of times each possible atom type is hit
The second (of floats) contains the sum of the EState indices for atoms of
each type.
"""
if AtomTypes.esPatterns is None:
AtomTypes.BuildPatts()
esIndices = EStateIndices(mol)
nPatts = len(AtomTypes.esPatterns)
counts = numpy.zeros(nPatts, numpy.int)
sums = numpy.zeros(nPatts, numpy.float)
for i, (_, pattern) in enumerate(AtomTypes.esPatterns):
matches = mol.GetSubstructMatches(pattern, uniquify=1)
counts[i] = len(matches)
for match in matches:
sums[i] += esIndices[match[0]]
return counts, sums
def _exampleCode():
""" Example code for calculating E-state fingerprints """
from rdkit import Chem
smis = ['CC', 'CCC', 'c1[nH]cnc1CC(N)C(O)=O', 'NCCc1ccc(O)c(O)c1']
for smi in smis:
m = Chem.MolFromSmiles(smi)
print(smi, Chem.MolToSmiles(m))
types = AtomTypes.TypeAtoms(m)
for i in range(m.GetNumAtoms()):
print('%d %4s: %s' % (i + 1, m.GetAtomWithIdx(i).GetSymbol(), str(types[i])))
es = EStateIndices(m)
counts, sums = FingerprintMol(m)
for i in range(len(AtomTypes.esPatterns)):
if counts[i]:
name, _ = AtomTypes.esPatterns[i]
print('%6s, % 2d, % 5.4f' % (name, counts[i], sums[i]))
for i in range(len(es)):
print('% 2d, % 5.4f' % (i + 1, es[i]))
print('--------')
if __name__ == '__main__': # pragma: nocover
_exampleCode()
|
greglandrum/rdkit
|
rdkit/Chem/EState/Fingerprinter.py
|
Python
|
bsd-3-clause
| 2,137
|
[
"RDKit"
] |
043e3de69d8d70b3352161133f6d1a17a35806ca47a3eac360ed9a8fd6cbc448
|
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# MAGIC
# MAGIC This is a 2019-2021 augmentation and update of [Adam Breindel](https://www.linkedin.com/in/adbreind)'s initial notebooks.
# MAGIC
# MAGIC _Thanks to [Christian von Koch](https://www.linkedin.com/in/christianvonkoch/) and [William Anzén](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._
# COMMAND ----------
# MAGIC %md
# MAGIC #### As we dive into more hands-on works, let's recap some basic guidelines:
# MAGIC
# MAGIC 0. Structure of your network is the first thing to work with, before worrying about the precise number of neurons, size of convolution filters etc.
# MAGIC
# MAGIC 1. "Business records" or fairly (ideally?) uncorrelated predictors -- use Dense Perceptron Layer(s)
# MAGIC
# MAGIC 2. Data that has 2-D patterns: 2D Convolution layer(s)
# MAGIC
# MAGIC 3. For activation of hidden layers, when in doubt, use ReLU
# MAGIC
# MAGIC 4. Output:
# MAGIC * Regression: 1 neuron with linear activation
# MAGIC    * For k-way classification: k neurons with softmax activation (both output choices are sketched in a cell after these guidelines)
# MAGIC
# MAGIC 5. Deeper networks are "smarter" than wider networks (in terms of abstraction)
# MAGIC
# MAGIC 6. More neurons & layers \\( \to \\) more capacity \\( \to \\) more data \\( \to \\) more regularization (to prevent overfitting)
# MAGIC
# MAGIC 7. If you don't have any specific reason not to use the "adam" optimizer, use that one
# MAGIC
# MAGIC 8. Errors:
# MAGIC * For regression or "wide" content matching (e.g., large image similarity), use mean-square-error;
# MAGIC * For classification or narrow content matching, use cross-entropy
# MAGIC
# MAGIC 9. As you simplify and abstract from your raw data, you should need less features/parameters, so your layers probably become smaller and simpler.
# COMMAND ----------
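# MAGIC %md
# MAGIC A minimal sketch contrasting the two output-layer choices from guideline 4, together with the matching loss functions from guideline 8. This is illustrative only: the hidden-layer size and the 784-dimensional input are assumptions made for the sketch, not values prescribed by the guidelines.
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
# Regression head (sketch): a single output neuron with linear activation, mean-square-error loss.
regression_model = Sequential()
regression_model.add(Dense(20, input_dim=784, activation='relu'))
regression_model.add(Dense(1, activation='linear'))
regression_model.compile(loss='mean_squared_error', optimizer='adam')
# k-way classification head (sketch, k=10): k output neurons with softmax, cross-entropy loss.
classification_model = Sequential()
classification_model.add(Dense(20, input_dim=784, activation='relu'))
classification_model.add(Dense(10, activation='softmax'))
classification_model.compile(loss='categorical_crossentropy', optimizer='adam')
# COMMAND ----------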
# MAGIC %md
# MAGIC As a baseline, let's start a lab running with what we already know.
# MAGIC
# MAGIC We'll take our deep feed-forward multilayer perceptron network, with ReLU activations and reasonable initializations, and apply it to learning the MNIST digits.
# MAGIC
# MAGIC The main part of the code looks like the following (full code you can run is in the next cell):
# MAGIC
# MAGIC ```
# MAGIC # imports, setup, load data sets
# MAGIC
# MAGIC model = Sequential()
# MAGIC model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(15, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
# MAGIC model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# MAGIC
# MAGIC categorical_labels = to_categorical(y_train, num_classes=10)
# MAGIC
# MAGIC history = model.fit(X_train, categorical_labels, epochs=100, batch_size=100)
# MAGIC
# MAGIC # print metrics, plot errors
# MAGIC ```
# MAGIC
# MAGIC Note the changes, which are largely about building a classifier instead of a regression model:
# MAGIC
# MAGIC * Output layer has one neuron per category, with softmax activation
# MAGIC * __Loss function is cross-entropy loss__
# MAGIC * Accuracy metric is categorical accuracy
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import sklearn.datasets
import datetime
import matplotlib.pyplot as plt
import numpy as np
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
model = Sequential()
model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(15, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
categorical_labels = to_categorical(y_train, num_classes=10)
start = datetime.datetime.today()
history = model.fit(X_train, categorical_labels, epochs=40, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, to_categorical(y_test, num_classes=10))
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
print ("Start: " + str(start))
end = datetime.datetime.today()
print ("End: " + str(end))
print ("Elapse: " + str(end-start))
# COMMAND ----------
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC What are the big takeaways from this experiment?
# MAGIC
# MAGIC 1. We get pretty impressive "apparent error" accuracy right from the start! A small network gets us to training accuracy 97% by epoch 20
# MAGIC 2. The model *appears* to continue to learn if we let it run, although it does slow down and oscillate a bit.
# MAGIC 3. Our test accuracy is about 95% after 5 epochs and never gets better ... it gets worse!
# MAGIC 4. Therefore, we are overfitting very quickly... most of the "training" turns out to be a waste.
# MAGIC 5. For what it's worth, we get 95% accuracy without much work.
# MAGIC
# MAGIC This is not terrible compared to other, non-neural-network approaches to the problem. After all, we could probably tweak this a bit and do even better.
# MAGIC
# MAGIC But we talked about using deep learning to solve "95%" problems or "98%" problems ... where one error in 20, or 50 simply won't work. If we can get to "multiple nines" of accuracy, then we can do things like automate mail sorting and translation, create cars that react properly (all the time) to street signs, and control systems for robots or drones that function autonomously.
# MAGIC
# MAGIC ## You Try Now!
# MAGIC
# MAGIC Try two more experiments (try them separately):
# MAGIC
# MAGIC 1. Add a third, hidden layer.
# MAGIC 2. Increase the size of the hidden layers. (A minimal sketch of both variants appears after this cell.)
# MAGIC
# MAGIC Adding another layer slows things down a little (why?) but doesn't seem to make a difference in accuracy.
# MAGIC
# MAGIC Adding a lot more neurons into the first topology slows things down significantly -- 10x as many neurons, and only a marginal increase in accuracy. Notice also (in the plot) that the learning clearly degrades after epoch 50 or so.
# COMMAND ----------
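# MAGIC %md
# MAGIC A minimal sketch of the two experiments above (illustrative only: the exact layer sizes below are assumptions, not values prescribed by the exercise).
# COMMAND ----------
# Experiment 1 (sketch): add a third hidden layer to the baseline topology.
model_deeper = Sequential()
model_deeper.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model_deeper.add(Dense(15, kernel_initializer='normal', activation='relu'))
model_deeper.add(Dense(15, kernel_initializer='normal', activation='relu'))  # extra hidden layer
model_deeper.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model_deeper.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# Experiment 2 (sketch): widen the hidden layers of the baseline topology (10x as many neurons).
model_wider = Sequential()
model_wider.add(Dense(200, input_dim=784, kernel_initializer='normal', activation='relu'))
model_wider.add(Dense(150, kernel_initializer='normal', activation='relu'))
model_wider.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model_wider.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# Either variant trains exactly like the baseline, e.g.:
# history = model_deeper.fit(X_train, categorical_labels, epochs=40, batch_size=100,
#                            validation_split=0.1, verbose=2)
# COMMAND ----------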
|
lamastex/scalable-data-science
|
dbcArchives/2021/000_6-sds-3-x-dl/056_DLbyABr_04a-Hands-On-MNIST-MLP.py
|
Python
|
unlicense
| 7,004
|
[
"NEURON"
] |
bdf0ed423c8a2590983dfca3307d41422d9f2f013efa729f5f4d2dcf34ae2326
|
""" Schemas for structural stacks. """
import datajoint as dj
from datajoint.jobs import key_hash
import matplotlib.pyplot as plt
import numpy as np
import scanreader
from scipy import signal
from scipy import ndimage
from scipy import optimize
import itertools
from . import experiment, notify, shared, reso, meso
anatomy = dj.create_virtual_module('pipeline_anatomy','pipeline_anatomy')
from .utils import galvo_corrections, stitching, performance, enhancement
from .utils.signal import mirrconv, float2uint8
from .exceptions import PipelineException
""" Note on our coordinate system:
Our stack/motor coordinate system is consistent with numpy's: z in the first axis pointing
downwards, y in the second axis pointing towards you and x on the third axis pointing to
the right.
"""
dj.config['external-stack'] = {'protocol': 'file',
'location': '/mnt/dj-stor01/pipeline-externals'}
dj.config['cache'] = '/tmp/dj-cache'
schema = dj.schema('pipeline_stack', locals(), create_tables=False)
@schema
class StackInfo(dj.Imported):
definition = """ # master table with general data about the stacks
-> experiment.Stack
---
nrois : tinyint # number of ROIs
nchannels : tinyint # number of channels
fill_fraction : float # raster scan temporal fill fraction (see scanimage)
"""
class ROI(dj.Part):
definition = """ # 3-D volumes that compose this stack (usually tiled to form a bigger fov)
-> StackInfo
roi_id : tinyint # same as ScanImage's
---
-> experiment.Stack.Filename
field_ids : blob # list of field_ids (0-index) sorted from shallower to deeper
roi_z : float # (um) center of ROI in the motor coordinate system (cortex is at 0)
roi_y : float # (um) center of ROI in the motor coordinate system
roi_x : float # (um) center of ROI in the motor coordinate system
roi_px_depth : smallint # number of slices
roi_px_height : smallint # lines per frame
roi_px_width : smallint # pixels per line
roi_um_depth : float # depth in microns
roi_um_height : float # height in microns
roi_um_width : float # width in microns
nframes : smallint # number of recorded frames per plane
fps : float # (Hz) volumes per second
bidirectional : boolean # true = bidirectional scanning
is_slow : boolean # whether all frames in one depth were recorded before moving to the next
"""
def _make_tuples(self, key, stack, id_in_file):
# Create results tuple
tuple_ = key.copy()
# Get field_ids ordered from shallower to deeper field in this ROI
surf_z = (experiment.Stack() & key).fetch1('surf_depth') # surface depth in fastZ coordinates (meso) or motor coordinates (reso)
if stack.is_multiROI:
field_ids = [i for i, field_roi in enumerate(stack.field_rois) if
id_in_file in field_roi]
field_depths = [stack.field_depths[i] - surf_z for i in field_ids]
else:
field_ids = range(stack.num_scanning_depths)
motor_zero = surf_z - stack.motor_position_at_zero[2]
if stack.is_slow_stack and not stack.is_slow_stack_with_fastZ: # using motor
initial_fastZ = stack.initial_secondary_z or 0
field_depths = [motor_zero - stack.field_depths[i] + 2 * initial_fastZ
for i in field_ids]
else: # using fastZ
field_depths = [motor_zero + stack.field_depths[i] for i in field_ids]
field_depths, field_ids = zip(*sorted(zip(field_depths, field_ids)))
tuple_['field_ids'] = field_ids
# Get reso/meso specific coordinates
x_zero, y_zero, _ = stack.motor_position_at_zero # motor x, y at ScanImage's 0
if stack.is_multiROI:
tuple_['roi_y'] = y_zero + stack._degrees_to_microns(stack.fields[
field_ids[0]].y)
tuple_['roi_x'] = x_zero + stack._degrees_to_microns(stack.fields[
field_ids[0]].x)
tuple_['roi_px_height'] = stack.field_heights[field_ids[0]]
tuple_['roi_px_width'] = stack.field_widths[field_ids[0]]
tuple_['roi_um_height'] = stack.field_heights_in_microns[field_ids[0]]
tuple_['roi_um_width'] = stack.field_widths_in_microns[field_ids[0]]
else:
tuple_['roi_y'] = y_zero
tuple_['roi_x'] = x_zero
tuple_['roi_px_height'] = stack.image_height
tuple_['roi_px_width'] = stack.image_width
# Estimate height and width in microns using measured FOVs for similar setups
fov_rel = (experiment.FOV() * experiment.Session() * experiment.Stack() &
key & 'session_date>=fov_ts')
zooms = fov_rel.fetch('mag').astype(np.float32) # zooms measured in same setup
closest_zoom = zooms[np.argmin(np.abs(np.log(zooms / stack.zoom)))]
dims = (fov_rel & 'ABS(mag - {}) < 1e-4'.format(closest_zoom)).fetch1(
'height', 'width')
um_height, um_width = [float(um) * (closest_zoom / stack.zoom) for um in
dims]
tuple_['roi_um_height'] = um_height * stack._y_angle_scale_factor
tuple_['roi_um_width'] = um_width * stack._x_angle_scale_factor
# Get common parameters
z_step = field_depths[1] - field_depths[0]
tuple_['roi_z'] = field_depths[0] + (field_depths[-1] - field_depths[0]) / 2
tuple_['roi_px_depth'] = len(field_ids)
tuple_['roi_um_depth'] = field_depths[-1] - field_depths[0] + z_step
tuple_['nframes'] = stack.num_frames
tuple_['fps'] = stack.fps
tuple_['bidirectional'] = stack.is_bidirectional
tuple_['is_slow'] = stack.is_slow_stack
self.insert1(tuple_)
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in depth, height and width. """
um_dims = self.fetch1('roi_um_depth', 'roi_um_height', 'roi_um_width')
px_dims = self.fetch1('roi_px_depth', 'roi_px_height', 'roi_px_width')
return np.array([um_dim / px_dim for um_dim, px_dim in zip(um_dims, px_dims)])
def _make_tuples(self, key):
""" Read and store stack information."""
print('Reading header...')
# Read files forming this stack
filename_keys = (experiment.Stack.Filename() & key).fetch(dj.key)
stacks = []
for filename_key in filename_keys:
stack_filename = (experiment.Stack.Filename() &
filename_key).local_filenames_as_wildcard
stacks.append(scanreader.read_scan(stack_filename))
num_rois_per_file = [(s.num_rois if s.is_multiROI else 1) for s in stacks]
# Create Stack tuple
tuple_ = key.copy()
tuple_['nrois'] = np.sum(num_rois_per_file)
tuple_['nchannels'] = stacks[0].num_channels
tuple_['fill_fraction'] = stacks[0].temporal_fill_fraction
# Insert Stack
self.insert1(tuple_)
# Insert ROIs
roi_id = 1
for filename_key, num_rois, stack in zip(filename_keys, num_rois_per_file,
stacks):
for roi_id_in_file in range(num_rois):
roi_key = {**key, **filename_key, 'roi_id': roi_id}
StackInfo.ROI()._make_tuples(roi_key, stack, roi_id_in_file)
roi_id += 1
# Fill in CorrectionChannel if only one channel
if stacks[0].num_channels == 1:
CorrectionChannel().fill(key)
@schema
class Quality(dj.Computed):
definition = """ # different quality metrics for a scan (before corrections)
-> StackInfo
"""
class MeanIntensity(dj.Part):
definition = """ # mean intensity per frame and slice
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
intensities : longblob # num_slices x num_frames
"""
class SummaryFrames(dj.Part):
definition = """ # mean slice at 8 different depths
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
summary : longblob # h x w x 8
"""
class Contrast(dj.Part):
definition = """ # difference between 99 and 1 percentile per frame and slice
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
contrasts : longblob # num_slices x num_frames
"""
def _make_tuples(self, key):
print('Computing quality metrics for stack', key)
# Insert in Quality
self.insert1(key)
for roi_tuple in (StackInfo.ROI() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
for channel in range((StackInfo() & key).fetch1('nchannels')):
# Map: Compute quality metrics in each field
f = performance.parallel_quality_stack # function to map
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=channel)
# Reduce: Collect results
mean_intensities = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['nframes']))
contrasts = np.empty((roi_tuple['roi_px_depth'], roi_tuple['nframes']))
for field_idx, field_mis, field_contrasts, _ in results:
mean_intensities[field_idx] = field_mis
contrasts[field_idx] = field_contrasts
frames = [res[3] for res in sorted(results, key=lambda res: res[0])]
frames = np.stack(frames[:: int(len(frames) / 8)], axis=-1) # frames at 8 diff depths
# Insert
roi_key = {**key, 'roi_id': roi_tuple['roi_id'], 'channel': channel + 1}
self.MeanIntensity().insert1({**roi_key, 'intensities': mean_intensities})
self.Contrast().insert1({**roi_key, 'contrasts': contrasts})
self.SummaryFrames().insert1({**roi_key, 'summary': frames})
self.notify(roi_key, frames, mean_intensities, contrasts)
@notify.ignore_exceptions
def notify(self, key, summary_frames, mean_intensities, contrasts):
# Send summary frames
import imageio
video_filename = '/tmp/' + key_hash(key) + '.gif'
percentile_99th = np.percentile(summary_frames, 99.5)
summary_frames = np.clip(summary_frames, None, percentile_99th)
summary_frames = float2uint8(summary_frames).transpose([2, 0, 1])
imageio.mimsave(video_filename, summary_frames, duration=0.4)
msg = ('summary frames for {animal_id}-{session}-{stack_idx} channel '
'{channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg)
# Send intensity and contrasts
figsize = (min(4, contrasts.shape[1] / 10 + 1), contrasts.shape[0] / 30 + 1) # set heuristically
fig, axes = plt.subplots(1, 2, figsize=figsize, sharex=True, sharey=True)
fig.tight_layout()
axes[0].set_title('Mean intensity', size='small')
axes[0].imshow(mean_intensities)
axes[0].set_ylabel('Slices')
axes[0].set_xlabel('Frames')
axes[1].set_title('Contrast (99 - 1 percentile)', size='small')
axes[1].imshow(contrasts)
axes[1].set_xlabel('Frames')
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('quality images for {animal_id}-{session}-{stack_idx} channel '
'{channel}').format(**key)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectionChannel(dj.Manual):
definition = """ # channel to use for raster and motion correction
-> experiment.Stack
---
-> shared.Channel
"""
def fill(self, key, channel=1):
for stack_key in (StackInfo() & key).fetch(dj.key):
self.insert1({**stack_key, 'channel': channel}, ignore_extra_fields=True,
skip_duplicates=True)
@schema
class RasterCorrection(dj.Computed):
definition = """ # raster correction for bidirectional resonant scans
-> StackInfo.ROI # animal_id, session, stack_idx, roi_id, version
-> CorrectionChannel # animal_id, session, stack_idx
---
raster_phase : float # difference between expected and recorded scan angle
raster_std : float # standard deviation among raster phases in different slices
"""
def _make_tuples(self, key):
""" Compute raster phase discarding top and bottom 15% of slices and tapering
edges to avoid edge artifacts."""
print('Computing raster correction for ROI', key)
# Get some params
res = (StackInfo.ROI() & key).fetch1('bidirectional', 'roi_px_height',
'roi_px_width', 'field_ids')
is_bidirectional, image_height, image_width, field_ids = res
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
if is_bidirectional:
# Read the ROI
filename_rel = (experiment.Stack.Filename() & (StackInfo.ROI() & key))
roi_filename = filename_rel.local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Compute some parameters
skip_fields = max(1, int(round(len(field_ids) * 0.10)))
taper = np.sqrt(np.outer(signal.tukey(image_height, 0.4),
signal.tukey(image_width, 0.4)))
# Compute raster phase for each slice and take the median
raster_phases = []
for field_id in field_ids[skip_fields: -2 * skip_fields]:
# Create template (average frame tapered to avoid edge artifacts)
slice_ = roi[field_id, :, :, correction_channel, :].astype(np.float32,
copy=False)
anscombed = 2 * np.sqrt(slice_ - slice_.min(axis=(0, 1)) + 3 / 8) # anscombe transform
template = np.mean(anscombed, axis=-1) * taper
# Compute raster correction
raster_phases.append(galvo_corrections.compute_raster_phase(template,
roi.temporal_fill_fraction))
raster_phase = np.median(raster_phases)
raster_std = np.std(raster_phases)
else:
raster_phase = 0
raster_std = 0
# Insert
self.insert1({**key, 'raster_phase': raster_phase, 'raster_std': raster_std})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
msg = ('raster phase for {animal_id}-{session}-{stack_idx} roi {roi_id}: '
'{phase}').format(**key, phase=(self & key).fetch1('raster_phase'))
(notify.SlackUser() & (experiment.Session() & key)).notify(msg)
def correct(self, roi):
""" Correct roi with parameters extracted from self. In place.
:param np.array roi: ROI (fields, image_height, image_width, frames).
"""
raster_phase = self.fetch1('raster_phase')
fill_fraction = (StackInfo() & self).fetch1('fill_fraction')
if abs(raster_phase) < 1e-7:
corrected = roi.astype(np.float32, copy=False)
else:
corrected = roi # in_place
for i, field in enumerate(roi):
corrected[i] = galvo_corrections.correct_raster(field, raster_phase,
fill_fraction)
return corrected
@schema
class MotionCorrection(dj.Computed):
definition = """ # motion correction for each slice in the stack
-> RasterCorrection
---
y_shifts : longblob # y motion correction shifts (num_slices x num_frames)
x_shifts : longblob # x motion correction shifts (num_slices x num_frames)
"""
def _make_tuples(self, key):
""" Compute motion shifts to align frames over time and over slices."""
print('Computing motion correction for ROI', key)
# Get some params
res = (StackInfo.ROI() & key).fetch1('nframes', 'roi_px_height', 'roi_px_width',
'field_ids')
num_frames, image_height, image_width, field_ids = res
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
y_shifts = np.zeros([len(field_ids), num_frames])
x_shifts = np.zeros([len(field_ids), num_frames])
if num_frames > 1:
# Read the ROI
filename_rel = (experiment.Stack.Filename() & (StackInfo.ROI() & key))
roi_filename = filename_rel.local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Compute some params
skip_rows = int(round(image_height * 0.10))
skip_cols = int(round(image_width * 0.10))
# Map: Compute shifts in parallel
f = performance.parallel_motion_stack # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
max_y_shift, max_x_shift = 20 / (StackInfo.ROI() & key).microns_per_pixel[1:]
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=correction_channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'skip_rows': skip_rows,
'skip_cols': skip_cols,
'max_y_shift': max_y_shift,
'max_x_shift': max_x_shift})
# Reduce: Collect results
for field_idx, y_shift, x_shift in results:
y_shifts[field_idx] = y_shift
x_shifts[field_idx] = x_shift
# Insert
self.insert1({**key, 'y_shifts': y_shifts, 'x_shifts': x_shifts})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
fps, is_slow_stack = (StackInfo.ROI() & key).fetch1('fps', 'is_slow')
num_slices, num_frames = y_shifts.shape
fps = fps * (num_slices if is_slow_stack else 1)
seconds = np.arange(num_frames) / fps
fig, axes = plt.subplots(2, 1, figsize=(13, 10), sharex=True, sharey=True)
axes[0].set_title('Shifts in y for all slices')
axes[0].set_ylabel('Pixels')
axes[0].plot(seconds, y_shifts.T)
axes[1].set_title('Shifts in x for all slices')
axes[1].set_ylabel('Pixels')
axes[1].set_xlabel('Seconds')
axes[1].plot(seconds, x_shifts.T)
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename)
plt.close(fig)
msg = 'motion shifts for {animal_id}-{session}-{stack_idx} roi {roi_id}'.format(
**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def save_as_tiff(self, filename='roi.tif', channel=1):
""" Correct roi and save as a tiff file.
:param int channel: What channel to use. Starts at 1
"""
from tifffile import imsave
# Get some params
res = (StackInfo.ROI() & self).fetch1('field_ids', 'roi_px_depth',
'roi_px_height', 'roi_px_width')
field_ids, px_depth, px_height, px_width = res
# Load ROI
roi_filename = (experiment.Stack.Filename() & self).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & self).fetch1('raster_phase')
fill_fraction = (StackInfo() & self).fetch1('fill_fraction')
y_shifts, x_shifts = self.fetch1('y_shifts', 'x_shifts')
results = performance.map_fields(f, roi, field_ids=field_ids, channel=channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts})
# Reduce: Collect results
corrected_roi = np.empty((px_depth, px_height, px_width), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
print('Saving file at:', filename)
imsave(filename, corrected_roi)
@schema
class Stitching(dj.Computed):
definition = """ # stitches together overlapping rois
-> StackInfo
"""
@property
def key_source(self):
return StackInfo() - (StackInfo.ROI() - MotionCorrection()) # run iff all ROIs have been processed
class Volume(dj.Part):
definition = """ # union of ROIs from a stack (usually one volume per stack)
-> Stitching
volume_id : tinyint # id of this volume
"""
class ROICoordinates(dj.Part):
definition = """ # coordinates for each ROI in the stitched volume
-> Stitching # animal_id, session, stack_idx, version
-> MotionCorrection # animal_id, session, stack_idx, version, roi_id
---
-> Stitching.Volume # volume to which this ROI belongs
stitch_ys : blob # (px) center of each slice in a volume-wise coordinate system
stitch_xs : blob # (px) center of each slice in a volume-wise coordinate system
"""
def _make_tuples(self, key):
""" Stitch overlapping ROIs together and correct slice-to-slice alignment.
Iteratively stitches two overlapping ROIs if the overlapping dimension has the
same length (up to some relative tolerance). Stitching params are calculated per
slice.
        Edge case: when two overlapping ROIs have different px/micron resolutions
            they won't be joined even if their true heights are the same (as pixel
            heights will not match); conversely, pixel heights could happen to match
            even though the true heights differ, and they'll then be erroneously stitched.
"""
print('Stitching ROIs for stack', key)
# Get some params
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
# Read and correct ROIs forming this stack
print('Correcting ROIs...')
rois = []
for roi_tuple in (StackInfo.ROI() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & roi_tuple).fetch1('raster_phase')
fill_fraction = (StackInfo() & roi_tuple).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & roi_tuple).fetch1('y_shifts',
'x_shifts')
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=correction_channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts,
'apply_anscombe': True})
# Reduce: Collect results
corrected_roi = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['roi_px_height'],
roi_tuple['roi_px_width']), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
# Create ROI object
um_per_px = (StackInfo.ROI() & (StackInfo.ROI().proj() &
roi_tuple)).microns_per_pixel
px_z, px_y, px_x = np.array([roi_tuple['roi_{}'.format(dim)] for dim in
['z', 'y', 'x']]) / um_per_px
rois.append(stitching.StitchedROI(corrected_roi, x=px_x, y=px_y, z=px_z,
id_=roi_tuple['roi_id']))
def enhance(image, sigmas):
""" Enhance 2p image. See enhancement.py for details."""
return enhancement.sharpen_2pimage(enhancement.lcn(image, sigmas))
def join_rows(rois_):
""" Iteratively join all rois that overlap in the same row."""
sorted_rois = sorted(rois_, key=lambda roi: (roi.x, roi.y))
prev_num_rois = float('inf')
while len(sorted_rois) < prev_num_rois:
prev_num_rois = len(sorted_rois)
for left, right in itertools.combinations(sorted_rois, 2):
if left.is_aside_to(right):
roi_key = {**key, 'roi_id': left.roi_coordinates[0].id}
um_per_px = (StackInfo.ROI() & roi_key).microns_per_pixel
# Compute stitching shifts
neighborhood_size = 25 / um_per_px[1:]
left_ys, left_xs = [], []
for l, r in zip(left.slices, right.slices):
left_slice = enhance(l.slice, neighborhood_size)
right_slice = enhance(r.slice, neighborhood_size)
delta_y, delta_x = stitching.linear_stitch(left_slice,
right_slice,
r.x - l.x)
left_ys.append(r.y - delta_y)
left_xs.append(r.x - delta_x)
# Fix outliers
max_y_shift, max_x_shift = 10 / um_per_px[1:]
left_ys, left_xs, _ = galvo_corrections.fix_outliers(
np.array(left_ys), np.array(left_xs), max_y_shift,
max_x_shift, method='linear')
# Stitch together
right.join_with(left, left_xs, left_ys)
sorted_rois.remove(left)
break # restart joining
return sorted_rois
# Stitch overlapping rois recursively
print('Computing stitching parameters...')
prev_num_rois = float('Inf') # to enter the loop at least once
while len(rois) < prev_num_rois:
prev_num_rois = len(rois)
# Join rows
rois = join_rows(rois)
# Join columns
[roi.rot90() for roi in rois]
rois = join_rows(rois)
[roi.rot270() for roi in rois]
# Compute slice-to slice alignment
print('Computing slice-to-slice alignment...')
for roi in rois:
big_volume = roi.volume
num_slices, image_height, image_width = big_volume.shape
roi_key = {**key, 'roi_id': roi.roi_coordinates[0].id}
um_per_px = (StackInfo.ROI() & roi_key).microns_per_pixel
# Enhance
neighborhood_size = 25 / um_per_px[1:]
for i in range(num_slices):
big_volume[i] = enhance(big_volume[i], neighborhood_size)
# Drop 10% of the image borders
skip_rows = int(round(image_height * 0.1))
skip_columns = int(round(image_width * 0.1))
big_volume = big_volume[:, skip_rows:-skip_rows, skip_columns: -skip_columns]
y_aligns = np.zeros(num_slices)
x_aligns = np.zeros(num_slices)
for i in range(1, num_slices):
# Align current slice to previous one
y_aligns[i], x_aligns[i] = galvo_corrections.compute_motion_shifts(
big_volume[i], big_volume[i - 1], in_place=False)
# Fix outliers
max_y_shift, max_x_shift = 15 / um_per_px[1:]
y_fixed, x_fixed, _ = galvo_corrections.fix_outliers(y_aligns, x_aligns,
max_y_shift, max_x_shift)
# Accumulate shifts so shift i is shift in i -1 plus shift to align i to i-1
y_cumsum, x_cumsum = np.cumsum(y_fixed), np.cumsum(x_fixed)
# Detrend to discard influence of vessels going through the slices
filter_size = int(round(60 / um_per_px[0])) # 60 microns in z
filter_size += 1 if filter_size % 2 == 0 else 0
if len(y_cumsum) > filter_size:
smoothing_filter = signal.hann(filter_size)
smoothing_filter /= sum(smoothing_filter)
y_detrend = y_cumsum - mirrconv(y_cumsum, smoothing_filter)
x_detrend = x_cumsum - mirrconv(x_cumsum, smoothing_filter)
else:
y_detrend = y_cumsum - y_cumsum.mean()
x_detrend = x_cumsum - x_cumsum.mean()
# Apply alignment shifts in roi
for slice_, y_align, x_align in zip(roi.slices, y_detrend, x_detrend):
slice_.y -= y_align
slice_.x -= x_align
for roi_coord in roi.roi_coordinates:
roi_coord.ys = [prev_y - y_align for prev_y, y_align in zip(roi_coord.ys,
y_detrend)]
roi_coord.xs = [prev_x - x_align for prev_x, x_align in zip(roi_coord.xs,
x_detrend)]
# Insert in Stitching
print('Inserting...')
self.insert1(key)
# Insert each stitched volume
for volume_id, roi in enumerate(rois, start=1):
self.Volume().insert1({**key, 'volume_id': volume_id})
# Insert coordinates of each ROI forming this volume
for roi_coord in roi.roi_coordinates:
tuple_ = {**key, 'roi_id': roi_coord.id, 'volume_id': volume_id,
'stitch_xs': roi_coord.xs, 'stitch_ys': roi_coord.ys}
self.ROICoordinates().insert1(tuple_)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
slack_user = (notify.SlackUser() & (experiment.Session() & key))
for volume_key in (self.Volume() & key).fetch('KEY'):
for roi_coord in (self.ROICoordinates() & volume_key).fetch(as_dict=True):
center_z, num_slices, um_depth = (StackInfo.ROI() & roi_coord).fetch1(
'roi_z', 'roi_px_depth', 'roi_um_depth')
first_z = center_z - um_depth / 2 + (um_depth / num_slices) / 2
depths = first_z + (um_depth / num_slices) * np.arange(num_slices)
fig, axes = plt.subplots(2, 1, figsize=(15, 8), sharex=True)
axes[0].set_title('Center position (x)')
axes[0].plot(depths, roi_coord['stitch_xs'])
axes[1].set_title('Center position (y)')
axes[1].plot(depths, roi_coord['stitch_ys'])
axes[0].set_ylabel('Pixels')
axes[0].set_xlabel('Depths')
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('stitch traces for {animal_id}-{session}-{stack_idx} volume '
'{volume_id} roi {roi_id}').format(**roi_coord)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectedStack(dj.Computed):
definition = """ # all slices of each stack after corrections.
-> Stitching.Volume # animal_id, session, stack_idx, volume_id
---
z : float # (um) center of volume in the motor coordinate system (cortex is at 0)
y : float # (um) center of volume in the motor coordinate system
x : float # (um) center of volume in the motor coordinate system
px_depth : smallint # number of slices
px_height : smallint # lines per frame
px_width : smallint # pixels per line
um_depth : float # depth in microns
um_height : float # height in microns
um_width : float # width in microns
surf_z : float # (um) depth of first slice - half a z step (cortex is at z=0)
"""
class Slice(dj.Part):
definition = """ # single slice of one stack
-> CorrectedStack
-> shared.Channel
islice : smallint # index of slice in volume
---
slice : longblob # image (height x width)
"""
def _make_tuples(self, key):
print('Correcting stack', key)
for channel in range((StackInfo() & key).fetch1('nchannels')):
# Correct ROIs
rois = []
for roi_tuple in (StackInfo.ROI() * Stitching.ROICoordinates() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & roi_tuple).fetch1('raster_phase')
fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & roi_tuple).fetch1('y_shifts',
'x_shifts')
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts})
# Reduce: Collect results
corrected_roi = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['roi_px_height'],
roi_tuple['roi_px_width']), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
# Create ROI object (with pixel x, y, z coordinates)
px_z = roi_tuple['roi_z'] * (roi_tuple['roi_px_depth'] /
roi_tuple['roi_um_depth'])
ys = list(roi_tuple['stitch_ys'])
xs = list(roi_tuple['stitch_xs'])
rois.append(stitching.StitchedROI(corrected_roi, x=xs, y=ys, z=px_z,
id_=roi_tuple['roi_id']))
def join_rows(rois_):
""" Iteratively join all rois that overlap in the same row."""
sorted_rois = sorted(rois_, key=lambda roi: (roi.x, roi.y))
prev_num_rois = float('inf')
while len(sorted_rois) < prev_num_rois:
prev_num_rois = len(sorted_rois)
for left, right in itertools.combinations(sorted_rois, 2):
if left.is_aside_to(right):
left_xs = [s.x for s in left.slices]
left_ys = [s.y for s in left.slices]
right.join_with(left, left_xs, left_ys)
sorted_rois.remove(left)
break # restart joining
return sorted_rois
# Stitch all rois together. This is convoluted because smooth blending in
# join_with assumes rois are next to (not below or atop of) each other
prev_num_rois = float('Inf') # to enter the loop at least once
while len(rois) < prev_num_rois:
prev_num_rois = len(rois)
# Join rows
rois = join_rows(rois)
# Join columns
[roi.rot90() for roi in rois]
rois = join_rows(rois)
[roi.rot270() for roi in rois]
# Check stitching went alright
if len(rois) > 1:
msg = 'ROIs for volume {} could not be stitched properly'.format(key)
raise PipelineException(msg)
stitched = rois[0]
# Insert in CorrectedStack
roi_info = StackInfo.ROI() & key & {'roi_id': stitched.roi_coordinates[0].id}
um_per_px = roi_info.microns_per_pixel
tuple_ = key.copy()
tuple_['z'] = stitched.z * um_per_px[0]
tuple_['y'] = stitched.y * um_per_px[1]
tuple_['x'] = stitched.x * um_per_px[2]
tuple_['px_depth'] = stitched.depth
tuple_['px_height'] = stitched.height
tuple_['px_width'] = stitched.width
tuple_['um_depth'] = roi_info.fetch1('roi_um_depth') # same as original rois
tuple_['um_height'] = stitched.height * um_per_px[1]
tuple_['um_width'] = stitched.width * um_per_px[2]
tuple_['surf_z'] = (stitched.z - stitched.depth / 2) * um_per_px[0]
self.insert1(tuple_, skip_duplicates=True)
# Insert each slice
for i, slice_ in enumerate(stitched.volume):
self.Slice().insert1({**key, 'channel': channel + 1, 'islice': i + 1,
'slice': slice_})
self.notify({**key, 'channel': channel + 1})
@notify.ignore_exceptions
def notify(self, key):
import imageio
volume = (self & key).get_stack(channel=key['channel'])
volume = volume[:: int(volume.shape[0] / 8)] # volume at 8 diff depths
video_filename = '/tmp/' + key_hash(key) + '.gif'
imageio.mimsave(video_filename, float2uint8(volume), duration=1)
msg = ('corrected stack for {animal_id}-{session}-{stack_idx} volume {volume_id} '
'channel {channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg,
channel='#pipeline_quality')
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in depth, height and width. """
um_dims = self.fetch1('um_depth', 'um_height', 'um_width')
px_dims = self.fetch1('px_depth', 'px_height', 'px_width')
return np.array([um_dim / px_dim for um_dim, px_dim in zip(um_dims, px_dims)])
def get_stack(self, channel=1):
""" Get full stack (num_slices, height, width).
:param int channel: What channel to use. Starts at 1
:returns The stack: a (num_slices, image_height, image_width) array.
:rtype: np.array (float32)
"""
slice_rel = (CorrectedStack.Slice() & self & {'channel': channel})
slices = slice_rel.fetch('slice', order_by='islice')
return np.stack(slices)
def save_as_tiff(self, filename='stack.tif'):
""" Save current stack as a tiff file."""
from tifffile import imsave
# Create a composite interleaving channels
height, width, depth = self.fetch1('px_height', 'px_width', 'px_depth')
num_channels = (StackInfo() & self).fetch1('nchannels')
composite = np.zeros([num_channels * depth, height, width], dtype=np.float32)
for i in range(num_channels):
composite[i::num_channels] = self.get_stack(i + 1)
# Save
print('Saving file at:', filename)
imsave(filename, composite)
def save_video(self, filename='stack.mp4', channel=1, fps=10, dpi=250):
""" Creates an animation video showing a fly-over of the stack (top to bottom).
:param string filename: Output filename (path + filename)
:param int channel: What channel to use. Starts at 1
:param int fps: Number of slices shown per second.
:param int dpi: Dots per inch, controls the quality of the video.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from matplotlib import animation
stack = self.get_stack(channel=channel)
num_slices = stack.shape[0]
fig, axes = plt.subplots(1, 1, sharex=True, sharey=True)
im = fig.gca().imshow(stack[int(num_slices / 2)])
video = animation.FuncAnimation(fig, lambda i: im.set_data(stack[i]), num_slices,
interval=1000 / fps)
fig.tight_layout()
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi,
'(default)')
video.save(filename, dpi=dpi)
return fig
@schema
class PreprocessedStack(dj.Computed):
definition = """ # Resize to 1 um^3, apply local contrast normalization and sharpen
-> CorrectedStack
-> shared.Channel
---
resized: external-stack # original stack resized to 1 um^3
lcned: external-stack # local contrast normalized stack. Filter size: (3, 25, 25)
sharpened: external-stack # sharpened stack. Filter size: 1
"""
@property
def key_source(self):
# restrict each stack to its channels
return (CorrectedStack * shared.Channel).proj() & CorrectedStack.Slice.proj()
def make(self, key):
from .utils import registration
from .utils import enhancement
# Load stack
stack = (CorrectedStack() & key).get_stack(key['channel'])
# Resize to be 1 um^3
um_sizes = (CorrectedStack & key).fetch1('um_depth', 'um_height', 'um_width')
resized = registration.resize(stack, um_sizes, desired_res=1)
# Enhance
lcned = enhancement.lcn(resized, (3, 25, 25))
# Sharpen
sharpened = enhancement.sharpen_2pimage(lcned, 1)
# Insert
self.insert1({**key, 'resized': resized, 'lcned': lcned, 'sharpened': sharpened})
@schema
class Surface(dj.Computed):
definition = """ # Calculated surface of the brain
-> PreprocessedStack
-> shared.SurfaceMethod
---
guessed_points : longblob # Array of guessed depths stored in (z,y,x) format
surface_im : longblob # Matrix of fitted depth for each pixel in stack. Value is number of pixels to surface from top of array.
lower_bound_im : longblob # Lower bound of 95th percentile confidence interval
upper_bound_im : longblob # Upper bound of 95th percentile confidence interval
"""
def make(self, key):
# WARNINGS
# - This code assumes the surface will be in the top half of the stack
# - Only the top half of z-values are analyzed
# - Points along the edge are dropped to avoid errors due to blank space left by stack registration
# - This code assumes the surface median intensity should be in the bottom 60% of the range of values over z
# - ex. Intensities ranges from 10-20. Surface points must have an intensity < .6*(20-10) + 10 = 17.5
# - This is within the 2r x 2r window being analyzed
# - This code drops any 2r x 2r field where the first median value is above the 30th-percentile of the whole stack.
# - Windows where the final median intensity is below 10 are removed
# - Attempts to replace this with a percentile all fail
# - This code drops guessed depths > 95th-percentile and < 5th-percentile to be more robust to outliers
valid_method_ids = [1] # Used to check if method is implemented
# SETTINGS
        # Note: Initial parameters for fitting are set further down
r = 50 # Radius of square in pixels
upper_threshold_percent = 0.6 # Surface median intensity should be in the bottom X% of the *range* of medians
gaussian_blur_size = 5 # Size of gaussian blur applied to slice
min_points_allowed = 10 # If there are less than X points after filtering, throw an error
bounds = ([0, 0, np.NINF, np.NINF, np.NINF], [np.Inf, np.Inf, np.Inf, np.Inf, np.Inf]) # Bounds for paraboloid fit
ss_percent = 0.40 # Percentage of points to subsample for robustness check
num_iterations = 1000 # Number of iterations to use for robustness check
# DEFINITIONS
def surface_eqn(data, a, b, c, d, f):
x, y = data
return a * x ** 2 + b * y ** 2 + c * x + d * y + f
# MAIN BODY
if int(key['surface_method_id']) not in valid_method_ids:
raise PipelineException(f'Error: surface_method_id {key["surface_method_id"]} is not implemented')
print('Calculating surface of brain for stack', key)
full_stack = (PreprocessedStack & key).fetch1('resized')
depth, height, width = full_stack.shape
surface_guess_map = []
r_xs = np.arange(r, width - width % r, r * 2)[1:-1]
r_ys = np.arange(r, height - height % r, r * 2)[1:-1]
full_mesh_x, full_mesh_y = np.meshgrid(np.arange(width), np.arange(height))
# Surface z should be below this value
z_lim = int(depth / 2)
# Mean intensity of the first frame in the slice should be less than this value
z_0_upper_threshold = np.percentile(full_stack, 30)
for x in r_xs:
for y in r_ys:
stack_slice_medians = np.percentile(full_stack[0:z_lim, y - r:y + r, x - r:x + r], 50, axis=(1, 2))
blurred_slice = ndimage.gaussian_filter1d(stack_slice_medians, gaussian_blur_size)
                # Threshold = percent * (max - min) + min, matching the documented example above
                upper_threshold_value = (upper_threshold_percent *
                                         (blurred_slice.max() - blurred_slice.min()) + blurred_slice.min())
upper_threshold_idx = np.where(blurred_slice > upper_threshold_value)[0][0]
stack_slice_derivative = ndimage.sobel(blurred_slice)
surface_z = np.argmax(stack_slice_derivative)
if ((surface_z < upper_threshold_idx) and (blurred_slice[0] < z_0_upper_threshold) and
(blurred_slice[-1] > 10)):
surface_guess_map.append((surface_z, y, x))
if len(surface_guess_map) < min_points_allowed:
raise PipelineException(f"Surface calculation could not find enough valid points for {key}. Only "
f"{len(surface_guess_map)} detected")
# Drop the z-values lower than 5th-percentile or greater than 95th-percentile
arr = np.array(surface_guess_map)
top = np.percentile(arr[:, 0], 95)
bot = np.percentile(arr[:, 0], 5)
surface_guess_map = arr[np.logical_and(arr[:, 0] > bot, arr[:, 0] < top)]
# Guess for initial parameters
initial = [1, 1, int(width / 2), int(height / 2), 1]
popt, pcov = optimize.curve_fit(surface_eqn, (surface_guess_map[:, 2], surface_guess_map[:, 1]),
surface_guess_map[:, 0], p0=initial, maxfev=10000, bounds=bounds)
calculated_surface_map = surface_eqn((full_mesh_x, full_mesh_y), *popt)
all_sub_fitted_z = np.zeros((num_iterations, height, width))
for i in np.arange(num_iterations):
indices = np.random.choice(surface_guess_map.shape[0], int(surface_guess_map.shape[0] * ss_percent),
replace=False)
subsample = surface_guess_map[indices]
sub_popt, sub_pcov = optimize.curve_fit(surface_eqn, (subsample[:, 2], subsample[:, 1]), subsample[:, 0],
p0=initial, maxfev=10000, bounds=bounds)
all_sub_fitted_z[i, :, :] = surface_eqn((full_mesh_x, full_mesh_y), *sub_popt)
z_min_matrix = np.percentile(all_sub_fitted_z, 5, axis=0)
z_max_matrix = np.percentile(all_sub_fitted_z, 95, axis=0)
surface_key = {**key, 'guessed_points': surface_guess_map, 'surface_im': calculated_surface_map,
'lower_bound_im': z_min_matrix, 'upper_bound_im': z_max_matrix}
self.insert1(surface_key)
def plot_surface3d(self, fig_height=7, fig_width=9):
""" Plot guessed surface points and fitted surface mesh in 3D
:param fig_height: Height of returned figure
:param fig_width: Width of returned figure
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
surface_guess_map, fitted_surface = self.fetch1('guessed_points', 'surface_im')
surface_height, surface_width = fitted_surface.shape
mesh_x, mesh_y = np.meshgrid(np.arange(surface_width), np.arange(surface_height))
fig = plt.figure(figsize=(fig_width, fig_height))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(mesh_x, mesh_y, fitted_surface, cmap=cm.coolwarm, linewidth=0, antialiased=False,
alpha=0.5)
ax.scatter(surface_guess_map[:, 2], surface_guess_map[:, 1], surface_guess_map[:, 0], color='grey')
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.invert_zaxis()
return fig
def plot_surface2d(self, r=50, z=None, fig_height=10, fig_width=20):
""" Plot grid of guessed points and fitted surface depths spaced 2r apart on top of stack slice at depth = z
:param r: Defines radius of square for each grid point
:param z: Pixel depth of stack to show behind depth grid
:param fig_height: Height of returned figure
:param fig_width: Width of returned figure
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from matplotlib import cm
full_stack = (PreprocessedStack & self).fetch1('resized')
stack_depth, stack_height, stack_width = full_stack.shape
surface_guess_map, fitted_surface = self.fetch1('guessed_points', 'surface_im')
fig, axes = plt.subplots(1, 2, figsize=(fig_width, fig_height))
r_xs = np.arange(r, stack_width - stack_width % r, r * 2)
r_ys = np.arange(r, stack_height - stack_height % r, r * 2)
r_mesh_x, r_mesh_y = np.meshgrid(r_xs, r_ys)
# Using median of depth to pick slice of stack to show if not defined
if z is None:
z = np.median(fitted_surface)
if z < 0 or z > stack_depth:
            raise PipelineException(f'Error: Z parameter {z} is out of bounds for stack with depth {stack_depth}')
vmin = np.min((np.min(fitted_surface), np.min(surface_guess_map[:, 0])))
vmax = np.max((np.max(fitted_surface), np.max(surface_guess_map[:, 0])))
guessed_scatter = axes[0].scatter(x=surface_guess_map[:, 2], y=surface_guess_map[:, 1],
c=surface_guess_map[:, 0], cmap=cm.hot, vmin=vmin, vmax=vmax)
fitted_scatter = axes[1].scatter(x=r_mesh_x, y=r_mesh_y, c=fitted_surface[r_mesh_y, r_mesh_x], cmap=cm.hot,
vmin=vmin, vmax=vmax)
for point in surface_guess_map:
axes[0].annotate(int(point[0]), (point[2], point[1]), color='white')
for x in r_xs:
for y in r_ys:
axes[1].annotate(int(fitted_surface[y, x]), (x, y), color='white')
fig.colorbar(guessed_scatter, ax=axes[0], fraction=0.05)
axes[0].set_title(f'Guessed Depth, Z = {int(z)}')
fig.colorbar(fitted_scatter, ax=axes[1], fraction=0.05)
axes[1].set_title(f'Fitted Depth, Z = {int(z)}')
for ax in axes:
ax.imshow(full_stack[int(z), :, :])
ax.set_axis_off()
return fig
@schema
class SegmentationTask(dj.Manual):
definition = """ # defines the target, the method and the channel to use for segmentation
-> CorrectedStack
-> shared.Channel
-> shared.StackSegmMethod
---
-> experiment.Compartment
"""
def fill(self, key, channel=1, stacksegm_method=2, compartment='soma'):
for stack_key in (CorrectedStack() & key).fetch(dj.key):
tuple_ = {**stack_key, 'channel': channel,
'stacksegm_method': stacksegm_method,
'compartment': compartment}
self.insert1(tuple_, ignore_extra_fields=True, skip_duplicates=True)
@schema
class Segmentation(dj.Computed):
definition = """ # 3-d stack segmentation
-> PreprocessedStack
-> SegmentationTask
---
segmentation : external-stack # voxel-wise cell-ids (0 for background)
nobjects : int # number of cells found
"""
class ConvNet(dj.Part):
definition = """ # attributes particular to convnet based methods
-> master
---
centroids : external-stack # voxel-wise probability of centroids
probs : external-stack # voxel-wise probability of cell nuclei
seg_threshold : float # threshold used for the probability maps
min_voxels : int # minimum number of voxels (in cubic microns)
max_voxels : int # maximum number of voxels (in cubic microns)
compactness_factor : float # compactness factor used for the watershed segmentation
"""
def _make_tuples(self, key):
from .utils import segmentation3d
# Set params
seg_threshold = 0.8
min_voxels = 65 # sphere of diameter 5
max_voxels = 4186 # sphere of diameter 20
compactness_factor = 0.05 # bigger produces rounder cells
pad_mode = 'reflect' # any valid mode in np.pad
# Get stack at 1 um**3 voxels
resized = (PreprocessedStack & key).fetch1('resized')
# Segment
if key['stacksegm_method'] not in [1, 2]:
raise PipelineException('Unrecognized stack segmentation method: {}'.format(
key['stacksegm_method']))
method = 'single' if key['stacksegm_method'] == 1 else 'ensemble'
centroids, probs, segmentation = segmentation3d.segment(resized, method, pad_mode,
seg_threshold, min_voxels,
max_voxels,
compactness_factor)
# Insert
self.insert1({**key, 'nobjects': segmentation.max(),
'segmentation': segmentation})
self.ConvNet().insert1({**key, 'centroids': centroids, 'probs': probs,
'seg_threshold': seg_threshold, 'min_voxels': min_voxels,
'max_voxels': max_voxels,
'compactness_factor': compactness_factor})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
import imageio
from bl3d import utils
volume = (self & key).fetch1('segmentation')
volume = volume[:: int(volume.shape[0] / 8)] # volume at 8 diff depths
colored = utils.colorize_label(volume)
video_filename = '/tmp/' + key_hash(key) + '.gif'
imageio.mimsave(video_filename, colored, duration=1)
msg = 'segmentation for {animal_id}-{session}-{stack_idx}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg,
channel='#pipeline_quality')
@schema
class RegistrationTask(dj.Manual):
definition = """ # declare scan fields to register to a stack as well as channels and method used
-> CorrectedStack.proj(stack_session='session') # animal_id, stack_session, stack_idx, volume_id
-> shared.Channel.proj(stack_channel='channel')
-> experiment.Scan.proj(scan_session='session') # animal_id, scan_session, scan_idx
-> shared.Channel.proj(scan_channel='channel')
-> shared.Field
-> shared.RegistrationMethod
"""
def fill(self, stack_key, scan_key, stack_channel=1, scan_channel=1, method=5):
# Add stack attributes
stack_rel = CorrectedStack() & stack_key
if len(stack_rel) > 1:
raise PipelineException('More than one stack match stack_key {}'.format(
stack_key))
tuple_ = stack_rel.proj(stack_session='session').fetch1()
# Add common attributes
tuple_['stack_channel'] = stack_channel
tuple_['scan_channel'] = scan_channel
tuple_['registration_method'] = method
# Add scan attributes
fields_rel = reso.ScanInfo.Field.proj() + meso.ScanInfo.Field.proj() & scan_key
scan_animal_ids = np.unique(fields_rel.fetch('animal_id'))
if len(scan_animal_ids) > 1 or scan_animal_ids[0] != tuple_['animal_id']:
raise PipelineException('animal_id of stack and scan do not match.')
for field in fields_rel.fetch():
RegistrationTask().insert1({**tuple_, 'scan_session': field['session'],
'scan_idx': field['scan_idx'],
'field': field['field']}, skip_duplicates=True)
@schema
class Registration(dj.Computed):
""" Our affine matrix A is represented as the usual 4 x 4 matrix using homogeneous
coordinates, i.e., each point p is an [x, y, z, 1] vector.
Because each field is flat, the original z coordinate will be the same at each grid
position (zero) and thus it won't affect its final position, so our affine matrix has
only 9 parameters: a11, a21, a31, a12, a22, a32, a14, a24 and a34.
"""
definition = """ # align a 2-d scan field to a stack
-> PreprocessedStack.proj(stack_session='session', stack_channel='channel')
-> RegistrationTask
"""
@property
def key_source(self):
stacks = PreprocessedStack.proj(stack_session='session', stack_channel='channel')
return stacks * RegistrationTask & {'registration_method': 5}
class Rigid(dj.Part):
definition = """ # 3-d template matching keeping the stack straight
-> master
---
reg_x : float # (um) center of field in motor coordinate system
reg_y : float # (um) center of field in motor coordinate system
reg_z : float # (um) center of field in motor coordinate system
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Affine(dj.Part):
definition = """ # affine matrix learned via gradient ascent
-> master
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class NonRigid(dj.Part):
definition = """ # affine plus deformation field learned via gradient descent
-> master
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
landmarks : longblob # (um) x, y position of each landmark (num_landmarks x 2) assuming center of field is at (0, 0)
deformations : longblob # (um) x, y, z deformations per landmark (num_landmarks x 3)
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Params(dj.Part):
definition = """ # document some parameters used for the registration
-> master
---
rigid_zrange : int # microns above and below experimenter's estimate (in z) to search for rigid registration
lr_linear : float # learning rate for the linear part of the affine matrix
lr_translation : float # learning rate for the translation vector
affine_iters : int # number of iterations to learn the affine registration
random_seed : int # seed used to initialize landmark deformations
landmark_gap : int # number of microns between landmarks
rbf_radius : int # critical radius for the gaussian radial basis function
lr_deformations : float # learning rate for the deformation values
wd_deformations : float # regularization term to control size of the deformations
smoothness_factor : float # regularization term to control curvature of warping field
nonrigid_iters : int # number of iterations to optimize for the non-rigid parameters
"""
def make(self, key):
from .utils import registration
from .utils import enhancement
# Set params
rigid_zrange = 80 # microns to search above and below estimated z for rigid registration
lr_linear = 0.001 # learning rate / step size for the linear part of the affine matrix
lr_translation = 1 # learning rate / step size for the translation vector
affine_iters = 200 # number of optimization iterations to learn the affine parameters
random_seed = 1234 # seed for torch random number generator (used to initialize deformations)
landmark_gap = 100 # spacing for the landmarks
rbf_radius = 150 # critical radius for the gaussian rbf
lr_deformations = 0.1 # learning rate / step size for deformation values
wd_deformations = 1e-4 # weight decay for deformations; controls their size
smoothness_factor = 0.01 # factor to keep the deformation field smooth
nonrigid_iters = 200 # number of optimization iterations for the nonrigid parameters
# Get enhanced stack
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stack_channel']}
original_stack = (PreprocessedStack & stack_key).fetch1('resized')
stack = (PreprocessedStack & stack_key).fetch1('sharpened')
# Get field
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field'],
'channel': key['scan_channel']}
pipe = (reso if reso.ScanInfo & field_key else meso if meso.ScanInfo & field_key
else None)
original_field = (pipe.SummaryImages.Average & field_key).fetch1(
'average_image').astype(np.float32)
# Enhance field
field_dims = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).fetch1('um_height', 'um_width')
original_field = registration.resize(original_field, field_dims, desired_res=1)
field = enhancement.sharpen_2pimage(enhancement.lcn(original_field, (15, 15)), 1)
# Drop some edges to avoid artifacts
field = field[15:-15, 15:-15]
stack = stack[5:-5, 15:-15, 15:-15]
# RIGID REGISTRATION
from skimage import feature
# Get initial estimate of field depth from experimenters
field_z = (pipe.ScanInfo.Field & field_key).fetch1('z')
stack_z = (CorrectedStack & stack_key).fetch1('z')
z_limits = stack_z - stack.shape[0] / 2, stack_z + stack.shape[0] / 2
if field_z < z_limits[0] or field_z > z_limits[1]:
print('Warning: Estimated depth ({}) outside stack range ({}-{}).'.format(
field_z, *z_limits))
# Run registration with no rotations
px_z = field_z - stack_z + stack.shape[0] / 2 - 0.5
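# px_z converts the motor-coordinate depth estimate into a stack slice index: the
# stack spans stack_z +/- stack.shape[0] / 2 in motor coordinates, and the -0.5 term
# accounts for slice centers (slice i is centered at i + 0.5).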
mini_stack = stack[max(0, int(round(px_z - rigid_zrange))): int(round(
px_z + rigid_zrange))]
corrs = np.stack([feature.match_template(s, field, pad_input=True) for s in
mini_stack])
smooth_corrs = ndimage.gaussian_filter(corrs, 0.7)
# Get results
min_z = max(0, int(round(px_z - rigid_zrange)))
min_y = int(round(0.05 * stack.shape[1]))
min_x = int(round(0.05 * stack.shape[2]))
mini_corrs = smooth_corrs[:, min_y:-min_y, min_x:-min_x]
rig_z, rig_y, rig_x = np.unravel_index(np.argmax(mini_corrs), mini_corrs.shape)
# Rewrite coordinates relative to the center of the stack
rig_z = (min_z + rig_z + 0.5) - stack.shape[0] / 2
rig_y = (min_y + rig_y + 0.5) - stack.shape[1] / 2
rig_x = (min_x + rig_x + 0.5) - stack.shape[2] / 2
del (field_z, stack_z, z_limits, px_z, mini_stack, corrs, smooth_corrs, min_z,
min_y, min_x, mini_corrs)
# AFFINE REGISTRATION
import torch
from torch import optim
import torch.nn.functional as F
def sample_grid(volume, grid):
""" Volume is a d x h x w arrray, grid is a d1 x d2 x 3 (x, y, z) coordinates
and output is a d1 x d2 array"""
norm_factor = torch.as_tensor([s / 2 - 0.5 for s in volume.shape[::-1]])
norm_grid = grid / norm_factor # between -1 and 1
resampled = F.grid_sample(volume.view(1, 1, *volume.shape),
norm_grid.view(1, 1, *norm_grid.shape),
padding_mode='zeros')
return resampled.squeeze()
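# Note: F.grid_sample expects sampling coordinates normalized to [-1, 1] per
# dimension; dividing by (size / 2 - 0.5) does exactly that, assuming the grid
# coordinates are pixel-center positions measured from the volume center.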
# Create field grid (height x width x 2)
grid = registration.create_grid(field.shape)
# Create torch tensors
stack_ = torch.as_tensor(stack, dtype=torch.float32)
field_ = torch.as_tensor(field, dtype=torch.float32)
grid_ = torch.as_tensor(grid, dtype=torch.float32)
# Define parameters and optimizer
linear = torch.nn.Parameter(torch.eye(3)[:, :2]) # first two columns of rotation matrix
translation = torch.nn.Parameter(torch.tensor([rig_x, rig_y, rig_z])) # translation vector
affine_optimizer = optim.Adam([{'params': linear, 'lr': lr_linear},
{'params': translation, 'lr': lr_translation}])
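# The transform being optimized (via registration.affine_product) maps each 2-d grid
# point p = [x, y] to linear @ p + translation, a 3 x 2 linear map plus a 3-vector
# offset, so at iteration 0 (identity columns plus the rigid translation) it
# reproduces the rigid registration exactly.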
# Optimize
for i in range(affine_iters):
# Zero gradients
affine_optimizer.zero_grad()
# Compute gradients
pred_grid = registration.affine_product(grid_, linear, translation) # w x h x 3
pred_field = sample_grid(stack_, pred_grid)
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
print('Corr at iteration {}: {:5.4f}'.format(i, -corr_loss))
corr_loss.backward()
# Update
affine_optimizer.step()
# Save them (originals will be modified during non-rigid registration)
affine_linear = linear.detach().clone()
affine_translation = translation.detach().clone()
# NON-RIGID REGISTRATION
# Inspired by the demons algorithm (Thirion, 1998)
torch.manual_seed(random_seed) # we use random initialization below
# Create landmarks (and their corresponding deformations)
first_y = int(round((field.shape[0] % landmark_gap) / 2))
first_x = int(round((field.shape[1] % landmark_gap) / 2))
landmarks = grid_[first_x::landmark_gap, first_y::landmark_gap].contiguous().view(
-1, 2) # num_landmarks x 2
# Compute rbf scores between landmarks and grid coordinates and between landmarks
grid_distances = torch.norm(grid_.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2) # w x h x num_landmarks
landmark_distances = torch.norm(landmarks.unsqueeze(-2) - landmarks, dim=-1)
landmark_scores = torch.exp(-(landmark_distances * (1 / 200)) ** 2) # num_landmarks x num_landmarks
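# Each landmark contributes a gaussian radial basis weight,
#   grid_scores[..., k] = exp(-(||grid - landmark_k|| / rbf_radius) ** 2),
# so the warping field computed below is a smooth, landmark-weighted sum of the
# per-landmark deformations; landmark_scores plays the same role between landmarks
# (with a fixed 200 um radius) and is used to regularize neighboring deformations.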
# Define parameters and optimizer
deformations = torch.nn.Parameter(torch.randn((landmarks.shape[0], 3)) / 10) # N(0, 0.1)
nonrigid_optimizer = optim.Adam([deformations], lr=lr_deformations,
weight_decay=wd_deformations)
# Optimize
for i in range(nonrigid_iters):
# Zero gradients
affine_optimizer.zero_grad() # we reuse affine_optimizer so the affine matrix changes slowly
nonrigid_optimizer.zero_grad()
# Compute grid with radial basis
affine_grid = registration.affine_product(grid_, linear, translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid)
# Compute loss
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
# Compute cosine similarity between landmarks (and weight them by distance)
norm_deformations = deformations / torch.norm(deformations, dim=-1,
keepdim=True)
cosine_similarity = torch.mm(norm_deformations, norm_deformations.t())
reg_term = -((cosine_similarity * landmark_scores).sum() /
landmark_scores.sum())
# Compute gradients
loss = corr_loss + smoothness_factor * reg_term
print('Corr/loss at iteration {}: {:5.4f}/{:5.4f}'.format(i, -corr_loss,
loss))
loss.backward()
# Update
affine_optimizer.step()
nonrigid_optimizer.step()
# Save final results
nonrigid_linear = linear.detach().clone()
nonrigid_translation = translation.detach().clone()
nonrigid_landmarks = landmarks.clone()
nonrigid_deformations = deformations.detach().clone()
# COMPUTE SCORES (USING THE ENHANCED AND CROPPED VERSION OF THE FIELD)
# Rigid
pred_grid = registration.affine_product(grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
pred_field = sample_grid(stack_, pred_grid).numpy()
rig_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Affine
pred_grid = registration.affine_product(grid_, affine_linear, affine_translation)
pred_field = sample_grid(stack_, pred_grid).numpy()
affine_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Non-rigid
affine_grid = registration.affine_product(grid_, nonrigid_linear,
nonrigid_translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, nonrigid_deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid).numpy()
nonrigid_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# FIND FIELDS IN STACK
# Create grid of original size (h x w x 2)
original_grid = registration.create_grid(original_field.shape)
# Create torch tensors
original_stack_ = torch.as_tensor(original_stack, dtype=torch.float32)
original_grid_ = torch.as_tensor(original_grid, dtype=torch.float32)
# Rigid
pred_grid = registration.affine_product(original_grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
rig_field = sample_grid(original_stack_, pred_grid).numpy()
# Affine
pred_grid = registration.affine_product(original_grid_, affine_linear,
affine_translation)
affine_field = sample_grid(original_stack_, pred_grid).numpy()
# Non-rigid
affine_grid = registration.affine_product(original_grid_, nonrigid_linear,
nonrigid_translation)
original_grid_distances = torch.norm(original_grid_.unsqueeze(-2) -
nonrigid_landmarks, dim=-1)
original_grid_scores = torch.exp(-(original_grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (original_grid_scores,
nonrigid_deformations))
pred_grid = affine_grid + warping_field
nonrigid_field = sample_grid(original_stack_, pred_grid).numpy()
# Insert
stack_z, stack_y, stack_x = (CorrectedStack & stack_key).fetch1('z', 'y', 'x')
self.insert1(key)
self.Params.insert1({**key, 'rigid_zrange': rigid_zrange, 'lr_linear': lr_linear,
'lr_translation': lr_translation,
'affine_iters': affine_iters, 'random_seed': random_seed,
'landmark_gap': landmark_gap, 'rbf_radius': rbf_radius,
'lr_deformations': lr_deformations,
'wd_deformations': wd_deformations,
'smoothness_factor': smoothness_factor,
'nonrigid_iters': nonrigid_iters})
self.Rigid.insert1({**key, 'reg_x': stack_x + rig_x, 'reg_y': stack_y + rig_y,
'reg_z': stack_z + rig_z, 'score': rig_score,
'reg_field': rig_field})
self.Affine.insert1({**key, 'a11': affine_linear[0, 0].item(),
'a21': affine_linear[1, 0].item(),
'a31': affine_linear[2, 0].item(),
'a12': affine_linear[0, 1].item(),
'a22': affine_linear[1, 1].item(),
'a32': affine_linear[2, 1].item(),
'reg_x': stack_x + affine_translation[0].item(),
'reg_y': stack_y + affine_translation[1].item(),
'reg_z': stack_z + affine_translation[2].item(),
'score': affine_score, 'reg_field': affine_field})
self.NonRigid.insert1({**key, 'a11': nonrigid_linear[0, 0].item(),
'a21': nonrigid_linear[1, 0].item(),
'a31': nonrigid_linear[2, 0].item(),
'a12': nonrigid_linear[0, 1].item(),
'a22': nonrigid_linear[1, 1].item(),
'a32': nonrigid_linear[2, 1].item(),
'reg_x': stack_x + nonrigid_translation[0].item(),
'reg_y': stack_y + nonrigid_translation[1].item(),
'reg_z': stack_z + nonrigid_translation[2].item(),
'landmarks': nonrigid_landmarks.numpy(),
'deformations': nonrigid_deformations.numpy(),
'score': nonrigid_score, 'reg_field': nonrigid_field})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
# No notifications
pass
def get_grid(self, type='affine', desired_res=1):
""" Get registered grid for this registration. """
import torch
from .utils import registration
# Get field
field_key = self.proj(session='scan_session')
field_dims = (reso.ScanInfo & field_key or meso.ScanInfo.Field &
field_key).fetch1('um_height', 'um_width')
# Create grid at desired resolution
grid = registration.create_grid(field_dims, desired_res=desired_res) # h x w x 2
grid = torch.as_tensor(grid, dtype=torch.float32)
# Apply required transform
if type == 'rigid':
params = (Registration.Rigid & self).fetch1('reg_x', 'reg_y', 'reg_z')
delta_x, delta_y, delta_z = params
linear = torch.eye(3)[:, :2]
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'affine':
params = (Registration.Affine & self).fetch1('a11', 'a21', 'a31', 'a12',
'a22', 'a32', 'reg_x', 'reg_y',
'reg_z')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'nonrigid':
params = (Registration.NonRigid & self).fetch1('a11', 'a21', 'a31', 'a12',
'a22', 'a32', 'reg_x', 'reg_y',
'reg_z', 'landmarks',
'deformations')
rbf_radius = (Registration.Params & self).fetch1('rbf_radius')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z, landmarks, deformations = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
landmarks = torch.from_numpy(landmarks)
deformations = torch.from_numpy(deformations)
affine_grid = registration.affine_product(grid, linear, translation)
grid_distances = torch.norm(grid.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
else:
raise PipelineException('Unrecognized registration.')
return pred_grid.numpy()
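# Usage sketch (the restriction shown is illustrative):
#   grid = (Registration & restriction).get_grid(type='affine', desired_res=1)
#   # grid has shape (um_height, um_width, 3) with x, y, z in motor coordinates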
def plot_grids(self, desired_res=5):
""" Plot the grids for this different registrations as 3-d surfaces."""
# Get grids at desired resoultion
rig_grid = self.get_grid('rigid', desired_res)
affine_grid = self.get_grid('affine', desired_res)
nonrigid_grid = self.get_grid('nonrigid', desired_res)
# Plot surfaces
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
fig = plt.figure(figsize=plt.figaspect(0.5) * 1.5)
ax = fig.gca(projection='3d')
ax.plot_surface(rig_grid[..., 0], rig_grid[..., 1], rig_grid[..., 2], alpha=0.5)
ax.plot_surface(affine_grid[..., 0], affine_grid[..., 1], affine_grid[..., 2],
alpha=0.5)
ax.plot_surface(nonrigid_grid[..., 0], nonrigid_grid[..., 1],
nonrigid_grid[..., 2], alpha=0.5)
ax.set_aspect('equal')
ax.invert_zaxis()
return fig
@schema
class FieldSegmentation(dj.Computed):
definition = """ # structural segmentation of a 2-d field (using the affine registration)
-> Segmentation.proj(stack_session='session', stacksegm_channel='channel')
-> Registration
---
segm_field : longblob # field (image x height) of cell ids at 1 um/px
"""
class StackUnit(dj.Part):
definition = """ # single unit from the stack that appears in the field
-> master
sunit_id : int # id in the stack segmentation
---
depth : int # (um) size in z
height : int # (um) size in y
width : int # (um) size in x
volume : float # (um) volume of the 3-d unit
area : float # (um) area of the 2-d mask
sunit_z : float # (um) centroid for the 3d unit in the motor coordinate system
sunit_y : float # (um) centroid for the 3d unit in the motor coordinate system
sunit_x : float # (um) centroid for the 3d unit in the motor coordinate system
mask_z : float # (um) centroid for the 2d mask in the motor coordinate system
mask_y : float # (um) centroid for the 2d mask in the motor coordinate system
mask_x : float # (um) centroid for the 2d mask in the motor coordinate system
distance : float # (um) euclidean distance between centroid of 2-d mask and 3-d unit
"""
def _make_tuples(self, key):
from skimage import measure
# Get structural segmentation
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stacksegm_channel']}
instance = (Segmentation & stack_key).fetch1('segmentation')
# Get segmented field
grid = (Registration & key).get_grid(type='affine', desired_res=1)
stack_center = np.array((CorrectedStack & stack_key).fetch1('z', 'y', 'x'))
px_grid = (grid[..., ::-1] - stack_center - 0.5 + np.array(instance.shape) / 2)
segmented_field = ndimage.map_coordinates(instance, np.moveaxis(px_grid, -1, 0),
order=0) # nearest neighbor sampling
# Insert in FieldSegmentation
self.insert1({**key, 'segm_field': segmented_field})
# Insert each StackUnit
instance_props = measure.regionprops(instance)
instance_labels = np.array([p.label for p in instance_props])
for prop in measure.regionprops(segmented_field):
sunit_id = prop.label
instance_prop = instance_props[np.argmax(instance_labels == sunit_id)]
depth = (instance_prop.bbox[3] - instance_prop.bbox[0])
height = (instance_prop.bbox[4] - instance_prop.bbox[1])
width = (instance_prop.bbox[5] - instance_prop.bbox[2])
volume = instance_prop.area
sunit_z, sunit_y, sunit_x = (stack_center + np.array(instance_prop.centroid) -
np.array(instance.shape) / 2 + 0.5)
binary_sunit = segmented_field == sunit_id
area = np.count_nonzero(binary_sunit)
px_y, px_x = ndimage.measurements.center_of_mass(binary_sunit)
px_coords = np.array([[px_y], [px_x]])
mask_x, mask_y, mask_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
distance = np.sqrt((sunit_z - mask_z) ** 2 + (sunit_y - mask_y) ** 2 +
(sunit_x - mask_x) ** 2)
# Insert in StackUnit
self.StackUnit.insert1({**key, 'sunit_id': sunit_id, 'depth': depth,
'height': height, 'width': width, 'volume': volume,
'area': area, 'sunit_z': sunit_z, 'sunit_y': sunit_y,
'sunit_x': sunit_x, 'mask_z': mask_z,
'mask_y': mask_y, 'mask_x': mask_x,
'distance': distance})
@schema
class RegistrationOverTime(dj.Computed):
definition = """ # register a field at different timepoints of recording
-> PreprocessedStack.proj(stack_session='session', stack_channel='channel')
-> RegistrationTask
"""
@property
def key_source(self):
stacks = PreprocessedStack.proj(stack_session='session', stack_channel='channel')
return stacks * RegistrationTask & {'registration_method': 5}
class Chunk(dj.Part):
definition = """ # single registered chunk
-> master
frame_num : int # frame number of the frame in the middle of this chunk
---
initial_frame : int # initial frame used in this chunk (1-based)
final_frame : int # final frame used in this chunk (1-based)
avg_chunk : longblob # average field used for registration
"""
def get_grid(self, type='nonrigid', desired_res=1):
# TODO: Taken verbatim from Registration (minor changes for formatting), refactor
""" Get registered grid for this registration. """
import torch
from .utils import registration
# Get field
field_key = self.proj(session='scan_session')
field_dims = (reso.ScanInfo & field_key or meso.ScanInfo.Field &
field_key).fetch1('um_height', 'um_width')
# Create grid at desired resolution
grid = registration.create_grid(field_dims, desired_res=desired_res) # h x w x 2
grid = torch.as_tensor(grid, dtype=torch.float32)
# Apply required transform
if type == 'rigid':
params = (RegistrationOverTime.Rigid & self).fetch1('reg_x', 'reg_y',
'reg_z')
delta_x, delta_y, delta_z = params
linear = torch.eye(3)[:, :2]
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'affine':
params = (RegistrationOverTime.Affine & self).fetch1('a11', 'a21', 'a31',
'a12', 'a22', 'a32',
'reg_x', 'reg_y',
'reg_z')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'nonrigid':
params = (RegistrationOverTime.NonRigid & self).fetch1('a11', 'a21',
'a31', 'a12',
'a22', 'a32',
'reg_x', 'reg_y',
'reg_z',
'landmarks',
'deformations')
rbf_radius = (RegistrationOverTime.Params & self).fetch1('rbf_radius')
(a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z, landmarks,
deformations) = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
landmarks = torch.from_numpy(landmarks)
deformations = torch.from_numpy(deformations)
affine_grid = registration.affine_product(grid, linear, translation)
grid_distances = torch.norm(grid.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
else:
raise PipelineException('Unrecognized registration.')
return pred_grid.numpy()
class Rigid(dj.Part):
definition = """ # rigid registration of a single chunk
-> RegistrationOverTime.Chunk
---
reg_x : float # (um) center of field in motor coordinate system
reg_y : float # (um) center of field in motor coordinate system
reg_z : float # (um) center of field in motor coordinate system
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Affine(dj.Part):
definition = """ # affine matrix learned via gradient ascent
-> RegistrationOverTime.Chunk
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class NonRigid(dj.Part):
definition = """ # affine plus deformation field learned via gradient descent
-> RegistrationOverTime.Chunk
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
landmarks : longblob # (um) x, y position of each landmark (num_landmarks x 2) assuming center of field is at (0, 0)
deformations : longblob # (um) x, y, z deformations per landmark (num_landmarks x 3)
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Params(dj.Part):
definition = """ # document some parameters used for the registration
-> master
---
rigid_zrange : int # microns above and below experimenter's estimate (in z) to search for rigid registration
lr_linear : float # learning rate for the linear part of the affine matrix
lr_translation : float # learning rate for the translation vector
affine_iters : int # number of iterations to learn the affine registration
random_seed : int # seed used to initialize landmark deformations
landmark_gap : int # number of microns between landmarks
rbf_radius : int # critical radius for the gaussian radial basis function
lr_deformations : float # learning rate for the deformation values
wd_deformations : float # regularization term to control size of the deformations
smoothness_factor : float # regularization term to control curvature of warping field
nonrigid_iters : int # number of iterations to optimize for the non-rigid parameters
"""
def make(self, key):
from .utils import registration
from .utils import enhancement
# Set params
rigid_zrange = 80 # microns to search above and below estimated z for rigid registration
lr_linear = 0.001 # learning rate / step size for the linear part of the affine matrix
lr_translation = 1 # learning rate / step size for the translation vector
affine_iters = 200 # number of optimization iterations to learn the affine parameters
random_seed = 1234 # seed for torch random number generator (used to initialize deformations)
landmark_gap = 100 # spacing for the landmarks
rbf_radius = 150 # critical radius for the gaussian rbf
lr_deformations = 0.1 # learning rate / step size for deformation values
wd_deformations = 1e-4 # weight decay for deformations; controls their size
smoothness_factor = 0.01 # factor to keep the deformation field smooth
nonrigid_iters = 200 # number of optimization iterations for the nonrigid parameters
# Get enhanced stack
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stack_channel']}
original_stack = (PreprocessedStack & stack_key).fetch1('resized')
stack = (PreprocessedStack & stack_key).fetch1('sharpened')
stack = stack[5:-5, 15:-15, 15:-15] # drop some edges
# Get corrected scan
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field'],
'channel': key['scan_channel']}
pipe = (reso if reso.ScanInfo & field_key else meso if meso.ScanInfo & field_key
else None)
scan = RegistrationOverTime._get_corrected_scan(field_key)
# Get initial estimate of field depth from experimenters
field_z = (pipe.ScanInfo.Field & field_key).fetch1('z')
stack_z = (CorrectedStack & stack_key).fetch1('z')
z_limits = stack_z - stack.shape[0] / 2, stack_z + stack.shape[0] / 2
if field_z < z_limits[0] or field_z > z_limits[1]:
print('Warning: Estimated depth ({}) outside stack range ({}-{}).'.format(
field_z, *z_limits))
# Compute best chunk size: each lasts the same (~15 minutes)
fps = (pipe.ScanInfo & field_key).fetch1('fps')
num_frames = scan.shape[-1]
overlap = int(round(3 * 60 * fps)) # ~ 3 minutes
num_chunks = int(np.ceil((num_frames - overlap) / (15 * 60 * fps - overlap)))
chunk_size = int(np.floor((num_frames - overlap) / num_chunks + overlap)) # *
# * distributes frames in the last (incomplete) chunk to the other chunks
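# Worked example (illustrative numbers): at fps = 7.8 for a 60-minute scan
# (28080 frames), overlap = 1404 frames, num_chunks = 5 and chunk_size = 6739
# frames (~14.4 minutes per chunk, with consecutive chunks sharing ~3 minutes).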
# Insert in RegistrationOverTime and Params (once per field)
self.insert1(key)
self.Params.insert1(
{**key, 'rigid_zrange': rigid_zrange, 'lr_linear': lr_linear,
'lr_translation': lr_translation, 'affine_iters': affine_iters,
'random_seed': random_seed, 'landmark_gap': landmark_gap,
'rbf_radius': rbf_radius, 'lr_deformations': lr_deformations,
'wd_deformations': wd_deformations, 'smoothness_factor': smoothness_factor,
'nonrigid_iters': nonrigid_iters})
# Iterate over chunks
for initial_frame in range(0, num_frames - chunk_size, chunk_size - overlap):
# Get next chunk
final_frame = initial_frame + chunk_size
chunk = scan[..., initial_frame: final_frame]
# Enhance field
field_dims = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).fetch1('um_height', 'um_width')
original_field = registration.resize(chunk.mean(-1), field_dims,
desired_res=1)
field = enhancement.sharpen_2pimage(enhancement.lcn(original_field, 15), 1)
field = field[15:-15, 15:-15] # drop some edges
# TODO: From here until Insert is taken verbatim from Registration, refactor
# RIGID REGISTRATION
from skimage import feature
# Run registration with no rotations
px_z = field_z - stack_z + stack.shape[0] / 2 - 0.5
mini_stack = stack[max(0, int(round(px_z - rigid_zrange))): int(round(
px_z + rigid_zrange))]
corrs = np.stack([feature.match_template(s, field, pad_input=True) for s in
mini_stack])
smooth_corrs = ndimage.gaussian_filter(corrs, 0.7)
# Get results
min_z = max(0, int(round(px_z - rigid_zrange)))
min_y = int(round(0.05 * stack.shape[1]))
min_x = int(round(0.05 * stack.shape[2]))
mini_corrs = smooth_corrs[:, min_y:-min_y, min_x:-min_x]
rig_z, rig_y, rig_x = np.unravel_index(np.argmax(mini_corrs),
mini_corrs.shape)
# Rewrite coordinates relative to the center of the stack
rig_z = (min_z + rig_z + 0.5) - stack.shape[0] / 2
rig_y = (min_y + rig_y + 0.5) - stack.shape[1] / 2
rig_x = (min_x + rig_x + 0.5) - stack.shape[2] / 2
del px_z, mini_stack, corrs, smooth_corrs, min_z, min_y, min_x, mini_corrs
# AFFINE REGISTRATION
import torch
from torch import optim
import torch.nn.functional as F
def sample_grid(volume, grid):
""" Volume is a d x h x w arrray, grid is a d1 x d2 x 3 (x, y, z)
coordinates and output is a d1 x d2 array"""
norm_factor = torch.as_tensor([s / 2 - 0.5 for s in volume.shape[::-1]])
norm_grid = grid / norm_factor # between -1 and 1
resampled = F.grid_sample(volume.view(1, 1, *volume.shape),
norm_grid.view(1, 1, *norm_grid.shape),
padding_mode='zeros')
return resampled.squeeze()
# Create field grid (height x width x 2)
grid = registration.create_grid(field.shape)
# Create torch tensors
stack_ = torch.as_tensor(stack, dtype=torch.float32)
field_ = torch.as_tensor(field, dtype=torch.float32)
grid_ = torch.as_tensor(grid, dtype=torch.float32)
# Define parameters and optimizer
linear = torch.nn.Parameter(torch.eye(3)[:, :2]) # first two columns of rotation matrix
translation = torch.nn.Parameter(torch.tensor([rig_x, rig_y, rig_z])) # translation vector
affine_optimizer = optim.Adam([{'params': linear, 'lr': lr_linear},
{'params': translation, 'lr': lr_translation}])
# Optimize
for i in range(affine_iters):
# Zero gradients
affine_optimizer.zero_grad()
# Compute gradients
pred_grid = registration.affine_product(grid_, linear, translation) # w x h x 3
pred_field = sample_grid(stack_, pred_grid)
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
print('Corr at iteration {}: {:5.4f}'.format(i, -corr_loss))
corr_loss.backward()
# Update
affine_optimizer.step()
# Save them (originals will be modified during non-rigid registration)
affine_linear = linear.detach().clone()
affine_translation = translation.detach().clone()
# NON-RIGID REGISTRATION
# Inspired by the demons algorithm (Thirion, 1998)
torch.manual_seed(random_seed) # we use random initialization below
# Create landmarks (and their corresponding deformations)
first_y = int(round((field.shape[0] % landmark_gap) / 2))
first_x = int(round((field.shape[1] % landmark_gap) / 2))
landmarks = grid_[first_x::landmark_gap,
first_y::landmark_gap].contiguous().view(-1, 2) # num_landmarks x 2
# Compute rbf scores between landmarks and grid coordinates and between landmarks
grid_distances = torch.norm(grid_.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2) # w x h x num_landmarks
landmark_distances = torch.norm(landmarks.unsqueeze(-2) - landmarks, dim=-1)
landmark_scores = torch.exp(-(landmark_distances * (1 / 200)) ** 2) # num_landmarks x num_landmarks
# Define parameters and optimizer
deformations = torch.nn.Parameter(torch.randn((landmarks.shape[0], 3)) / 10) # N(0, 0.1)
nonrigid_optimizer = optim.Adam([deformations], lr=lr_deformations,
weight_decay=wd_deformations)
# Optimize
for i in range(nonrigid_iters):
# Zero gradients
affine_optimizer.zero_grad() # we reuse affine_optimizer so the affine matrix changes slowly
nonrigid_optimizer.zero_grad()
# Compute grid with radial basis
affine_grid = registration.affine_product(grid_, linear, translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid)
# Compute loss
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
# Compute cosine similarity between landmarks (and weight them by distance)
norm_deformations = deformations / torch.norm(deformations, dim=-1,
keepdim=True)
cosine_similarity = torch.mm(norm_deformations, norm_deformations.t())
reg_term = -((cosine_similarity * landmark_scores).sum() /
landmark_scores.sum())
# Compute gradients
loss = corr_loss + smoothness_factor * reg_term
print('Corr/loss at iteration {}: {:5.4f}/{:5.4f}'.format(i, -corr_loss,
loss))
loss.backward()
# Update
affine_optimizer.step()
nonrigid_optimizer.step()
# Save final results
nonrigid_linear = linear.detach().clone()
nonrigid_translation = translation.detach().clone()
nonrigid_landmarks = landmarks.clone()
nonrigid_deformations = deformations.detach().clone()
# COMPUTE SCORES (USING THE ENHANCED AND CROPPED VERSION OF THE FIELD)
# Rigid
pred_grid = registration.affine_product(grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
pred_field = sample_grid(stack_, pred_grid).numpy()
rig_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Affine
pred_grid = registration.affine_product(grid_, affine_linear,
affine_translation)
pred_field = sample_grid(stack_, pred_grid).numpy()
affine_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Non-rigid
affine_grid = registration.affine_product(grid_, nonrigid_linear,
nonrigid_translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, nonrigid_deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid).numpy()
nonrigid_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# FIND FIELDS IN STACK
# Create grid of original size (h x w x 2)
original_grid = registration.create_grid(original_field.shape)
# Create torch tensors
original_stack_ = torch.as_tensor(original_stack, dtype=torch.float32)
original_grid_ = torch.as_tensor(original_grid, dtype=torch.float32)
# Rigid
pred_grid = registration.affine_product(original_grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
rig_field = sample_grid(original_stack_, pred_grid).numpy()
# Affine
pred_grid = registration.affine_product(original_grid_, affine_linear,
affine_translation)
affine_field = sample_grid(original_stack_, pred_grid).numpy()
# Non-rigid
affine_grid = registration.affine_product(original_grid_, nonrigid_linear,
nonrigid_translation)
original_grid_distances = torch.norm(original_grid_.unsqueeze(-2) -
nonrigid_landmarks, dim=-1)
original_grid_scores = torch.exp(-(original_grid_distances *
(1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (original_grid_scores,
nonrigid_deformations))
pred_grid = affine_grid + warping_field
nonrigid_field = sample_grid(original_stack_, pred_grid).numpy()
# Insert chunk
stack_z, stack_y, stack_x = (CorrectedStack & stack_key).fetch1('z', 'y', 'x')
frame_num = int(round((initial_frame + final_frame) / 2))
self.Chunk.insert1({**key, 'frame_num': frame_num + 1,
'initial_frame': initial_frame + 1,
'final_frame': final_frame, 'avg_chunk': original_field})
self.Rigid.insert1({**key, 'frame_num': frame_num + 1,
'reg_x': stack_x + rig_x, 'reg_y': stack_y + rig_y,
'reg_z': stack_z + rig_z, 'score': rig_score,
'reg_field': rig_field})
self.Affine.insert1({**key, 'frame_num': frame_num + 1,
'a11': affine_linear[0, 0].item(),
'a21': affine_linear[1, 0].item(),
'a31': affine_linear[2, 0].item(),
'a12': affine_linear[0, 1].item(),
'a22': affine_linear[1, 1].item(),
'a32': affine_linear[2, 1].item(),
'reg_x': stack_x + affine_translation[0].item(),
'reg_y': stack_y + affine_translation[1].item(),
'reg_z': stack_z + affine_translation[2].item(),
'score': affine_score,
'reg_field': affine_field})
self.NonRigid.insert1({**key, 'frame_num': frame_num + 1,
'a11': nonrigid_linear[0, 0].item(),
'a21': nonrigid_linear[1, 0].item(),
'a31': nonrigid_linear[2, 0].item(),
'a12': nonrigid_linear[0, 1].item(),
'a22': nonrigid_linear[1, 1].item(),
'a32': nonrigid_linear[2, 1].item(),
'reg_x': stack_x + nonrigid_translation[0].item(),
'reg_y': stack_y + nonrigid_translation[1].item(),
'reg_z': stack_z + nonrigid_translation[2].item(),
'landmarks': nonrigid_landmarks.numpy(),
'deformations': nonrigid_deformations.numpy(),
'score': nonrigid_score, 'reg_field': nonrigid_field})
# self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
frame_num, zs, scores = (self.Affine & key).fetch('frame_num', 'reg_z', 'score')
plt.plot(frame_num, -zs, zorder=1)
plt.scatter(frame_num, -zs, marker='*', s=scores * 70, zorder=2, color='r')
plt.title('Registration over time (star size represents confidence)')
plt.ylabel('z (surface at 0)')
plt.xlabel('Frames')
img_filename = '/tmp/{}.png'.format(key_hash(key))
plt.savefig(img_filename)
plt.close()
msg = ('registration over time of {animal_id}-{scan_session}-{scan_idx} field '
'{field} to {animal_id}-{stack_session}-{stack_idx}')
msg = msg.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key &
{'session': key['stack_session']})
slack_user.notify(file=img_filename, file_title=msg)
def _get_corrected_scan(key):
# Read scan
scan_filename = (experiment.Scan & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get some params
pipe = reso if (reso.ScanInfo() & key) else meso
# Map: Correct scan in parallel
f = performance.parallel_correct_scan # function to map
raster_phase = (pipe.RasterCorrection & key).fetch1('raster_phase')
fill_fraction = (pipe.ScanInfo & key).fetch1('fill_fraction')
y_shifts, x_shifts = (pipe.MotionCorrection & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts}
results = performance.map_frames(f, scan, field_id=key['field'] - 1,
channel=key['channel'] - 1, kwargs=kwargs)
# Reduce: Make a single array (height x width x num_frames)
height, width, _ = results[0][1].shape
corrected_scan = np.zeros([height, width, scan.num_frames], dtype=np.float32)
for frames, chunk in results:
corrected_scan[..., frames] = chunk
return corrected_scan
def session_plot(self):
""" Create a registration plot for the session"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Check that plot is restricted to a single stack and a single session
regot_key = self.fetch('KEY', limit=1)[0]
stack_key = {n: regot_key[n] for n in ['animal_id', 'stack_session', 'stack_idx',
'volume_id']}
session_key = {n: regot_key[n] for n in ['animal_id', 'scan_session']}
if len(self & stack_key) != len(self):
raise PipelineException('Plot can only be generated for one stack at a time')
if len(self & session_key) != len(self):
raise PipelineException('Plot can only be generated for one session at a '
'time')
# Get field times and depths
ts = []
zs = []
session_ts = (experiment.Session & regot_key &
{'session': regot_key['scan_session']}).fetch1('session_ts')
for key in self.fetch('KEY'):
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field']}
scan_ts = (experiment.Scan & field_key).fetch1('scan_ts')
fps = (reso.ScanInfo & field_key or meso.ScanInfo & field_key).fetch1('fps')
frame_nums, field_zs = (RegistrationOverTime.Affine & key).fetch('frame_num',
'reg_z')
field_ts = (scan_ts - session_ts).total_seconds() + frame_nums / fps # in seconds
ts.append(field_ts)
zs.append(field_zs)
# Plot
fig = plt.figure(figsize=(20, 8))
for ts_, zs_ in zip(ts, zs):
plt.plot(ts_ / 3600, zs_)
plt.title('Registered zs for {animal_id}-{scan_session} into {animal_id}-'
'{stack_session}-{stack_idx} starting at {t}'.format(t=session_ts,
**regot_key))
plt.ylabel('Registered zs')
plt.xlabel('Hours')
# Plot formatting
plt.gca().invert_yaxis()
plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(10))
plt.grid(linestyle='--', alpha=0.8)
return fig
@schema
class Drift(dj.Computed):
definition = """ # assuming a linear drift, compute the rate of drift (of the affine registration)
-> RegistrationOverTime
---
z_slope : float # (um/hour) drift of the center of the field
y_slope : float # (um/hour) drift of the center of the field
x_slope : float # (um/hour) drift of the center of the field
z_rmse : float # (um) root mean squared error of the fit
y_rmse : float # (um) root mean squared error of the fit
x_rmse : float # (um) root mean squared error of the fit
"""
@property
def key_source(self):
return RegistrationOverTime.aggr(RegistrationOverTime.Chunk.proj(),
nchunks='COUNT(*)') & 'nchunks > 1'
def _make_tuples(self, key):
from sklearn import linear_model
# Get drifts per axis
frame_nums, zs, ys, xs = (RegistrationOverTime.Affine & key).fetch('frame_num',
'reg_z', 'reg_y', 'reg_x')
# Get scan fps
field_key = {**key, 'session': key['scan_session']}
fps = (reso.ScanInfo() & field_key or meso.ScanInfo() & field_key).fetch1('fps')
# Fit a line through the values (robust regression)
slopes = []
rmses = []
X = frame_nums.reshape(-1, 1)
for y in [zs, ys, xs]:
model = linear_model.TheilSenRegressor()
model.fit(X, y)
slopes.append(model.coef_[0] * fps * 3600)
rmses.append(np.sqrt(np.mean((y - model.predict(X)) ** 2)))
self.insert1({**key, 'z_slope': slopes[0], 'y_slope': slopes[1],
'x_slope': slopes[2], 'z_rmse': rmses[0], 'y_rmse': rmses[1],
'x_rmse': rmses[2]})
def session_plot(self):
""" Create boxplots for the session (one per scan)."""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Check that plot is restricted to a single stack and a single session
regot_key = self.fetch('KEY', limit=1)[0]
stack_key = {n: regot_key[n] for n in ['animal_id', 'stack_session', 'stack_idx',
'volume_id']}
session_key = {n: regot_key[n] for n in ['animal_id', 'scan_session']}
if len(self & stack_key) != len(self):
raise PipelineException('Plot can only be generated for one stack at a time')
if len(self & session_key) != len(self):
raise PipelineException('Plot can only be generated for one session at a '
'time')
# Get field times and depths
z_slopes = []
scan_idxs = np.unique(self.fetch('scan_idx'))
for scan_idx in scan_idxs:
scan_slopes = (self & {**session_key, 'scan_idx': scan_idx}).fetch('z_slope')
z_slopes.append(scan_slopes)
# Plot
fig = plt.figure(figsize=(7, 4))
plt.boxplot(z_slopes)
plt.title('Z drift for {animal_id}-{scan_session} into {animal_id}-'
'{stack_session}-{stack_idx}'.format(**regot_key))
plt.ylabel('Z drift (um/hour)')
plt.xlabel('Scans')
plt.xticks(range(1, len(scan_idxs) + 1), scan_idxs)
# Plot formatting
plt.gca().invert_yaxis()
plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(5))
plt.grid(linestyle='--', alpha=0.8)
return fig
@schema
class StackSet(dj.Computed):
definition = """ # match segmented masks by proximity in the stack
-> CorrectedStack.proj(stack_session='session') # animal_id, stack_session, stack_idx, volume_id
-> shared.RegistrationMethod
-> shared.SegmentationMethod
---
min_distance :tinyint # distance used as threshold to accept two masks as the same
max_height :tinyint # maximum allowed height of a joint mask
"""
@property
def key_source(self):
return (CorrectedStack.proj(stack_session='session') *
shared.RegistrationMethod.proj() * shared.SegmentationMethod.proj() &
Registration & {'segmentation_method': 6})
class Unit(dj.Part):
definition = """ # a unit in the stack
-> master
munit_id :int # unique id in the stack
---
munit_x :float # (um) position of centroid in motor coordinate system
munit_y :float # (um) position of centroid in motor coordinate system
munit_z :float # (um) position of centroid in motor coordinate system
"""
class Match(dj.Part):
definition = """ # Scan unit to stack unit match (n:1 relation)
-> master
-> experiment.Scan.proj(scan_session='session') # animal_id, scan_session, scan_idx
unit_id :int # unit id from ScanSet.Unit
---
-> StackSet.Unit
"""
class MatchedUnit():
""" Coordinates for a set of masks that form a single cell."""
def __init__(self, key, x, y, z, plane_id):
self.keys = [key]
self.xs = [x]
self.ys = [y]
self.zs = [z]
self.plane_ids = [plane_id]
self.centroid = [x, y, z]
def join_with(self, other):
self.keys += other.keys
self.xs += other.xs
self.ys += other.ys
self.zs += other.zs
self.plane_ids += other.plane_ids
self.centroid = [np.mean(self.xs), np.mean(self.ys), np.mean(self.zs)]
def __lt__(self, other):
""" Used for sorting. """
return True
def make(self, key):
from scipy.spatial import distance
import bisect
# Set some params
min_distance = 10
max_height = 20
# Create list of units
units = [] # stands for matched units
for field in Registration & key:
# Edge case: when two channels are registered, we don't know which to use
if len(Registration.proj(ignore='scan_channel') & field) > 1:
msg = ('More than one channel was registered for {animal_id}-'
'{scan_session}-{scan_idx} field {field}'.format(**field))
raise PipelineException(msg)
# Get registered grid
field_key = {'animal_id': field['animal_id'],
'session': field['scan_session'], 'scan_idx': field['scan_idx'],
'field': field['field']}
pipe = reso if reso.ScanInfo & field_key else meso
um_per_px = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).microns_per_pixel
grid = (Registration & field).get_grid(type='affine', desired_res=um_per_px)
# Create cell objects
for channel_key in (pipe.ScanSet & field_key &
{'segmentation_method': key['segmentation_method']}): # *
somas = pipe.MaskClassification.Type & {'type': 'soma'}
field_somas = pipe.ScanSet.Unit & channel_key & somas
unit_keys, xs, ys = (pipe.ScanSet.UnitInfo & field_somas).fetch('KEY',
'px_x', 'px_y')
px_coords = np.stack([ys, xs])
xs, ys, zs = [ndimage.map_coordinates(grid[..., i], px_coords, order=1)
for i in range(3)]
units += [StackSet.MatchedUnit(*args, key_hash(channel_key)) for args in
zip(unit_keys, xs, ys, zs)]
# * Separating masks per channel allows masks in diff channels to be matched
print(len(units), 'initial units')
def find_close_units(centroid, centroids, min_distance):
""" Finds centroids that are closer than min_distance to centroid. """
dists = distance.cdist(np.expand_dims(centroid, 0), centroids)
indices = np.flatnonzero(dists < min_distance)
return indices, dists[0, indices]
def is_valid(unit1, unit2, max_height):
""" Checks that units belong to different fields and that the resulting unit
would not be bigger than 20 microns."""
different_fields = len(set(unit1.plane_ids) & set(unit2.plane_ids)) == 0
acceptable_height = (max(unit1.zs + unit2.zs) - min(
unit1.zs + unit2.zs)) < max_height
return different_fields and acceptable_height
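# The matching below is a greedy agglomeration: build a sorted list of all valid
# pairs closer than min_distance, repeatedly merge the closest pair into a single
# MatchedUnit, recompute distances from the merged centroid, and stop when no
# candidate pairs remain.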
# Create distance matrix
# For memory efficiency we use an adjacency list with only the units at less than 10 microns
centroids = np.stack([u.centroid for u in units])
distance_list = [] # list of triples (distance, unit1, unit2)
for i in range(len(units)):
indices, distances = find_close_units(centroids[i], centroids[i + 1:],
min_distance)
for dist, j in zip(distances, i + 1 + indices):
if is_valid(units[i], units[j], max_height):
bisect.insort(distance_list, (dist, units[i], units[j]))
print(len(distance_list), 'possible pairings')
# Join units
while (len(distance_list) > 0):
# Get next pair of units
d, unit1, unit2 = distance_list.pop(0)
# Remove them from lists
units.remove(unit1)
units.remove(unit2)
f = lambda x: (unit1 not in x[1:]) and (unit2 not in x[1:])
distance_list = list(filter(f, distance_list))
# Join them
unit1.join_with(unit2)
# Recalculate distances
centroids = [u.centroid for u in units]
indices, distances = find_close_units(unit1.centroid, centroids, min_distance)
for dist, j in zip(distances, indices):
if is_valid(unit1, units[j], max_height):
bisect.insort(distance_list, (dist, unit1, units[j]))
# Insert new unit
units.append(unit1)
print(len(units), 'number of final masks')
# Insert
self.insert1({**key, 'min_distance': min_distance, 'max_height': max_height})
for munit_id, munit in zip(itertools.count(start=1), units):
new_unit = {**key, 'munit_id': munit_id, 'munit_x': munit.centroid[0],
'munit_y': munit.centroid[1], 'munit_z': munit.centroid[2]}
self.Unit().insert1(new_unit)
for subunit_key in munit.keys:
new_match = {**key, 'munit_id': munit_id, **subunit_key,
'scan_session': subunit_key['session']}
self.Match().insert1(new_match, ignore_extra_fields=True)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = (StackSet() & key).plot_centroids3d()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename)
plt.close(fig)
msg = ('StackSet for {animal_id}-{stack_session}-{stack_idx}: {num_units} final '
'units').format(**key, num_units=len(self.Unit & key))
slack_user = notify.SlackUser & (experiment.Session & key &
{'session': key['stack_session']})
slack_user.notify(file=img_filename, file_title=msg)
def plot_centroids3d(self):
""" Plots the centroids of all units in the motor coordinate system (in microns)
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
# Get centroids
xs, ys, zs = (StackSet.Unit & self).fetch('munit_x', 'munit_y', 'munit_z')
# Plot
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
ax.scatter(xs, ys, zs, alpha=0.5)
ax.invert_zaxis()
ax.set_xlabel('x (um)')
ax.set_ylabel('y (um)')
ax.set_zlabel('z (um)')
return fig
@schema
class Area(dj.Computed):
definition = """ # transform area masks from annotated retinotopic maps into stack space
-> PreprocessedStack.proj(stack_session='session',stack_channel='channel')
-> experiment.Scan.proj(scan_session='session')
-> shared.Channel.proj(scan_channel='channel')
-> shared.RegistrationMethod
-> shared.AreaMaskMethod
ret_idx : smallint # retinotopy map index for each animal
ret_hash : varchar(32) # single attribute representation of the key (used to avoid going over 16 attributes in the key)
---
"""
class Mask(dj.Part):
definition = """ # mask per area indicating membership
-> master
-> anatomy.Area
---
mask : blob # 2D mask of pixel area membership
"""
@property
def key_source(self):
# anatomy code outputs masks per field for aim 2pScan and per concatenated plane for aim widefield
map_rel = (anatomy.AreaMask.proj('ret_idx', scan_session='session') &
(experiment.Scan & 'aim="2pScan"').proj(scan_session='session'))
stack_rel = Registration & 'registration_method = 5'
heading = list(set(list(map_rel.heading.attributes) + list(stack_rel.heading.attributes)))
heading.remove('field')
heading.remove('brain_area')
key_source = dj.U(*heading, 'mask_method') & (map_rel * stack_rel * shared.AreaMaskMethod)
return key_source
def make(self, key):
from scipy.interpolate import griddata
import cv2
# same as key_source but retains the brain_area attribute
key['ret_hash'] = key_hash(key)
map_rel = (anatomy.AreaMask.proj('ret_idx', scan_session='session') &
(experiment.Scan & 'aim="2pScan"').proj(scan_session='session'))
stack_rel = Registration & 'registration_method = 5'
heading = list(set(list(map_rel.heading.attributes) + list(stack_rel.heading.attributes)))
heading.remove('field')
area_keys = (dj.U(*heading, 'mask_method') & (map_rel * stack_rel * shared.AreaMaskMethod) & key).fetch('KEY')
fetch_str = ['x', 'y', 'um_width', 'um_height', 'px_width', 'px_height']
stack_rel = CorrectedStack.proj(*fetch_str, stack_session='session') & key
cent_x, cent_y, um_w, um_h, px_w, px_h = stack_rel.fetch1(*fetch_str)
# subtract edges so that all coordinates are relative to the field
stack_edges = np.array((cent_x - um_w / 2, cent_y - um_h / 2))
stack_px_dims = np.array((px_w, px_h))
stack_um_dims = np.array((um_w, um_h))
# 0.5 displacement returns the center of each pixel
stack_px_grid = np.meshgrid(*[np.arange(d) + 0.5 for d in stack_px_dims])
# for each area, transfer mask from all fields into the stack
area_masks = []
for area_key in area_keys:
mask_rel = anatomy.AreaMask & area_key
field_keys, masks = mask_rel.fetch('KEY', 'mask')
stack_masks = []
for field_key, field_mask in zip(field_keys, masks):
field_res = (meso.ScanInfo.Field & field_key).microns_per_pixel
grid_key = {**key, 'field': field_key['field']}
# fetch transformation grid using built in function
field2stack_um = (Registration & grid_key).get_grid(type='affine', desired_res=field_res)
field2stack_um = (field2stack_um[..., :2]).transpose([2, 0, 1])
# convert transformation grid into stack pixel space
field2stack_px = [(grid - edge) * px_per_um for grid, edge, px_per_um
in zip(field2stack_um, stack_edges, stack_px_dims / stack_um_dims)]
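# e.g. for a stack 1000 um / 500 px wide whose left edge sits at x = 100 um, a field
# point at x = 600 um maps to (600 - 100) * (500 / 1000) = 250 px (illustrative numbers).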
grid_locs = np.array([f2s.ravel() for f2s in field2stack_px]).T
grid_vals = field_mask.ravel()
grid_query = np.array([stack_grid.ravel() for stack_grid in stack_px_grid]).T
# griddata because scipy.interpolate.interp2d wasn't working for some reason
# linear because nearest neighbor doesn't handle nans at the edge of the image
stack_mask = griddata(grid_locs, grid_vals, grid_query, method='linear')
stack_mask = np.round(np.reshape(stack_mask, (px_h, px_w)))
stack_masks.append(stack_mask)
# flatten all masks for area
stack_masks = np.array(stack_masks)
stack_masks[np.isnan(stack_masks)] = 0
area_mask = np.max(stack_masks, axis=0)
# close gaps in mask with 100 um kernel
kernel_width = 100
kernel = np.ones(np.round(kernel_width * (stack_px_dims / stack_um_dims)).astype(int))
area_mask = cv2.morphologyEx(area_mask, cv2.MORPH_CLOSE, kernel)
area_masks.append(area_mask)
# locate areas where masks overlap and set to nan
overlap_locs = np.sum(area_masks, axis=0) > 1
# create reference map of non-overlapping area masks
mod_masks = np.stack(area_masks.copy())
mod_masks[:, overlap_locs] = np.nan
ref_mask = np.max([mm * (i + 1) for i, mm in enumerate(mod_masks)], axis=0)
# interpolate overlap pixels into reference mask
non_nan_idx = np.invert(np.isnan(ref_mask))
grid_locs = np.array([stack_grid[non_nan_idx].ravel() for stack_grid in stack_px_grid]).T
grid_vals = ref_mask[non_nan_idx].ravel()
grid_query = np.array([stack_grid[overlap_locs] for stack_grid in stack_px_grid]).T
mask_assignments = griddata(grid_locs, grid_vals, grid_query, method='nearest')
for loc, assignment in zip((np.array(grid_query) - 0.5).astype(int), mask_assignments):
mod_masks[:, loc[1], loc[0]] = 0
mod_masks[int(assignment - 1)][loc[1]][loc[0]] = 1
area_keys = [{**area_key,**key,'mask': mod_mask} for area_key, mod_mask in zip(area_keys, mod_masks)]
self.insert1(key)
self.Mask.insert(area_keys)
|
olakiril/pipeline
|
python/pipeline/stack.py
|
Python
|
lgpl-3.0
| 136,839
|
[
"Gaussian"
] |
2ab1f6914b8c6b9081d5ee0e669994f6a5821e40a79fce1146585e59c7a83048
|
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
import sas.qtgui.Utilities.GuiUtils as GuiUtils
class ModelViewDelegate(QtWidgets.QStyledItemDelegate):
"""
Custom delegate for appearance and behavior control of the model view
"""
def __init__(self, parent=None):
"""
Overwrite generic constructor to allow for some globals
"""
super(ModelViewDelegate, self).__init__()
# Main parameter table view columns
self.param_error=-1
self.param_property=0
self.param_value=1
self.param_min=2
self.param_max=3
self.param_unit=4
def fancyColumns(self):
return [self.param_value, self.param_min, self.param_max, self.param_unit]
def addErrorColumn(self):
"""
Modify local column pointers
Note: the reverse is never required!
"""
self.param_property=0
self.param_value=1
self.param_error=2
self.param_min=3
self.param_max=4
self.param_unit=5
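# Resulting column layouts (sketch):
#   default:            property | value | min | max | unit
#   with error column:  property | value | error | min | max | unit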
def paint(self, painter, option, index):
"""
Overwrite generic painter for certain columns
"""
if index.column() in self.fancyColumns():
# Units - present in nice HTML
options = QtWidgets.QStyleOptionViewItem(option)
self.initStyleOption(options,index)
style = QtWidgets.QApplication.style() if options.widget is None else options.widget.style()
# Prepare document for inserting into cell
doc = QtGui.QTextDocument()
# Convert the unit description into HTML
text_html = GuiUtils.convertUnitToHTML(str(options.text))
doc.setHtml(text_html)
doc.setDocumentMargin(1)
# delete the original content
options.text = ""
style.drawControl(QtWidgets.QStyle.CE_ItemViewItem, options, painter, options.widget)
context = QtGui.QAbstractTextDocumentLayout.PaintContext()
textRect = style.subElementRect(QtWidgets.QStyle.SE_ItemViewItemText, options)
painter.save()
rect = textRect.topLeft()
x = rect.x()
y = rect.y()
x += 3.0 # magic value for rendering nice display in the table
y += 2.0 # magic value for rendering nice display in the table
rect.setX(x)
rect.setY(y)
painter.translate(rect)
painter.setClipRect(textRect.translated(-rect))
# Draw the QTextDocument in the cell
doc.documentLayout().draw(painter, context)
painter.restore()
else:
# Just the default paint
QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
def createEditor(self, widget, option, index):
"""
Overwrite generic editor for certain columns
"""
if not index.isValid():
return 0
if index.column() == self.param_value: #only in the value column
editor = QtWidgets.QLineEdit(widget)
validator = GuiUtils.DoubleValidator()
editor.setValidator(validator)
return editor
if index.column() in [self.param_property, self.param_error, self.param_unit]:
# Set some columns uneditable
return None
if index.column() in (self.param_min, self.param_max):
# Check if the edit role is set
if not (index.flags() & QtCore.Qt.ItemIsEditable):
return None
return super(ModelViewDelegate, self).createEditor(widget, option, index)
def setModelData(self, editor, model, index):
"""
Overwrite generic model update method for certain columns
"""
if index.column() in (self.param_min, self.param_max):
try:
value_float = float(editor.text())
except ValueError:
# TODO: present the failure to the user
# balloon popup? tooltip? cell background colour flash?
return
QtWidgets.QStyledItemDelegate.setModelData(self, editor, model, index)
class PolyViewDelegate(QtWidgets.QStyledItemDelegate):
"""
Custom delegate for appearance and behavior control of the polydispersity view
"""
POLYDISPERSE_FUNCTIONS = ['rectangle', 'array', 'lognormal', 'gaussian', 'schulz']
combo_updated = QtCore.pyqtSignal(str, int)
filename_updated = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
"""
Overwrite generic constructor to allow for some globals
"""
super(PolyViewDelegate, self).__init__()
self.poly_parameter = 0
self.poly_pd = 1
self.poly_error = None
self.poly_min = 2
self.poly_max = 3
self.poly_npts = 4
self.poly_nsigs = 5
self.poly_function = 6
self.poly_filename = 7
def editableParameters(self):
return [self.poly_pd, self.poly_min, self.poly_max, self.poly_npts, self.poly_nsigs]
def columnDict(self):
return {self.poly_pd: 'width',
self.poly_min: 'min',
self.poly_max: 'max',
self.poly_npts: 'npts',
self.poly_nsigs: 'nsigmas'}
def addErrorColumn(self):
"""
Modify local column pointers
Note: the reverse is never required!
"""
self.poly_parameter = 0
self.poly_pd = 1
self.poly_error = 2
self.poly_min = 3
self.poly_max = 4
self.poly_npts = 5
self.poly_nsigs = 6
self.poly_function = 7
self.poly_filename = 8
def createEditor(self, widget, option, index):
# Remember the current choice
if not index.isValid():
return 0
elif index.column() == self.poly_filename:
# Notify the widget that we want to change the filename
self.filename_updated.emit(index.row())
return None
elif index.column() in self.editableParameters():
self.editor = QtWidgets.QLineEdit(widget)
validator = GuiUtils.DoubleValidator()
self.editor.setValidator(validator)
return self.editor
else:
QtWidgets.QStyledItemDelegate.createEditor(self, widget, option, index)
def paint(self, painter, option, index):
"""
Overwrite generic painter for certain columns
"""
if index.column() in (self.poly_pd, self.poly_min, self.poly_max):
# Units - present in nice HTML
options = QtWidgets.QStyleOptionViewItem(option)
self.initStyleOption(options,index)
style = QtWidgets.QApplication.style() if options.widget is None else options.widget.style()
# Prepare document for inserting into cell
doc = QtGui.QTextDocument()
current_font = painter.font()
doc.setDefaultFont(current_font)
# Convert the unit description into HTML
text_html = GuiUtils.convertUnitToHTML(str(options.text))
doc.setHtml(text_html)
# delete the original content
options.text = ""
            style.drawControl(QtWidgets.QStyle.CE_ItemViewItem, options, painter, options.widget)
context = QtGui.QAbstractTextDocumentLayout.PaintContext()
textRect = style.subElementRect(QtWidgets.QStyle.SE_ItemViewItemText, options)
painter.save()
rect = textRect.topLeft()
y = rect.y()
y += 5.0 # magic value for rendering nice display in the table
rect.setY(y)
painter.translate(rect)
painter.setClipRect(textRect.translated(-rect))
# Draw the QTextDocument in the cell
doc.documentLayout().draw(painter, context)
painter.restore()
else:
# Just the default paint
QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
class MagnetismViewDelegate(QtWidgets.QStyledItemDelegate):
"""
Custom delegate for appearance and behavior control of the magnetism view
"""
def __init__(self, parent=None):
"""
Overwrite generic constructor to allow for some globals
"""
super(MagnetismViewDelegate, self).__init__()
self.mag_parameter = 0
self.mag_value = 1
self.mag_min = 2
self.mag_max = 3
self.mag_unit = 4
def editableParameters(self):
return [self.mag_value, self.mag_min, self.mag_max]
def addErrorColumn(self):
"""
Modify local column pointers
Note: the reverse is never required!
"""
self.mag_parameter = 0
self.mag_value = 1
self.mag_min = 3
self.mag_max = 4
self.mag_unit = 5
def createEditor(self, widget, option, index):
# Remember the current choice
current_text = index.data()
if not index.isValid():
return 0
if index.column() in self.editableParameters():
editor = QtWidgets.QLineEdit(widget)
validator = GuiUtils.DoubleValidator()
editor.setValidator(validator)
return editor
else:
QtWidgets.QStyledItemDelegate.createEditor(self, widget, option, index)
def paint(self, painter, option, index):
"""
Overwrite generic painter for certain columns
"""
if index.column() in (self.mag_value, self.mag_min, self.mag_max, self.mag_unit):
# Units - present in nice HTML
options = QtWidgets.QStyleOptionViewItem(option)
self.initStyleOption(options,index)
style = QtWidgets.QApplication.style() if options.widget is None else options.widget.style()
# Prepare document for inserting into cell
doc = QtGui.QTextDocument()
current_font = painter.font()
doc.setDefaultFont(current_font)
# Convert the unit description into HTML
text_html = GuiUtils.convertUnitToHTML(str(options.text))
doc.setHtml(text_html)
# delete the original content
options.text = ""
            style.drawControl(QtWidgets.QStyle.CE_ItemViewItem, options, painter, options.widget)
context = QtGui.QAbstractTextDocumentLayout.PaintContext()
textRect = style.subElementRect(QtWidgets.QStyle.SE_ItemViewItemText, options)
painter.save()
rect = textRect.topLeft()
y = rect.y()
y += 6.0 # magic value for rendering nice display in the table
rect.setY(y)
painter.translate(rect)
painter.setClipRect(textRect.translated(-rect))
# Draw the QTextDocument in the cell
doc.documentLayout().draw(painter, context)
painter.restore()
else:
# Just the default paint
QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
|
SasView/sasview
|
src/sas/qtgui/Perspectives/Fitting/ViewDelegate.py
|
Python
|
bsd-3-clause
| 11,111
|
[
"Gaussian"
] |
d40b78b02143369bd093822639fcb692c48f6e850fdcbd6414ecac3faa0730e4
|
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2009, 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mysite.base.tests import TwillTests
import mysite.project.controllers
import mysite.account.tests
from mysite.search.models import Project
from mysite.profile.models import Person, PortfolioEntry
import mysite.project.views
import mysite.profile.views
import mysite.profile.models
import mysite.profile.controllers
from mysite.base.tests import better_make_twill_url
import mock
import urlparse
import datetime
from django.core.urlresolvers import reverse
from twill import commands as tc
class ProjectNameSearch(TwillTests):
def test_search_for_similar_project_names_backend(self):
# Create one relevant, one irrelevant project
mysite.search.models.Project.create_dummy(name='Twisted System')
mysite.search.models.Project.create_dummy(name='Irrelevant')
# Call out function, hoping to find Twisted System
starts_with_twisted = mysite.project.controllers.similar_project_names(
'Twisted')
self.assertEqual(['Twisted System'], [p.name for p in starts_with_twisted])
# Same with lowercase name
starts_with_twisted = mysite.project.controllers.similar_project_names(
'twistEd')
self.assertEqual(['Twisted System'], [p.name for p in starts_with_twisted])
def test_search_for_one_matching_project_name(self):
# If there's an exactly-matching project name, we redirect to that project's page
# (instead of showing search results).
mysite.search.models.Project.create_dummy(name='Twisted System')
response = self.client.get('/+projects/',
{'q': 'twiSted SysTem'},
follow=True)
self.assertEqual(response.redirect_chain,
[('http://testserver/+projects/Twisted%20System', 302)])
def test_form_sends_data_to_get(self):
# This test will fail if a query that selects one project but doesn't
# equal the project's name causes a redirect.
# First, create the project that we will refer to below.
mysite.search.models.Project.create_dummy(name='Twisted System')
tc.go(better_make_twill_url('http://openhatch.org/+projects'))
query = 'Twisted'
tc.fv(1, 'search_q', query)
tc.submit()
tc.url('\?q=Twisted') # Assert that URL contains this substring.
tc.find(query)
def test_template_get_matching_projects(self):
mysite.search.models.Project.create_dummy(name='Twisted System')
mysite.search.models.Project.create_dummy(name='Twisted Orange Drinks')
response = self.client.get('/+projects/',
{'q': 'Twisted'},
follow=True)
matching_projects = response.context[0]['matching_projects']
self.assertEqual(
sorted([p.name for p in matching_projects]),
sorted(['Twisted Orange Drinks', 'Twisted System']))
class ProjectList(TwillTests):
def test_it_generally_works(self):
self.client.get('/+projects/')
class ProjectPageCreation(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
@mock.patch('mysite.search.models.Project.populate_icon_from_ohloh')
@mock.patch('mysite.search.tasks.PopulateProjectLanguageFromOhloh')
def test_post_handler(self, mock_populate_icon, mock_populate_language):
# Show that it works
project_name = 'Something novel'
self.assertFalse(mysite.search.models.Project.objects.filter(name=project_name))
client = self.login_with_client()
response = client.post(reverse(mysite.project.views.create_project_page_do),
{'project_name': project_name}, follow=True)
# We successfully made the project...
self.assert_(mysite.search.models.Project.objects.filter(name=project_name))
# and redirected to the editor.
self.assertEqual(response.redirect_chain,
[('http://testserver/+projedit/Something%20novel', 302)])
# FIXME: Enqueue a job into the session to have this user take ownership
# of this Project.
# This could easily be a log for edits.
@mock.patch('mysite.search.models.Project.populate_icon_from_ohloh')
@mock.patch('mysite.search.tasks.PopulateProjectLanguageFromOhloh')
def test_project_creator_simply_redirects_to_project_if_it_exists(
self, mock_populate_icon, mock_populate_language):
# Show that it works
project_name = 'Something novel'
Project.create_dummy(name=project_name.lower())
# See? We have our project in the database (with slightly different case, but still)
self.assertEqual(1, len(mysite.search.models.Project.objects.all()))
response = self.client.post(reverse(mysite.project.views.create_project_page_do),
{'project_name': project_name}, follow=True)
# And we still have exactly that one project in the database.
self.assertEqual(1, len(mysite.search.models.Project.objects.all()))
# and redirected.
self.assertEqual(response.redirect_chain,
[('http://testserver/+projects/something%20novel', 302)])
def test_form_on_project_search_page_submits_to_project_creation_post_handler(self):
project_search_page_url = better_make_twill_url(
"http://openhatch.org%s?q=newproject" % reverse(mysite.project.views.projects))
tc.go(project_search_page_url)
# Fill form out with slightly different project name, which we
# anticipate happening sometimes
tc.fv('create_project', 'project_name', 'NewProject')
tc.submit()
post_handler_url = reverse(mysite.project.views.create_project_page_do)
import re
tc.url(re.escape(post_handler_url))
class ButtonClickMarksSomeoneAsWannaHelp(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_mark_as_wanna_help(self):
person = Person.objects.get(user__username='paulproteus')
p_before = Project.create_dummy()
self.assertFalse(mysite.search.models.WannaHelperNote.objects.all())
self.assertFalse(p_before.people_who_wanna_help.all())
client = self.login_with_client()
post_to = reverse(mysite.project.views.wanna_help_do)
client.post(post_to, {u'project': unicode(p_before.pk)})
p_after = Project.objects.get(pk=p_before.pk)
self.assertEqual(
list(p_after.people_who_wanna_help.all()),
[person])
note = mysite.search.models.WannaHelperNote.objects.get()
self.assertEqual(note.person, person)
self.assertEqual(note.project, p_after)
def test_unmark_as_wanna_help(self):
# We're in there...
person = Person.objects.get(user__username='paulproteus')
p_before = Project.create_dummy()
p_before.people_who_wanna_help.add(person)
p_before.save()
mysite.search.models.WannaHelperNote.add_person_project(person, p_before)
# Submit that project to unlist_self_from_wanna_help_do
client = self.login_with_client()
post_to = reverse(mysite.project.views.unlist_self_from_wanna_help_do)
client.post(post_to, {u'project': unicode(p_before.pk)})
# Are we gone yet?
p_after = Project.objects.get(pk=p_before.pk)
self.assertFalse(p_after.people_who_wanna_help.all())
def test_mark_as_contacted(self):
person = Person.objects.get(user__username='paulproteus')
p_before = Project.create_dummy()
p_before.people_who_wanna_help.add(person)
p_before.save()
mysite.search.models.WannaHelperNote.add_person_project(person, p_before)
client = self.login_with_client()
post_to = reverse(mysite.project.views.mark_contacted_do)
vars = {u'mark_contact-project': unicode(p_before.pk),
u'helper-%s-checked' % (person.pk,) : unicode('on'),
u'helper-%s-person_id' % (person.pk) : unicode(person.pk),
u'helper-%s-project_id' % (person.pk) : unicode(p_before.pk)}
client.post(post_to, vars)
whn_after = mysite.search.models.WannaHelperNote.objects.get(person=person, project=p_before)
self.assertTrue(whn_after.contacted_on)
self.assertTrue(whn_after.contacted_by, datetime.date.today())
class WannaHelpSubmitHandlesNoProjectIdGracefully(TwillTests):
def test(self):
# Submit nothing.
post_to = reverse(mysite.project.views.wanna_help_do)
response = self.client.post(post_to, {}, follow=True)
self.assertEqual(response.status_code, 400)
class WannaHelpWorksAnonymously(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_mark_as_helper_anonymously(self):
# Steps for this test
# 1. User fills in the form anonymously
# 2. We test that the Answer is not yet saved
# 3. User logs in
# 4. We test that the Answer is saved
project_id = Project.create_dummy(name='Myproject').id
# At the start, no one wants to help our project.
self.assertFalse(Project.objects.get(id=project_id).people_who_wanna_help.all())
# Click the button saying we want to help!
post_to = reverse(mysite.project.views.wanna_help_do)
response = self.client.post(post_to, {u'project': unicode(project_id)}, follow=True)
# Make sure we are redirected to the right place
self.assertEqual(response.redirect_chain,
[('http://testserver/account/login/?next=%2F%2Bprojects%2FMyproject%3Fwanna_help%3Dtrue', 302)])
        # check that the session can detect that we want to help this project out
self.assertEqual(self.client.session['projects_we_want_to_help_out'],
[project_id])
# According to the database, no one wants to help our project.
self.assertFalse(Project.objects.get(id=project_id).people_who_wanna_help.all())
# But when the user is logged in and *then* visits the project page
login_worked = self.client.login(username='paulproteus',
password="paulproteus's unbreakable password")
self.assert_(login_worked)
# Visit the project page...
self.client.get(Project.objects.get(id=project_id).get_url())
# After the GET, we've removed our note in the session
self.assertFalse(self.client.session.get('projects_we_want_to_help_out', None))
# then the DB knows the user wants to help out!
self.assertEqual(list(Project.objects.get(id=project_id).people_who_wanna_help.all()),
[Person.objects.get(user__username='paulproteus')])
self.assert_(mysite.search.models.WannaHelperNote.objects.all())
# Say we're not interested anymore.
post_to = reverse(mysite.project.views.unlist_self_from_wanna_help_do)
response = self.client.post(post_to, {u'project': unicode(project_id)}, follow=True)
# And now the DB shows we have removed ourselves.
self.assertFalse(Project.objects.get(id=project_id).people_who_wanna_help.all())
self.assertFalse(mysite.search.models.WannaHelperNote.objects.all())
class ProjectPageTellsNextStepsForHelpersToBeExpanded(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus',
'miro-project']
def test_default_to_false(self): # FIXME: Make it default to True soon
client = self.login_with_client()
response = client.get('/+projects/Miro')
self.assertFalse(response.context[0].get(
'expand_next_steps', None))
class OffsiteAnonymousWannaHelpWorks(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test(self):
# Steps for this test
# 1. User POSTs to the wannahelp POST handler, indicating the request came from offsite
# 2. User is redirected to a login page that knows the request came from offsite
project_id = Project.create_dummy(name='Myproject').id
# At the start, no one wants to help our project.
self.assertFalse(Project.objects.get(id=project_id).people_who_wanna_help.all())
# Click the button saying we want to help!
post_to = reverse(mysite.project.views.wanna_help_do)
response = self.client.post(post_to,
{u'project': unicode(project_id),
u'from_offsite': u'True'}, follow=True)
# Make sure the session knows we came from offsite. Login-related
# templates want this knowledge.
self.assert_(self.client.session.get('from_offsite', False))
## FIXME: There should be a cancel button letting the user
## destroy the session and then go back to the Referring page.
# Make sure we are redirected to the right place
self.assertEqual(response.redirect_chain,
[('http://testserver/account/login/?next=%2F%2Bprojects%2FMyproject%3Fwanna_help%3Dtrue', 302)])
lucky_projects = mysite.project.controllers.get_wanna_help_queue_from_session(self.client.session)
self.assertEqual([k.name for k in lucky_projects], ['Myproject'])
class DecideWhichProjectDescriptionsAppearOnProjectPage(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus', 'user-barry', 'person-barry']
def test(self):
# Create a project.
project = Project.create_dummy()
# Create two profiles, each with a PortfolioEntry linking it to the
# project, each with descriptions.
def create_pfe_with_description(username):
return PortfolioEntry.create_dummy(project=project,
person=Person.get_by_username(username),
is_published=True)
pfes = {'uncheck_me': create_pfe_with_description('paulproteus'),
'keep_me_checked': create_pfe_with_description('barry')}
# Get a list of the PortfolioEntries that we use to get a random project
# description for the project page.
descriptions = project.get_pfentries_with_usable_descriptions()
# Observe that the list contains both PortfolioEntries.
for entry in pfes.values():
self.assert_(entry in descriptions)
self.login_with_twill()
# Go to the project page.
url = urlparse.urljoin("http://openhatch.org", project.get_edit_page_url())
tc.go(better_make_twill_url(url))
# In preparation for the next set of assertions, make sure that the
# entries don't have the same description.
self.assertNotEqual(
pfes['uncheck_me'].project_description,
pfes['keep_me_checked'].project_description)
# See a list of project descriptions on the page, which equals the list of
# descriptions in the DB.
for entry in pfes.values():
tc.find(entry.project_description)
# Uncheck one of the checkboxes and submit the form
name_of_checkbox_to_uncheck = "%s-use_my_description" % pfes['uncheck_me'].pk
tc.fv("2", name_of_checkbox_to_uncheck, False)
tc.submit()
# Get a list of the PortfolioEntries that we use to get a random project
# description for the project page.
good_pfentries = project.get_pfentries_with_usable_descriptions()
# Observe that the list contains only the checked PortfolioEntry.
self.assert_(pfes['uncheck_me'] not in good_pfentries)
self.assert_(pfes['keep_me_checked'] in good_pfentries)
class BugTrackersOnProjectEditPage(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus', 'user-barry', 'person-barry']
def setUp(self):
super(BugTrackersOnProjectEditPage, self).setUp()
self.twisted = mysite.search.models.Project.create_dummy(name='Twisted System')
def test_empty_at_start(self):
self.assertFalse(self.twisted.get_corresponding_bug_trackers())
def test_trackers_created_for_project_show_up(self):
# Create a Roundup model
bug_tracker = mysite.customs.models.RoundupTrackerModel(
tracker_name='dummy',
base_url='http://example.com/',
closed_status='resolved')
bug_tracker.created_for_project = self.twisted
bug_tracker.save()
# Now, the Twisted project should have one corresponding bug tracker
trackers_from_project = list(self.twisted.get_corresponding_bug_trackers())
self.assertEqual([bug_tracker], trackers_from_project)
|
mzdaniel/oh-mainline
|
mysite/project/tests.py
|
Python
|
agpl-3.0
| 17,510
|
[
"VisIt"
] |
29bec4233a14b0484e520e5299b0c2bb432efedf07e1d04ebbe44d2d60117f82
|
# -*- coding: utf-8 -*-
#
# spike_analysis.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Merges spike files, produces raster plots, calculates and plots firing rates
import numpy as np
import glob
import matplotlib.pyplot as plt
import os
import re
datapath = '.'
# get simulation time and numbers of neurons recorded from sim_params.sli
with open(os.path.join(datapath, 'sim_params.sli'), 'r') as f:
sim_params_contents = f.read()
T = float(re.search(r'/t_sim (.+) def', sim_params_contents).group(1))
record_frac = re.search(r'/record_fraction_neurons_spikes (.+) def', sim_params_contents).group(1) == 'true'
if record_frac:
frac_rec = float(re.search(r'/frac_rec_spikes (.+) def', sim_params_contents).group(1))
else:
n_rec = int(re.search(r'/n_rec_spikes (.+) def', sim_params_contents).group(1))
T_start = 200. # starting point of analysis (to avoid transients)
# load node IDs
node_ids = np.loadtxt(os.path.join(datapath, 'population_nodeIDs.dat'), dtype=int)
print('Global IDs:')
print(node_ids)
print()
# number of populations
num_pops = len(node_ids)
print('Number of populations:')
print(num_pops)
print()
# first node ID in each population
raw_first_node_ids = [node_ids[i][0] for i in np.arange(len(node_ids))]
# population sizes
pop_sizes = [node_ids[i][1] - node_ids[i][0] + 1 for i in np.arange(len(node_ids))]
# numbers of neurons for which spikes were recorded
if record_frac:
rec_sizes = [int(pop_sizes[i] * frac_rec) for i in range(len(pop_sizes))]
else:
rec_sizes = [n_rec] * len(pop_sizes)
# first node ID of each population once device node IDs are dropped
first_node_ids = [int(1 + np.sum(pop_sizes[:i]))
for i in np.arange(len(pop_sizes))]
# last node ID of each population once device node IDs are dropped
last_node_ids = [int(np.sum(pop_sizes[:i + 1]))
for i in np.arange(len(pop_sizes))]
# convert lists to a nicer format, i.e. [[2/3e, 2/3i], []....]
Pop_sizes = [pop_sizes[i:i + 2] for i in range(0, len(pop_sizes), 2)]
print('Population sizes:')
print(Pop_sizes)
print()
Raw_first_node_ids = [raw_first_node_ids[i:i + 2] for i in range(0, len(raw_first_node_ids), 2)]
First_node_ids = [first_node_ids[i:i + 2] for i in range(0, len(first_node_ids), 2)]
Last_node_ids = [last_node_ids[i:i + 2] for i in range(0, len(last_node_ids), 2)]
# total number of neurons in the simulation
num_neurons = last_node_ids[len(last_node_ids) - 1]
print('Total number of neurons:')
print(num_neurons)
print()
# load spikes from gdf files, correct node IDs and merge them in population files,
# and store spike trains
# will contain neuron id resolved spike trains
neuron_spikes = [[] for i in np.arange(num_neurons + 1)]
# container for population-resolved spike data
spike_data = [[[], []], [[], []], [[], []], [[], []], [[], []], [[], []],
[[], []], [[], []]]
counter = 0
for layer in ['0', '1', '2', '3']:
for population in ['0', '1']:
output = os.path.join(datapath,
'population_spikes-{}-{}.gdf'.format(layer,
population))
file_pattern = os.path.join(datapath,
'spikes_{}_{}*'.format(layer, population))
files = glob.glob(file_pattern)
print('Merge ' + str(
len(files)) + ' spike files from L' + layer + 'P' + population)
if files:
merged_file = open(output, 'w')
for file in files:
data = open(file, 'r')
nest_version = next(data)
backend_version = next(data)
column_header = next(data)
for line in data:
a = line.split()
a[0] = int(a[0])
a[1] = float(a[1])
raw_first_node_id = Raw_first_node_ids[int(layer)][int(population)]
first_node_id = First_node_ids[int(layer)][int(population)]
a[0] = a[0] - raw_first_node_id + first_node_id
if (a[1] > T_start): # discard data in the start-up phase
spike_data[counter][0].append(num_neurons - a[0])
spike_data[counter][1].append(a[1] - T_start)
neuron_spikes[a[0]].append(a[1] - T_start)
converted_line = str(a[0]) + '\t' + str(a[1]) + '\n'
merged_file.write(converted_line)
data.close()
merged_file.close()
counter += 1
clrs = ['0', '0.5', '0', '0.5', '0', '0.5', '0', '0.5']
plt.ion()
# raster plot
plt.figure(1)
counter = 1
for j in np.arange(num_pops):
for i in np.arange(first_node_ids[j], first_node_ids[j] + rec_sizes[j]):
plt.plot(neuron_spikes[i],
np.ones_like(neuron_spikes[i]) + sum(rec_sizes) - counter,
'k o', ms=1, mfc=clrs[j], mec=clrs[j])
counter += 1
plt.xlim(0, T - T_start)
plt.ylim(0, sum(rec_sizes))
plt.xlabel(r'time (ms)')
plt.ylabel(r'neuron id')
plt.savefig(os.path.join(datapath, 'rasterplot.png'))
# firing rates
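# per-population mean rate: total recorded spikes / (recorded neurons * duration in ms),
# multiplied by 1e3 to convert to spikes/s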
rates = []
temp = 0
for i in np.arange(num_pops):
for j in np.arange(first_node_ids[i], last_node_ids[i]):
temp += len(neuron_spikes[j])
rates.append(temp / (rec_sizes[i] * (T - T_start)) * 1e3)
temp = 0
print()
print('Firing rates:')
print(rates)
plt.figure(2)
ticks = np.arange(num_pops)
plt.bar(ticks, rates, width=0.9, color='k')
xticklabels = ['L2/3e', 'L2/3i', 'L4e', 'L4i', 'L5e', 'L5i', 'L6e', 'L6i']
plt.setp(plt.gca(), xticks=ticks + 0.5, xticklabels=xticklabels)
plt.xlabel(r'subpopulation')
plt.ylabel(r'firing rate (spikes/s)')
plt.savefig(os.path.join(datapath, 'firing_rates.png'))
plt.show()
|
sanjayankur31/nest-simulator
|
examples/nest/Potjans_2014/spike_analysis.py
|
Python
|
gpl-2.0
| 6,449
|
[
"NEURON"
] |
e11a0a1379df3020ab305782d06b7338a54cb02bf90a50825a9ca6b537ebf74d
|
# -*- coding: utf-8 -*-
#
# test_erfc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Test implementation of erfc-neuron.
"""
import unittest
import nest
import numpy as np
from scipy.special import erfc
def get_mean_activity(detector, T):
"""
returns the mean activity of a single binary neuron connected to a spin
detector.
"""
states = nest.GetStatus(detector)[0]['events']['state']
times = nest.GetStatus(detector)[0]['events']['times']
# add total duration at the end, since we need to take into account
# the time between the last state change and end of simulation
times = np.hstack((times, T))
if len(times) > 1:
assert(states[0] == 1)
# since neuron is starting in 0 state, summing every second period
# will give us the total time in the up state
activity = np.sum(np.diff(times)[::2]) / (T - times[0])
# if we have more than one update, we calculate a more accurate value
# for the mean activity, taking into account that our measurements are
# biased (we only record /given/ that a state change happened)
if len(times) > 2:
# biased average starting at down state, p(m(t)=1|m(t-1)=0)
M0 = 1. - (np.sum(np.diff(times)[1::2]) / (T - times[1]))
# biased average starting at up state, p(m(t)=1|m(t-1)=1)
M1 = activity
# unbiased estimate,
# p(m(t)=1)=\sum_{s \in {0,1}} p(m(t)=1|m(t-1)=s)p(m(t-1)=s),
# assuming stationary state: p(m(t)) = p(m(t-1)), solved for p(m=1)
activity = M0 / (1. + M0 - M1)
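            # e.g. with M0 = 0.6 and M1 = 0.8 (illustrative numbers) this gives
            # 0.6 / (1 + 0.6 - 0.8) = 0.75, which lies between the two biased averages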
return activity
else:
return 0.
def activation_function_theory(sigma, theta):
"""
returns the probability for a binary neuron to be in the up state, given
the parameters sigma and theta.
"""
return 0.5 * erfc(theta / (np.sqrt(2.) * sigma))
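# e.g. activation_function_theory(sigma, 0.) = 0.5 * erfc(0) = 0.5 for any sigma:
# with the threshold at zero the neuron is equally likely to be up or down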
class ErfcNeuronTheoryTestCase(unittest.TestCase):
"""Compare results to theoretical predictions"""
def setUp(self):
"""defines parameters of simulation"""
self.sigma = np.logspace(-1, 1.1, 4)
self.theta = np.linspace(-6, 6, 15)
self.neuron = None
self.detector = None
self.T = 50000.
def build_and_connect_nodes(self, sigma, theta):
""" sets up an erfc neuron and spin detector. """
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus({'rng_seed': 1})
self.neuron = nest.Create('erfc_neuron', 1,
{'sigma': sigma, 'theta': theta})
self.detector = nest.Create('spin_detector', 1)
nest.Connect(self.neuron, self.detector)
def test_activation_function(self):
"""
simulates erfc neuron for different parameter sets and compares
activity to theoretical value.
"""
for sigma in self.sigma:
for theta in self.theta:
self.build_and_connect_nodes(sigma, theta)
nest.Simulate(self.T)
mean_activity = get_mean_activity(self.detector, self.T)
mean_activity_theory = activation_function_theory(sigma, theta)
                delta = np.max([2e-1 * mean_activity_theory * (1. - mean_activity_theory),
                                1e-2])
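                # tolerance scales with the binomial variance p * (1 - p) of the
                # predicted activity, with a 1e-2 floor for activities near 0 or 1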
self.assertAlmostEqual(
mean_activity,
mean_activity_theory,
delta=delta)
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(
ErfcNeuronTheoryTestCase)
return unittest.TestSuite([suite1])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
lekshmideepu/nest-simulator
|
testsuite/pytests/test_erfc_neuron.py
|
Python
|
gpl-2.0
| 4,456
|
[
"NEURON"
] |
ba51c4fa70f2dc45bd9fe7e53dabac8ad3135b3c8d7e73e33cac7bf709f3ca64
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
import json
import random
import re
from ..compat import (
compat_parse_qs,
compat_str,
)
from ..utils import (
js_to_json,
strip_jsonp,
urlencode_postdata,
)
class WeiboIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?weibo\.com/[0-9]+/(?P<id>[a-zA-Z0-9]+)'
_TEST = {
'url': 'https://weibo.com/6275294458/Fp6RGfbff?type=comment',
'info_dict': {
'id': 'Fp6RGfbff',
'ext': 'mp4',
'title': 'You should have servants to massage you,... 来自Hosico_猫 - 微博',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
# to get Referer url for genvisitor
webpage, urlh = self._download_webpage_handle(url, video_id)
visitor_url = urlh.geturl()
if 'passport.weibo.com' in visitor_url:
# first visit
visitor_data = self._download_json(
'https://passport.weibo.com/visitor/genvisitor', video_id,
note='Generating first-visit data',
transform_source=strip_jsonp,
headers={'Referer': visitor_url},
data=urlencode_postdata({
'cb': 'gen_callback',
'fp': json.dumps({
'os': '2',
'browser': 'Gecko57,0,0,0',
'fonts': 'undefined',
'screenInfo': '1440*900*24',
'plugins': '',
}),
}))
tid = visitor_data['data']['tid']
cnfd = '%03d' % visitor_data['data']['confidence']
self._download_webpage(
'https://passport.weibo.com/visitor/visitor', video_id,
note='Running first-visit callback',
query={
'a': 'incarnate',
't': tid,
'w': 2,
'c': cnfd,
'cb': 'cross_domain',
'from': 'weibo',
'_rand': random.random(),
})
webpage = self._download_webpage(
url, video_id, note='Revisiting webpage')
title = self._html_search_regex(
r'<title>(.+?)</title>', webpage, 'title')
video_formats = compat_parse_qs(self._search_regex(
r'video-sources=\\\"(.+?)\"', webpage, 'video_sources'))
formats = []
supported_resolutions = (480, 720)
for res in supported_resolutions:
vid_urls = video_formats.get(compat_str(res))
if not vid_urls or not isinstance(vid_urls, list):
continue
vid_url = vid_urls[0]
formats.append({
'url': vid_url,
'height': res,
})
self._sort_formats(formats)
uploader = self._og_search_property(
'nick-name', webpage, 'uploader', default=None)
return {
'id': video_id,
'title': title,
'uploader': uploader,
'formats': formats
}
class WeiboMobileIE(InfoExtractor):
_VALID_URL = r'https?://m\.weibo\.cn/status/(?P<id>[0-9]+)(\?.+)?'
_TEST = {
'url': 'https://m.weibo.cn/status/4189191225395228?wm=3333_2001&sourcetype=weixin&featurecode=newtitle&from=singlemessage&isappinstalled=0',
'info_dict': {
'id': '4189191225395228',
'ext': 'mp4',
'title': '午睡当然是要甜甜蜜蜜的啦',
'uploader': '柴犬柴犬'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
        # fetch the mobile page, which embeds the render data as JSON
webpage = self._download_webpage(url, video_id, note='visit the page')
weibo_info = self._parse_json(self._search_regex(
r'var\s+\$render_data\s*=\s*\[({.*})\]\[0\]\s*\|\|\s*{};',
webpage, 'js_code', flags=re.DOTALL),
video_id, transform_source=js_to_json)
status_data = weibo_info.get('status', {})
page_info = status_data.get('page_info')
title = status_data['status_title']
uploader = status_data.get('user', {}).get('screen_name')
return {
'id': video_id,
'title': title,
'uploader': uploader,
'url': page_info['media_info']['stream_url']
}
|
vinegret/youtube-dl
|
youtube_dl/extractor/weibo.py
|
Python
|
unlicense
| 4,493
|
[
"VisIt"
] |
118ae5ae56f5b06ac111887c0af2f424593e15477409989441a7ae9c3ffd2afb
|
"""
Photometric Redshifts via Linear Regression
-------------------------------------------
Linear Regression for photometric redshifts
We could use sklearn.linear_model.LinearRegression, but to be more
transparent, we'll do it by hand using linear algebra.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import itertools
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics.pairwise import euclidean_distances
from astroML.datasets import fetch_sdss_specgals
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
np.random.seed(0)
data = fetch_sdss_specgals()
# put magnitudes in a matrix
# with a constant (for the intercept) at position zero
mag = np.vstack([np.ones(data.shape)]
+ [data['modelMag_%s' % f] for f in 'ugriz']).T
z = data['z']
# train on ~60,000 points
mag_train = mag[::10]
z_train = z[::10]
# test on ~6,000 distinct points
mag_test = mag[1::100]
z_test = z[1::100]
def plot_results(z, z_fit, plotlabel=None,
xlabel=True, ylabel=True):
plt.scatter(z, z_fit, s=1, lw=0, c='k')
plt.plot([-0.1, 0.4], [-0.1, 0.4], ':k')
plt.xlim(-0.05, 0.4001)
plt.ylim(-0.05, 0.4001)
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(0.1))
plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.1))
if plotlabel:
plt.text(0.03, 0.97, plotlabel,
ha='left', va='top', transform=ax.transAxes)
if xlabel:
plt.xlabel(r'$\rm z_{true}$')
else:
plt.gca().xaxis.set_major_formatter(plt.NullFormatter())
if ylabel:
plt.ylabel(r'$\rm z_{fit}$')
else:
plt.gca().yaxis.set_major_formatter(plt.NullFormatter())
def combinations_with_replacement(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in itertools.product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def poly_features(X, p):
"""Compute polynomial features
Parameters
----------
X: array_like
shape (n_samples, n_features)
p: int
degree of polynomial
Returns
-------
X_p: array
polynomial feature matrix
"""
X = np.asarray(X)
N, D = X.shape
ind = list(combinations_with_replacement(range(D), p))
X_poly = np.empty((X.shape[0], len(ind)))
for i in range(len(ind)):
X_poly[:, i] = X[:, ind[i]].prod(1)
return X_poly
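# e.g. for two input columns (x0, x1) and p=2 this yields the degree-2 monomials
# x0*x0, x0*x1 and x1*x1; lower orders appear below because `mag` already carries a
# constant column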
def gaussian_RBF_features(X, centers, widths):
"""Compute gaussian Radial Basis Function features
Parameters
----------
X: array_like
shape (n_samples, n_features)
centers: array_like
shape (n_centers, n_features)
widths: array_like
shape (n_centers, n_features) or (n_centers,)
Returns
-------
X_RBF: array
RBF feature matrix, shape=(n_samples, n_centers)
"""
X, centers, widths = map(np.asarray, (X, centers, widths))
if widths.ndim == 1:
widths = widths[:, np.newaxis]
return np.exp(-0.5 * ((X[:, np.newaxis, :]
- centers) / widths) ** 2).sum(-1)
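# note: the exponential is taken per feature dimension and then summed over the last
# axis, so each entry is sum_d exp(-0.5 * ((x_d - c_jd) / w_j) ** 2) and the output
# has shape (n_samples, n_centers)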
plt.figure(figsize=(5, 5))
plt.subplots_adjust(hspace=0.05, wspace=0.05,
left=0.1, right=0.95,
bottom=0.1, top=0.95)
#----------------------------------------------------------------------
# first do a simple linear regression between the r-band and redshift,
# ignoring uncertainties
ax = plt.subplot(221)
X_train = mag_train[:, [0, 3]]
X_test = mag_test[:, [0, 3]]
z_fit = LinearRegression().fit(X_train, z_train).predict(X_test)
plot_results(z_test, z_fit,
plotlabel='Linear Regression:\n r-band',
xlabel=False)
#----------------------------------------------------------------------
# next do a linear regression with all bands
ax = plt.subplot(222)
z_fit = LinearRegression().fit(mag_train, z_train).predict(mag_test)
plot_results(z_test, z_fit, plotlabel="Linear Regression:\n ugriz bands",
xlabel=False, ylabel=False)
#----------------------------------------------------------------------
# next do a 3rd-order polynomial regression with all bands
ax = plt.subplot(223)
X_train = poly_features(mag_train, 3)
X_test = poly_features(mag_test, 3)
z_fit = LinearRegression().fit(X_train, z_train).predict(X_test)
plot_results(z_test, z_fit, plotlabel="3rd order Polynomial\nRegression")
#----------------------------------------------------------------------
# next do a radial basis function regression with all bands
ax = plt.subplot(224)
# remove bias term
mag = mag[:, 1:]
mag_train = mag_train[:, 1:]
mag_test = mag_test[:, 1:]
centers = mag[np.random.randint(mag.shape[0], size=100)]
centers_dist = euclidean_distances(centers, centers, squared=True)
widths = np.sqrt(centers_dist[:, :10].mean(1))
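# width of each basis function: the RMS distance from that center to the first ten
# centers, used as a rough length scale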
X_train = gaussian_RBF_features(mag_train, centers, widths)
X_test = gaussian_RBF_features(mag_test, centers, widths)
z_fit = LinearRegression().fit(X_train, z_train).predict(X_test)
plot_results(z_test, z_fit, plotlabel="Gaussian Basis Function\nRegression",
ylabel=False)
plt.show()
|
nhuntwalker/astroML
|
book_figures/chapter9/fig_photoz_basic.py
|
Python
|
bsd-2-clause
| 5,824
|
[
"Gaussian"
] |
1c8bab5b0efd930a355da2366d123fbe6f5122280d5ed9a75b3e6da82732e8b7
|
from behave import *
from time import time, sleep
from selenium.common.exceptions import NoSuchElementException, WebDriverException, StaleElementReferenceException
from selenium.webdriver import ActionChains
try:
from mist.io.tests.settings import LOCAL
except ImportError:
LOCAL = True
pass
def i_am_in_homepage(context):
possible_urls = [context.mist_config['MIST_URL']]
if not possible_urls[0].endswith('/'):
temp = possible_urls[0]
possible_urls[0] = temp + '/'
possible_urls.append(temp)
possible_urls.append(possible_urls[0] + '#')
possible_urls.append(possible_urls[0] + '#' + '/')
return context.browser.current_url in possible_urls
@when(u'I visit mist.io')
def visit(context):
"""
This method will visit the mist.io instance specified by MIST_URL in the
settings file and if it lands on the sign in page then it will wait for
the page to load, otherwise if it lands in the splash page then it will
sleep for one second and then proceed. If you wish to wait for the splash
page to load then you should use the "Then I wait for the mist.io splash
page to load" rule.
"""
context.browser.get(context.mist_config['MIST_URL'])
end_time = time() + 4
while time() < end_time:
try:
context.browser.find_element_by_id("splash")
return
except NoSuchElementException:
sleep(1)
assert False, "Splash page did not load after waiting for 4 seconds"
@then(u'I wait for the mist.io splash page to load')
def standard_splash_waiting(context):
"""
Function that waits for the splash to load. The maximum time for the page
to load is 60 seconds in this case
"""
wait_for_splash_to_appear(context)
wait_for_splash_to_load(context)
@then(u'I wait for the mist.io splash page to load for max {seconds} seconds')
def splash_waiting_with_timeout(context, seconds):
"""
    Function that waits for the splash page to load, but only for a maximum
    number of seconds. The amount of time given must be enough for the splash page
to appear first and then also load.
"""
wait_for_splash_to_appear(context, 10)
wait_for_splash_to_load(context, timeout=(int(seconds)-10))
def wait_for_splash_to_appear(context, timeout=20):
end = time() + timeout
while time() < end:
try:
context.browser.find_element_by_id("splash")
return
except NoSuchElementException:
sleep(1)
assert False, u'Splash did not appear after %s seconds' % str(timeout)
def wait_for_splash_to_load(context, timeout=60):
end = time() + timeout
while time() < end:
splash_page = context.browser.find_element_by_id("splash")
display = splash_page.value_of_css_property("display")
if 'none' in display:
return
assert False, u'Page took longer than %s seconds to load' % str(timeout)
@then(u'I wait for {seconds} seconds')
def wait(context, seconds):
sleep(int(seconds))
@when(u'I wait for {seconds} seconds')
def wait(context, seconds):
sleep(int(seconds))
@then(u'I click the button "{text}"')
def then_click(context, text):
return click_button(context, text)
@when(u'I click the button "{text}"')
def click_button(context, text):
"""
This function will try to click a button that says exactly the same thing as
the text given. If it doesn't find any button like that then it will try
to find a button that contains the text given.
"""
click_button_from_collection(context, text,
error_message='Could not find button that contains %s'
% text)
@when(u'I click the "{text}" button inside the "{popup}" popup')
def click_button_within_popup(context, text, popup):
popups = context.browser.find_elements_by_class_name("ui-popup-active")
for pop in popups:
if popup.lower() in pop.text.lower():
if text == '_x_':
buttons = pop.find_elements_by_class_name("close")
assert len(buttons) > 0, "Could not find the close button"
for i in range(0, 2):
try:
clicketi_click(context, buttons[0])
return
except WebDriverException:
sleep(1)
assert False, u'Could not click the close button'
else:
buttons = pop.find_elements_by_class_name("ui-btn")
click_button_from_collection(context, text, buttons,
'Could not find %s button in %s popup'
% (text, popup))
return
assert False, "Could not find popup with title %s" % popup
@when(u'I click the "{text}" button inside the "{panel_title}" panel')
def click_button_within_panel(context, text, panel_title):
panels = filter(lambda panel: 'ui-collapsible-collapsed' not in
panel.get_attribute('class'),
context.browser.find_elements_by_class_name(
"ui-collapsible"))
assert panels, u'No open panels found. Maybe the driver got refocused ' \
u'or the panel failed to open'
found_panel = None
for panel in panels:
header = panel.find_element_by_class_name("ui-collapsible-heading")
header = header.find_element_by_class_name("title")
if panel_title.lower() in header.text.lower():
found_panel = panel
break
assert found_panel, u'Panel with Title %s could not be found. Maybe the ' \
u'driver got refocused or the panel failed to open or ' \
u'there is no panel with that title' % panel_title
buttons = found_panel.find_elements_by_class_name("ui-btn")
click_button_from_collection(context, text, buttons,
error_message='Could not find %s button'
' inside %s panel' %
(text, panel_title))
def click_button_from_collection(context, text, button_collection=None,
error_message="Could not find button"):
button = search_for_button(context, text, button_collection)
assert button, error_message
for i in range(0, 2):
try:
clicketi_click(context, button)
return
except WebDriverException:
sleep(1)
assert False, u'Could not click button that says %s' % button.text
def search_for_button(context, text, button_collection=None, btn_cls='ui-btn'):
if not button_collection:
button_collection = context.browser.find_elements_by_class_name(btn_cls)
# search for button with exactly the same text. sometimes the driver returns
# the same element more than once and that's why we return the first
# element of the list
# also doing some cleaning if the text attribute also sends back texts
# of sub elements
button = filter(lambda b: b.text.rstrip().lstrip().split('\n')[0].lower() == text.lower()
and b.value_of_css_property('display') == 'block',
button_collection)
if len(button) > 0:
return button[0]
# if we haven't found the exact text then we search for something that
# looks like it
for button in button_collection:
button_text = button.text.split('\n')
if len(filter(lambda b: text.lower() in b.lower(), button_text)) > 0:
return button
return None
def clicketi_click(context, button):
"""
trying two different ways of clicking a button because sometimes the
Chrome driver for no apparent reason misinterprets the offset and
size of the button
"""
try:
button.click()
except WebDriverException:
action_chain = ActionChains(context.browser)
action_chain.move_to_element(button)
action_chain.click()
action_chain.perform()
@then(u'the title should be "{text}"')
def assert_title_is(context, text):
assert text == context.browser.title
@then(u'the title should contain "{text}"')
def assert_title_contains(context, text):
assert text in context.browser.title
@then(u'I wait for the links in homepage to appear')
def wait_for_buttons_to_appear(context):
end_time = time() + 100
while time() < end_time:
try:
images_button = search_for_button(context, 'Images')
counter_span = images_button.find_element_by_class_name("ui-li-count")
counter = int(counter_span.text)
break
except (NoSuchElementException, StaleElementReferenceException,
ValueError, AttributeError) as e:
            assert time() + 1 < end_time, "Links in the home page have not" \
                                          " appeared after 100 seconds"
sleep(1)
@then(u'{counter_title} counter should be greater than {counter_number} within '
u'{seconds} seconds')
def some_counter_loaded(context, counter_title, counter_number, seconds):
counter_found = search_for_button(context, counter_title)
assert counter_found, "Counter with name %s has not been found" % counter_title
end_time = time() + int(seconds)
while time() < end_time:
counter_span = counter_found.find_element_by_class_name("ui-li-count")
counter = int(counter_span.text)
if counter > int(counter_number):
return
else:
sleep(2)
assert False, u'The counter did not say that more than %s images were ' \
u'loaded' % counter_number
@when(u'I visit the {title} page after the counter has loaded')
def go_to_some_page_after_loading(context, title):
"""
    Will visit one of the basic pages (Machines, Images, Keys, Scripts) after
    waiting for the counter to load.
    For now the code will not be very accurate for the Keys page.
"""
go_to_some_page_after_counter_loading(context, title, title)
@when(u'I visit the {title} page after the {counter_title} counter has loaded')
def go_to_some_page_after_counter_loading(context, title, counter_title):
"""
    Will visit one of the basic pages (Machines, Images, Keys, Scripts) after
    waiting for the specified counter to load.
    For now the code will not be very accurate for the Keys page.
"""
if title not in ['Machines', 'Images', 'Keys', 'Networks', 'Scripts']:
raise ValueError('The page given is unknown')
if counter_title not in ['Machines', 'Images', 'Keys', 'Networks', 'Scripts']:
raise ValueError('The page given is unknown')
context.execute_steps(u'Then I wait for the links in homepage to appear')
context.execute_steps(u'Then %s counter should be greater than 0 '
u'within 80 seconds' % counter_title)
go_to_some_page_without_waiting(context, title)
@when(u'I visit the {title} page')
def go_to_some_page_without_waiting(context, title):
"""
    Will visit one of the basic pages (Machines, Images, Keys, Scripts) without
    waiting for the counter or the list on the page to load.
    For now the code will not be very accurate for the Keys page.
"""
if title not in ['Machines', 'Images', 'Keys', 'Networks', 'Scripts']:
raise ValueError('The page given is unknown')
if not i_am_in_homepage(context):
if not str(context.browser.current_url).endswith(title.lower()):
context.execute_steps(u'When I click the button "Home"')
context.execute_steps(u'Then I wait for the links in homepage to appear')
context.execute_steps(u'When I click the button "%s"' % title)
end_time = time() + 5
while time() < end_time:
try:
context.browser.find_element_by_id('%s-list-page' % title.lower().rpartition(title[-1])[0])
break
except NoSuchElementException:
assert time() + 1 < end_time, "%s list page has not appeared " \
"after 5 seconds" % title.lower()
sleep(1)
# this code will stop waiting after 3 seconds if nothing appears otherwise
# it will stop as soon as a list is loaded
end_time = time() + 3
while time() < end_time:
try:
list_of_things = context.browser.find_element_by_id('%s-list' % title.lower().rpartition(title[-1])[0])
lis = list_of_things.find_elements_by_tag_name('li')
if len(lis) > 0:
break
except NoSuchElementException:
pass
sleep(1)
|
DimensionDataCBUSydney/mist.io
|
src/mist/io/tests/gui/features/steps/general.py
|
Python
|
agpl-3.0
| 12,769
|
[
"VisIt"
] |
43413d17e587fec475761472a2be42c9707d4c352dfdb1160c05e117396e2003
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sqlite3
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SqlDatasetTestBase(test_base.DatasetTestBase):
"""Base class for setting up and testing SqlDataset."""
def _createSqlDataset(self, output_types, num_repeats=1):
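    """Builds a repeated SqlDataset over the test database and returns its
    initializer op and get_next tensor."""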
dataset = readers.SqlDataset(self.driver_name, self.data_source_name,
self.query, output_types).repeat(num_repeats)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
return init_op, get_next
def setUp(self):
self.data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
self.driver_name = array_ops.placeholder_with_default(
array_ops.constant("sqlite", dtypes.string), shape=[])
self.query = array_ops.placeholder(dtypes.string, shape=[])
conn = sqlite3.connect(self.data_source_name)
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS students")
c.execute("DROP TABLE IF EXISTS people")
c.execute("DROP TABLE IF EXISTS townspeople")
c.execute(
"CREATE TABLE IF NOT EXISTS students (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), motto VARCHAR(100), "
"school_id VARCHAR(100), favorite_nonsense_word VARCHAR(100), "
"desk_number INTEGER, income INTEGER, favorite_number INTEGER, "
"favorite_big_number INTEGER, favorite_negative_number INTEGER, "
"favorite_medium_sized_number INTEGER, brownie_points INTEGER, "
"account_balance INTEGER, registration_complete INTEGER)")
c.executemany(
"INSERT INTO students (first_name, last_name, motto, school_id, "
"favorite_nonsense_word, desk_number, income, favorite_number, "
"favorite_big_number, favorite_negative_number, "
"favorite_medium_sized_number, brownie_points, account_balance, "
"registration_complete) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[("John", "Doe", "Hi!", "123", "n\0nsense", 9, 0, 2147483647,
9223372036854775807, -2, 32767, 0, 0, 1),
("Jane", "Moe", "Hi again!", "1000", "nonsense\0", 127, -20000,
-2147483648, -9223372036854775808, -128, -32768, 255, 65535, 0)])
c.execute(
"CREATE TABLE IF NOT EXISTS people (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), state VARCHAR(100))")
c.executemany(
"INSERT INTO PEOPLE (first_name, last_name, state) VALUES (?, ?, ?)",
[("Benjamin", "Franklin", "Pennsylvania"), ("John", "Doe",
"California")])
c.execute(
"CREATE TABLE IF NOT EXISTS townspeople (id INTEGER NOT NULL PRIMARY "
"KEY, first_name VARCHAR(100), last_name VARCHAR(100), victories "
"FLOAT, accolades FLOAT, triumphs FLOAT)")
c.executemany(
"INSERT INTO townspeople (first_name, last_name, victories, "
"accolades, triumphs) VALUES (?, ?, ?, ?, ?)",
[("George", "Washington", 20.00,
1331241.321342132321324589798264627463827647382647382643874,
9007199254740991.0),
("John", "Adams", -19.95,
1331241321342132321324589798264627463827647382647382643874.0,
9007199254740992.0)])
conn.commit()
conn.close()
|
asimshankar/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/sql_dataset_test_base.py
|
Python
|
apache-2.0
| 4,471
|
[
"MOE"
] |
3456ee54dba1b53f5439201331184d06555d6fcd01e893fa4ab70cc5c1747870
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().sgettext
import re
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
from ....lib.nameorigintype import NameOriginType
#-------------------------------------------------------------------------
#
# HasNameOf
#
#-------------------------------------------------------------------------
class HasNameOf(Rule):
"""Rule that checks for full or partial name matches"""
labels = [ _('Given name:'),
_('Full Family name:'),
_('person|Title:'),
_('Suffix:'),
_('Call Name:'),
_('Nick Name:'),
_('Prefix:'),
_('Single Surname:'),
_('Connector'),
_('Patronymic:'),
_('Family Nick Name:'),
_('Regular-Expression matching:')]
name = _('People with the <name>')
description = _("Matches people with a specified (partial) name")
category = _('General filters')
def prepare(self, db):
if len(self.list) >= 12:
self.regular_expression = bool(int(self.list[11]))
else:
self.regular_expression = False
if self.regular_expression:
self.firstn = self.list[0]
self.lastn = self.list[1]
self.title = self.list[2]
self.suffix = self.list[3]
self.calln = self.list[4]
self.nick = self.list[5]
self.famnick = self.list[10]
#surname parts
self.prefix = self.list[6]
self.surn = self.list[7]
self.con = self.list[8]
self.patr = self.list[9]
else:
self.firstn = self.list[0].upper()
self.lastn = self.list[1].upper()
self.title = self.list[2].upper()
self.suffix = self.list[3].upper()
self.calln = self.list[4].upper()
self.nick = self.list[5].upper()
self.famnick = self.list[10].upper()
#surname parts
self.prefix = self.list[6].upper()
self.surn = self.list[7].upper()
self.con = self.list[8].upper()
self.patr = self.list[9].upper()
def apply(self, db, person):
for name in [person.get_primary_name()] + person.get_alternate_names():
val = 1
valpref = 0
if not self.prefix:
valpref = 1
valsurn = 0
if not self.surn:
valsurn = 1
valcon = 0
if not self.con:
valcon = 1
valpatr = 0
if not self.patr:
valpatr = 1
if self.regular_expression:
try:
if self.firstn and not re.match(self.firstn, name.get_first_name(), re.I|re.U|re.L):
val = 0
elif self.lastn and not re.match(self.lastn, name.get_surname(), re.I|re.U|re.L):
val = 0
elif self.suffix and not re.match(self.suffix, name.get_suffix(), re.I|re.U|re.L):
val = 0
elif self.title and not re.match(self.title, name.get_title(), re.I|re.U|re.L):
val = 0
elif self.calln and not re.match(self.calln, name.get_call_name(), re.I|re.U|re.L):
val = 0
elif self.nick and not re.match(self.nick, name.get_nick_name(), re.I|re.U|re.L):
val = 0
elif self.famnick and not re.match(self.famnick, name.get_family_nick_name(), re.I|re.U|re.L):
val = 0
else:
#obtain surnames
for surn in name.get_surname_list():
if self.prefix and re.match(self.prefix, surn.get_prefix(), re.I|re.U|re.L):
valpref = 1
if self.surn and re.match(self.surn, surn.get_surname(), re.I|re.U|re.L):
valsurn = 1
if self.con and re.match(self.con, surn.get_connector(), re.I|re.U|re.L):
valcon = 1
if self.patr and surn.get_origintype().value == NameOriginType.PATRONYMIC \
and re.match(self.patr, surn.get_surname(), re.I|re.U|re.L):
valpatr = 1
except re.error:
#indicate error in the pattern by matching everyone
return True
else:
if self.firstn and name.get_first_name().upper().find(self.firstn) == -1:
val = 0
elif self.lastn and name.get_surname().upper().find(self.lastn) == -1:
val = 0
                elif self.suffix and name.get_suffix().upper().find(self.suffix) == -1:
val = 0
elif self.title and name.get_title().upper().find(self.title) == -1:
val = 0
elif self.calln and name.get_call_name().upper().find(self.calln) == -1:
val = 0
elif self.nick and name.get_nick_name().upper().find(self.nick) == -1:
val = 0
elif self.famnick and name.get_family_nick_name().upper().find(self.famnick) == -1:
val = 0
else:
#obtain surnames
for surn in name.get_surname_list():
if self.prefix and surn.get_prefix().upper().find(self.prefix) != -1:
valpref = 1
if self.surn and surn.get_surname().upper().find(self.surn) != -1:
valsurn = 1
if self.con and surn.get_connector().upper().find(self.con) != -1:
valcon = 1
if self.patr and surn.get_origintype().value == NameOriginType.PATRONYMIC \
and surn.get_surname().upper().find(self.patr) != -1:
valpatr = 1
if val == 1 and valpref == 1 and valsurn == 1 and valcon == 1 and valpatr ==1:
return True
return False
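# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the positional value list this rule expects, mirroring
# the twelve `labels` entries above. The literal names, the `db`/`person`
# arguments and the assumption that the base Rule constructor accepts the
# value list (as the use of self.list in prepare() suggests) are for
# illustration only; the filter framework normally builds and runs rules.
def _example_hasnameof(db, person):
    values = ["John",    # Given name
              "Smith",   # Full Family name
              "", "", "", "",          # Title, Suffix, Call Name, Nick Name
              "", "", "", "", "",      # Prefix, Surname, Connector, Patronymic, Family Nick Name
              "0"]                     # Regular-Expression matching: off
    rule = HasNameOf(values)
    rule.prepare(db)
    return rule.apply(db, person)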
|
Forage/Gramps
|
gramps/gen/filters/rules/person/_hasnameof.py
|
Python
|
gpl-2.0
| 7,623
|
[
"Brian"
] |
b7a84ca1ee2e2caa503b2691241c9d1733844bc4a028bd236cac087170f4de0b
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notification subsystem background jobs."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
import logging
from common import utils as common_utils
from controllers import sites
from controllers import utils as controllers_utils
from models import utils as model_utils
from modules.notifications import notifications
from google.appengine.ext import db
from google.appengine.ext import deferred
_LOG = logging.getLogger('modules.notifications.cron')
logging.basicConfig()
@db.transactional(xg=True)
def process_notification(notification, now, stats):
notification_key = notification.key()
policy = None
stats.started += 1
# Treat as module-protected. pylint: disable=protected-access
if notification._done_date:
_LOG.info(
'Skipping offline processing of notification with key %s; already '
'done at %s', notification_key, notification._done_date
)
stats.skipped_already_done += 1
return
if notifications.Manager._is_still_enqueued(notification, now):
_LOG.info(
'Skipping offline processing of notification with key %s; still on '
'queue (last enqueued: %s)', notification_key,
notification._last_enqueue_date)
stats.skipped_still_enqueued += 1
return
payload_key = db.Key.from_path(
notifications.Payload.kind(),
notifications.Payload.key_name(
notification.to, notification.intent, notification.enqueue_date)
)
payload = db.get(payload_key)
if not payload:
_LOG.error(
'Could not process notification with key %s; associated payload '
'with key %s not found', notification_key, payload_key
)
stats.missing_payload += 1
return
if notifications.Manager._is_too_old_to_reenqueue(
notification.enqueue_date, now):
stats.too_old += 1
exception = notifications.NotificationTooOldError((
'Notification %s with enqueue_date %s too old to re-enqueue at %s; '
'limit is %s days') % (
notification_key, notification.enqueue_date, now,
notifications._MAX_RETRY_DAYS,
))
notifications.Manager._mark_failed(
notification, now, exception, permanent=True)
if notification._fail_date or notification._send_date:
policy = notifications._RETENTION_POLICIES.get(
notification._retention_policy)
notifications.Manager._mark_done(notification, now)
if policy:
policy.run(notification, payload)
stats.policy_run += 1
else:
_LOG.warning(
'Cannot apply retention policy %s to notification %s and '
'payload %s; policy not found. Existing policies are: %s',
notification._retention_policy, notification_key, payload_key,
', '.join(sorted(notifications._RETENTION_POLICIES.keys()))
)
stats.missing_policy += 1
db.put([notification, payload])
else:
notifications.Manager._mark_enqueued(notification, now)
db.put(notification)
deferred.defer(
notifications.Manager._transactional_send_mail_task,
notification_key, payload_key,
_retry_options=notifications.Manager._get_retry_options()
)
stats.reenqueued += 1
class _Stats(object):
def __init__(self, namespace):
self.missing_payload = 0
self.missing_policy = 0
self.namespace = namespace
self.policy_run = 0
self.reenqueued = 0
self.skipped_already_done = 0
self.skipped_still_enqueued = 0
self.started = 0
self.too_old = 0
def __str__(self):
return (
'Stats for namespace "%(namespace)s":'
'\n\tmissing_payload: %(missing_payload)s'
'\n\tmissing_policy: %(missing_policy)s'
'\n\tpolicy_run: %(policy_run)s'
'\n\tre-enqueued: %(reenqueued)s'
'\n\tskipped_already_done: %(skipped_already_done)s'
'\n\tskipped_still_enqueued: %(skipped_still_enqueued)s'
'\n\tstarted: %(started)s'
'\n\ttoo_old: %(too_old)s'
) % self.__dict__
class ProcessPendingNotificationsHandler(controllers_utils.BaseHandler):
"""Iterates through all courses, re-enqueueing or expiring pending items.
Only one of these jobs runs at any given time. This is enforced by App
Engine's 10 minute limit plus scheduling this to run daily.
However, write operations here must still be atomic because admins could
manually visit the handler at any time.
"""
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
namespaces = [
context.get_namespace_name() for context in sites.get_all_courses()
]
now = datetime.datetime.utcnow()
_LOG.info(
'Begin process_pending_notifications cron; found namespaces %s at '
'%s', ', '.join(["'%s'" % n for n in namespaces]), now
)
for namespace in namespaces:
stats = _Stats(namespace)
_LOG.info("Begin processing notifications for namespace '%s'",
namespace)
self._process_records(namespace, now, stats)
_LOG.info('Done processing. %s', stats)
def _process_records(self, namespace, now, stats):
with common_utils.Namespace(namespace):
# Treating as module-protected. pylint: disable=protected-access
mapper = model_utils.QueryMapper(
notifications.Manager._get_in_process_notifications_query())
mapper.run(process_notification, now, stats)
|
GirlsCodePy/girlscode-coursebuilder
|
modules/notifications/cron.py
|
Python
|
gpl-3.0
| 6,414
|
[
"VisIt"
] |
95ddc244ba53a50b06756e5963ed6365004858c227b2cde70e87a580041e362c
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# from: http://www.eol.ucar.edu/projects/ceop/dm/documents/refdata_report/eqns.html
from numpy import exp
import pdb
def CtoF(tc):
return ( tc * 9. / 5. ) + 32.0
def CtoK(tc):
return tc + 273.15
def FtoK(tf):
return ( (tf - 32) * 5.0 / 9.0) + 273.15
def FtoC(tf):
return (tf - 32.0) * 5.0 / 9.0
def KtoC(tk):
return (tk - 273.15 )
def KtoF(tk):
return ( ( tk - 273.15 ) * 9.0 / 5.0 ) + 32.0
def ComputeES(tc):
'''
Tc: Temperature in deg Celsius
es: Saturation vapor pressure in mbar
'''
es = 6.112 * exp( ( 17.67 * tc ) / ( tc + 243.5 ) );
return es
def ComputeE(td):
'''
Td: dew point in deg Celcius
e: vapor pressure in mbar
'''
e = 6.112 * exp( ( 17.67 * td) / ( td + 243.5 ) );
return e
def ComputeQ(h):
'''
    h: netCDF file handle providing sp (surface pressure, Pa) and d2m (dew point, K)
    q: specific humidity
'''
sp = h.getData( variable='sp' )
sp = sp/100 # from Pa to Mb
d2m = h.getData( variable='d2m' )
d2m = KtoC(d2m)
e = ComputeE(d2m)
q = ( 0.622 * e ) / ( sp - ( 0.378 * e ) );
return q
def ComputeRH( h ):
'''
    h is the netCDF file handle containing t2m and d2m in the same file
'''
t2m = h.getData( variable='t2m' )
t2m = KtoC(t2m)
d2m = h.getData( variable='d2m' )
d2m = KtoC(d2m)
es = ComputeES(t2m)
e = ComputeE(d2m)
RH = 100 * (e /es )
return RH
def ComputeJRA25RH( h ):
'''
    h is the netCDF file handle containing TMPprs and DEPRprs in the same file
'''
#pdb.set_trace()
ta = h.getData( variable='tmpprs' )
ta = KtoC(ta)
da = h.getData( variable='deprprs' )
da = KtoC(da)
es = ComputeES(ta)
e = ComputeE(da)
RH = 100 * (e /es )
return RH
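# --- Illustrative sketch (not part of the original module) ---
# A small, hedged worked example of the Magnus-type formulas above: relative
# humidity from an assumed 2 m temperature of 25 degC and dew point of 20 degC.
def _example_rh(tc=25.0, td=20.0):
    es = ComputeES(tc)      # saturation vapor pressure (mbar) at air temperature
    e = ComputeE(td)        # actual vapor pressure (mbar) at dew point
    return 100.0 * e / es   # roughly 74% for these assumed values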
|
apache/climate
|
obs4MIPs/factory/equations.py
|
Python
|
apache-2.0
| 2,555
|
[
"NetCDF"
] |
0fa172e2ad3f4a88fa08dcddf72cd5949595c84af4359af31da5b131f32d5bc2
|
##
# Copyright 2012-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for module_generator.py.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import os
import sys
import tempfile
from unittest import TextTestRunner, TestSuite
from vsc.utils.fancylogger import setLogLevelDebug, logToScreen
from easybuild.framework.easyconfig.tools import process_easyconfig
from easybuild.tools import config
from easybuild.tools.module_generator import ModuleGeneratorLua, ModuleGeneratorTcl
from easybuild.tools.module_naming_scheme.utilities import is_valid_module_name
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig.easyconfig import EasyConfig, ActiveMNS
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.utilities import quote_str
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, find_full_path, init_config
class ModuleGeneratorTest(EnhancedTestCase):
"""Tests for module_generator module."""
MODULE_GENERATOR_CLASS = None
def setUp(self):
"""Test setup."""
super(ModuleGeneratorTest, self).setUp()
# find .eb file
eb_path = os.path.join(os.path.join(os.path.dirname(__file__), 'easyconfigs'), 'gzip-1.4.eb')
eb_full_path = find_full_path(eb_path)
self.assertTrue(eb_full_path)
ec = EasyConfig(eb_full_path)
self.eb = EasyBlock(ec)
self.modgen = self.MODULE_GENERATOR_CLASS(self.eb)
self.modgen.app.installdir = tempfile.mkdtemp(prefix='easybuild-modgen-test-')
self.orig_module_naming_scheme = config.get_module_naming_scheme()
def test_descr(self):
"""Test generation of module description (which includes '#%Module' header)."""
gzip_txt = "gzip (GNU zip) is a popular data compression program as a replacement for compress "
gzip_txt += "- Homepage: http://www.gzip.org/"
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = '\n'.join([
"proc ModulesHelp { } {",
" puts stderr { %s" % gzip_txt,
" }",
"}",
'',
"module-whatis {Description: %s}" % gzip_txt,
'',
"set root %s" % self.modgen.app.installdir,
'',
"conflict gzip",
'',
])
else:
expected = '\n'.join([
'help([[%s]])' % gzip_txt,
'',
"whatis([[Description: %s]])" % gzip_txt,
'',
'local root = "%s"' % self.modgen.app.installdir,
'',
'conflict("gzip")',
'',
])
desc = self.modgen.get_description()
self.assertEqual(desc, expected)
# Test description with list of 'whatis' strings
self.eb.cfg['whatis'] = ['foo', 'bar']
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = '\n'.join([
"proc ModulesHelp { } {",
" puts stderr { %s" % gzip_txt,
" }",
"}",
'',
"module-whatis {foo}",
"module-whatis {bar}",
'',
"set root %s" % self.modgen.app.installdir,
'',
"conflict gzip",
'',
])
else:
expected = '\n'.join([
'help([[%s]])' % gzip_txt,
'',
"whatis([[foo]])",
"whatis([[bar]])",
'',
'local root = "%s"' % self.modgen.app.installdir,
'',
'conflict("gzip")',
'',
])
desc = self.modgen.get_description()
self.assertEqual(desc, expected)
def test_load(self):
"""Test load part in generated module file."""
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
# default: guarded module load (which implies no recursive unloading)
expected = '\n'.join([
'',
"if { ![ is-loaded mod_name ] } {",
" module load mod_name",
"}",
'',
])
self.assertEqual(expected, self.modgen.load_module("mod_name"))
# with recursive unloading: no if is-loaded guard
expected = '\n'.join([
'',
"module load mod_name",
'',
])
self.assertEqual(expected, self.modgen.load_module("mod_name", recursive_unload=True))
init_config(build_options={'recursive_mod_unload': True})
self.assertEqual(expected, self.modgen.load_module("mod_name"))
else:
# default: guarded module load (which implies no recursive unloading)
expected = '\n'.join([
'',
'if not isloaded("mod_name") then',
' load("mod_name")',
'end',
'',
])
self.assertEqual(expected, self.modgen.load_module("mod_name"))
# with recursive unloading: no if isloaded guard
expected = '\n'.join([
'',
'load("mod_name")',
'',
])
self.assertEqual(expected, self.modgen.load_module("mod_name", recursive_unload=True))
init_config(build_options={'recursive_mod_unload': True})
self.assertEqual(expected, self.modgen.load_module("mod_name"))
def test_unload(self):
"""Test unload part in generated module file."""
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = '\n'.join([
'',
"module unload mod_name",
])
else:
expected = '\n'.join([
'',
'unload("mod_name")',
])
self.assertEqual(expected, self.modgen.unload_module("mod_name"))
def test_swap(self):
"""Test for swap statements."""
# unguarded swap
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = '\n'.join([
'',
"module swap foo bar",
'',
])
else:
expected = '\n'.join([
'',
'swap("foo", "bar")',
'',
])
self.assertEqual(expected, self.modgen.swap_module('foo', 'bar', guarded=False))
# guarded swap (enabled by default)
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = '\n'.join([
'',
"if { [ is-loaded foo ] } {",
" module swap foo bar",
'} else {',
" module load bar",
'}',
'',
])
else:
expected = '\n'.join([
'',
'if isloaded("foo") then',
' swap("foo", "bar")',
'else',
' load("bar")',
'end',
'',
])
self.assertEqual(expected, self.modgen.swap_module('foo', 'bar', guarded=True))
self.assertEqual(expected, self.modgen.swap_module('foo', 'bar'))
def test_prepend_paths(self):
"""Test generating prepend-paths statements."""
# test prepend_paths
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = ''.join([
"prepend-path\tkey\t\t$root/path1\n",
"prepend-path\tkey\t\t$root/path2\n",
"prepend-path\tkey\t\t$root\n",
])
paths = ['path1', 'path2', '']
self.assertEqual(expected, self.modgen.prepend_paths("key", paths))
# 2nd call should still give same result, no side-effects like manipulating passed list 'paths'!
self.assertEqual(expected, self.modgen.prepend_paths("key", paths))
expected = "prepend-path\tbar\t\t$root/foo\n"
self.assertEqual(expected, self.modgen.prepend_paths("bar", "foo"))
res = self.modgen.prepend_paths("key", ["/abs/path"], allow_abs=True)
self.assertEqual("prepend-path\tkey\t\t/abs/path\n", res)
res = self.modgen.prepend_paths('key', ['1234@example.com'], expand_relpaths=False)
self.assertEqual("prepend-path\tkey\t\t1234@example.com\n", res)
else:
expected = ''.join([
'prepend_path("key", pathJoin(root, "path1"))\n',
'prepend_path("key", pathJoin(root, "path2"))\n',
'prepend_path("key", root)\n',
])
paths = ['path1', 'path2', '']
self.assertEqual(expected, self.modgen.prepend_paths("key", paths))
# 2nd call should still give same result, no side-effects like manipulating passed list 'paths'!
self.assertEqual(expected, self.modgen.prepend_paths("key", paths))
expected = 'prepend_path("bar", pathJoin(root, "foo"))\n'
self.assertEqual(expected, self.modgen.prepend_paths("bar", "foo"))
expected = 'prepend_path("key", "/abs/path")\n'
self.assertEqual(expected, self.modgen.prepend_paths("key", ["/abs/path"], allow_abs=True))
res = self.modgen.prepend_paths('key', ['1234@example.com'], expand_relpaths=False)
self.assertEqual('prepend_path("key", "1234@example.com")\n', res)
self.assertErrorRegex(EasyBuildError, "Absolute path %s/foo passed to prepend_paths " \
"which only expects relative paths." % self.modgen.app.installdir,
self.modgen.prepend_paths, "key2", ["bar", "%s/foo" % self.modgen.app.installdir])
def test_use(self):
"""Test generating module use statements."""
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
# Test regular 'module use' statements
expected = ''.join([
'module use "/some/path"\n',
'module use "/foo/bar/baz"\n',
])
self.assertEqual(self.modgen.use(["/some/path", "/foo/bar/baz"]), expected)
# Test guarded 'module use' statements using prefix
expected = ''.join([
'if { [ file isdirectory [ file join "/foo" "/some/path" ] ] } {\n',
' module use [ file join "/foo" "/some/path" ]\n',
'}\n',
])
self.assertEqual(self.modgen.use(["/some/path"], prefix=quote_str("/foo"), guarded=True), expected)
else:
# Test regular 'module use' statements
expected = ''.join([
'prepend_path("MODULEPATH", "/some/path")\n',
'prepend_path("MODULEPATH", "/foo/bar/baz")\n',
])
self.assertEqual(self.modgen.use(["/some/path", "/foo/bar/baz"]), expected)
# Test guarded 'module use' statements using prefix
expected = ''.join([
'if isDir(pathJoin("/foo", "/some/path")) then\n',
' prepend_path("MODULEPATH", pathJoin("/foo", "/some/path"))\n',
'end\n',
])
self.assertEqual(self.modgen.use(["/some/path"], prefix=quote_str("/foo"), guarded=True), expected)
def test_env(self):
"""Test setting of environment variables."""
# test set_environment
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
self.assertEqual('setenv\tkey\t\t"value"\n', self.modgen.set_environment("key", "value"))
self.assertEqual("setenv\tkey\t\t'va\"lue'\n", self.modgen.set_environment("key", 'va"lue'))
self.assertEqual('setenv\tkey\t\t"va\'lue"\n', self.modgen.set_environment("key", "va'lue"))
self.assertEqual('setenv\tkey\t\t"""va"l\'ue"""\n', self.modgen.set_environment("key", """va"l'ue"""))
else:
self.assertEqual('setenv("key", "value")\n', self.modgen.set_environment("key", "value"))
def test_getenv_cmd(self):
"""Test getting value of environment variable."""
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
self.assertEqual('$env(HOSTNAME)', self.modgen.getenv_cmd('HOSTNAME'))
self.assertEqual('$env(HOME)', self.modgen.getenv_cmd('HOME'))
else:
self.assertEqual('os.getenv("HOSTNAME")', self.modgen.getenv_cmd('HOSTNAME'))
self.assertEqual('os.getenv("HOME")', self.modgen.getenv_cmd('HOME'))
def test_alias(self):
"""Test setting of alias in modulefiles."""
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
# test set_alias
self.assertEqual('set-alias\tkey\t\t"value"\n', self.modgen.set_alias("key", "value"))
self.assertEqual("set-alias\tkey\t\t'va\"lue'\n", self.modgen.set_alias("key", 'va"lue'))
self.assertEqual('set-alias\tkey\t\t"va\'lue"\n', self.modgen.set_alias("key", "va'lue"))
self.assertEqual('set-alias\tkey\t\t"""va"l\'ue"""\n', self.modgen.set_alias("key", """va"l'ue"""))
else:
self.assertEqual('set_alias("key", "value")\n', self.modgen.set_alias("key", "value"))
def test_conditional_statement(self):
"""Test formatting of conditional statements."""
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
simple_cond = self.modgen.conditional_statement("is-loaded foo", "module load bar")
expected = '\n'.join([
"if { [ is-loaded foo ] } {",
" module load bar",
'}',
'',
])
self.assertEqual(simple_cond, expected)
neg_cond = self.modgen.conditional_statement("is-loaded foo", "module load bar", negative=True)
expected = '\n'.join([
"if { ![ is-loaded foo ] } {",
" module load bar",
'}',
'',
])
self.assertEqual(neg_cond, expected)
if_else_cond = self.modgen.conditional_statement("is-loaded foo", "module load bar", else_body='puts "foo"')
expected = '\n'.join([
"if { [ is-loaded foo ] } {",
" module load bar",
"} else {",
' puts "foo"',
'}',
'',
])
self.assertEqual(if_else_cond, expected)
elif self.MODULE_GENERATOR_CLASS == ModuleGeneratorLua:
simple_cond = self.modgen.conditional_statement('isloaded("foo")', 'load("bar")')
expected = '\n'.join([
'if isloaded("foo") then',
' load("bar")',
'end',
'',
])
self.assertEqual(simple_cond, expected)
neg_cond = self.modgen.conditional_statement('isloaded("foo")', 'load("bar")', negative=True)
expected = '\n'.join([
'if not isloaded("foo") then',
' load("bar")',
'end',
'',
])
self.assertEqual(neg_cond, expected)
if_else_cond = self.modgen.conditional_statement('isloaded("foo")', 'load("bar")', else_body='load("bleh")')
expected = '\n'.join([
'if isloaded("foo") then',
' load("bar")',
'else',
' load("bleh")',
'end',
'',
])
self.assertEqual(if_else_cond, expected)
else:
self.assertTrue(False, "Unknown module syntax")
def test_load_msg(self):
"""Test including a load message in the module file."""
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
tcl_load_msg = '\n'.join([
'',
"if { [ module-info mode load ] } {",
" puts stderr \"test \\$test \\$test",
" test \\$foo \\$bar\"",
"}",
'',
])
self.assertEqual(tcl_load_msg, self.modgen.msg_on_load('test $test \\$test\ntest $foo \\$bar'))
else:
pass
def test_module_naming_scheme(self):
"""Test using default module naming scheme."""
all_stops = [x[0] for x in EasyBlock.get_steps()]
init_config(build_options={'valid_stops': all_stops})
ecs_dir = os.path.join(os.path.dirname(__file__), 'easyconfigs')
ec_files = [os.path.join(subdir, fil) for (subdir, _, files) in os.walk(ecs_dir) for fil in files]
# TODO FIXME: drop this once 2.0/.yeb support works
ec_files = [fil for fil in ec_files if not ('v2.0/' in fil or 'yeb/' in fil)]
build_options = {
'check_osdeps': False,
'external_modules_metadata': {},
'robot_path': [ecs_dir],
'valid_stops': all_stops,
'validate': False,
}
init_config(build_options=build_options)
def test_mns():
"""Test default module naming scheme."""
# test default naming scheme
for ec_file in [f for f in ec_files if not 'broken' in os.path.basename(f)]:
ec_path = os.path.abspath(ec_file)
ecs = process_easyconfig(ec_path, validate=False)
# derive module name directly from easyconfig file name
ec_fn = os.path.basename(ec_file)
if ec_fn in ec2mod_map:
# only check first, ignore any others (occurs when blocks are used (format v1.0 only))
self.assertEqual(ec2mod_map[ec_fn], ActiveMNS().det_full_module_name(ecs[0]['ec']))
# test default module naming scheme
default_ec2mod_map = {
'GCC-4.6.3.eb': 'GCC/4.6.3',
'gzip-1.4.eb': 'gzip/1.4',
'gzip-1.4-GCC-4.6.3.eb': 'gzip/1.4-GCC-4.6.3',
'gzip-1.5-goolf-1.4.10.eb': 'gzip/1.5-goolf-1.4.10',
'gzip-1.5-ictce-4.1.13.eb': 'gzip/1.5-ictce-4.1.13',
'toy-0.0.eb': 'toy/0.0',
'toy-0.0-multiple.eb': 'toy/0.0-somesuffix', # first block sets versionsuffix to '-somesuffix'
}
ec2mod_map = default_ec2mod_map
test_mns()
# generating module name from non-parsed easyconfig works fine
non_parsed = {
'name': 'foo',
'version': '1.2.3',
'versionsuffix': '-bar',
'toolchain': {
'name': 't00ls',
'version': '6.6.6',
},
}
self.assertEqual('foo/1.2.3-t00ls-6.6.6-bar', ActiveMNS().det_full_module_name(non_parsed))
# make sure test module naming schemes are available
mns_mods = ['broken_module_naming_scheme', 'test_module_naming_scheme', 'test_module_naming_scheme_more']
for test_mns_mod in mns_mods:
mns_path = "easybuild.tools.module_naming_scheme.%s" % test_mns_mod
__import__(mns_path, globals(), locals(), [''])
init_config(build_options=build_options)
# verify that key errors in module naming scheme are reported properly
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'BrokenModuleNamingScheme'
init_config(build_options=build_options)
err_pattern = 'nosucheasyconfigparameteravailable'
self.assertErrorRegex(EasyBuildError, err_pattern, EasyConfig, os.path.join(ecs_dir, 'gzip-1.5-goolf-1.4.10.eb'))
# test simple custom module naming scheme
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'TestModuleNamingScheme'
init_config(build_options=build_options)
ec2mod_map = {
'GCC-4.6.3.eb': 'GCC/4.6.3',
'gzip-1.4.eb': 'gzip/1.4',
'gzip-1.4-GCC-4.6.3.eb': 'gnu/gzip/1.4',
'gzip-1.5-goolf-1.4.10.eb': 'gnu/openmpi/gzip/1.5',
'gzip-1.5-ictce-4.1.13.eb': 'intel/intelmpi/gzip/1.5',
'toy-0.0.eb': 'toy/0.0',
'toy-0.0-multiple.eb': 'toy/0.0', # test module naming scheme ignores version suffixes
}
test_mns()
ec = EasyConfig(os.path.join(ecs_dir, 'gzip-1.5-goolf-1.4.10.eb'))
self.assertEqual(ec.toolchain.det_short_module_name(), 'goolf/1.4.10')
# test module naming scheme using all available easyconfig parameters
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'TestModuleNamingSchemeMore'
init_config(build_options=build_options)
# note: these checksums will change if another easyconfig parameter is added
ec2mod_map = {
'GCC-4.6.3.eb': 'GCC/9e9ab5a1e978f0843b5aedb63ac4f14c51efb859',
'gzip-1.4.eb': 'gzip/53d5c13e85cb6945bd43a58d1c8d4a4c02f3462d',
'gzip-1.4-GCC-4.6.3.eb': 'gzip/585eba598f33c64ef01c6fa47af0fc37f3751311',
'gzip-1.5-goolf-1.4.10.eb': 'gzip/fceb41e04c26b540b7276c4246d1ecdd1e8251c9',
'gzip-1.5-ictce-4.1.13.eb': 'gzip/ae16b3a0a330d4323987b360c0d024f244ac4498',
'toy-0.0.eb': 'toy/44a206d9e8c14130cc9f79e061468303c6e91b53',
'toy-0.0-multiple.eb': 'toy/44a206d9e8c14130cc9f79e061468303c6e91b53',
}
test_mns()
# test determining module name for dependencies (i.e. non-parsed easyconfigs)
# using a module naming scheme that requires all easyconfig parameters
ec2mod_map['gzip-1.5-goolf-1.4.10.eb'] = 'gzip/.fceb41e04c26b540b7276c4246d1ecdd1e8251c9'
for dep_ec, dep_spec in [
('GCC-4.6.3.eb', {
'name': 'GCC',
'version': '4.6.3',
'versionsuffix': '',
'toolchain': {'name': 'dummy', 'version': 'dummy'},
'hidden': False,
}),
('gzip-1.5-goolf-1.4.10.eb', {
'name': 'gzip',
'version': '1.5',
'versionsuffix': '',
'toolchain': {'name': 'goolf', 'version': '1.4.10'},
'hidden': True,
}),
('toy-0.0-multiple.eb', {
'name': 'toy',
'version': '0.0',
'versionsuffix': '-multiple',
'toolchain': {'name': 'dummy', 'version': 'dummy'},
'hidden': False,
}),
]:
# determine full module name
self.assertEqual(ActiveMNS().det_full_module_name(dep_spec), ec2mod_map[dep_ec])
ec = EasyConfig(os.path.join(ecs_dir, 'gzip-1.5-goolf-1.4.10.eb'), hidden=True)
self.assertEqual(ec.full_mod_name, ec2mod_map['gzip-1.5-goolf-1.4.10.eb'])
self.assertEqual(ec.toolchain.det_short_module_name(), 'goolf/a86eb41d8f9c1d6f2d3d61cdb8f420cc2a21cada')
# restore default module naming scheme, and retest
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = self.orig_module_naming_scheme
init_config(build_options=build_options)
ec2mod_map = default_ec2mod_map
test_mns()
def test_mod_name_validation(self):
"""Test module naming validation."""
# module name must be a string
self.assertTrue(not is_valid_module_name(('foo', 'bar')))
self.assertTrue(not is_valid_module_name(['foo', 'bar']))
self.assertTrue(not is_valid_module_name(123))
# module name must be relative
self.assertTrue(not is_valid_module_name('/foo/bar'))
# module name must only contain valid characters
self.assertTrue(not is_valid_module_name('foo\x0bbar'))
self.assertTrue(not is_valid_module_name('foo\x0cbar'))
self.assertTrue(not is_valid_module_name('foo\rbar'))
self.assertTrue(not is_valid_module_name('foo\0bar'))
# valid module name must be accepted
self.assertTrue(is_valid_module_name('gzip/goolf-1.4.10-suffix'))
self.assertTrue(is_valid_module_name('GCC/4.7.2'))
self.assertTrue(is_valid_module_name('foo-bar/1.2.3'))
self.assertTrue(is_valid_module_name('ictce'))
def test_is_short_modname_for(self):
"""Test is_short_modname_for method of module naming schemes."""
test_cases = [
('GCC/4.7.2', 'GCC', True),
('gzip/1.6-gompi-1.4.10', 'gzip', True),
('OpenMPI/1.6.4-GCC-4.7.2-no-OFED', 'OpenMPI', True),
('BLACS/1.1-gompi-1.1.0-no-OFED', 'BLACS', True),
('ScaLAPACK/1.8.0-gompi-1.1.0-no-OFED-ATLAS-3.8.4-LAPACK-3.4.0-BLACS-1.1', 'ScaLAPACK', True),
('netCDF-C++/4.2-goolf-1.4.10', 'netCDF-C++', True),
('gcc/4.7.2', 'GCC', False),
('ScaLAPACK/1.8.0-gompi-1.1.0-no-OFED-ATLAS-3.8.4-LAPACK-3.4.0-BLACS-1.1', 'BLACS', False),
('apps/blacs/1.1', 'BLACS', False),
('lib/math/BLACS-stable/1.1', 'BLACS', False),
# required so PrgEnv can be listed versionless as external module in Cray toolchains
('PrgEnv', 'PrgEnv', True),
]
for modname, softname, res in test_cases:
if res:
errormsg = "%s is recognised as a module for '%s'" % (modname, softname)
else:
errormsg = "%s is NOT recognised as a module for '%s'" % (modname, softname)
self.assertEqual(ActiveMNS().is_short_modname_for(modname, softname), res, errormsg)
def test_hierarchical_mns(self):
"""Test hierarchical module naming scheme."""
moduleclasses = ['base', 'compiler', 'mpi', 'numlib', 'system', 'toolchain']
ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
all_stops = [x[0] for x in EasyBlock.get_steps()]
build_options = {
'check_osdeps': False,
'robot_path': [ecs_dir],
'valid_stops': all_stops,
'validate': False,
'valid_module_classes': moduleclasses,
}
def test_ec(ecfile, short_modname, mod_subdir, modpath_exts, user_modpath_exts, init_modpaths):
"""Test whether active module naming scheme returns expected values."""
ec = EasyConfig(os.path.join(ecs_dir, ecfile))
self.assertEqual(ActiveMNS().det_full_module_name(ec), os.path.join(mod_subdir, short_modname))
self.assertEqual(ActiveMNS().det_short_module_name(ec), short_modname)
self.assertEqual(ActiveMNS().det_module_subdir(ec), mod_subdir)
self.assertEqual(ActiveMNS().det_modpath_extensions(ec), modpath_exts)
self.assertEqual(ActiveMNS().det_user_modpath_extensions(ec), user_modpath_exts)
self.assertEqual(ActiveMNS().det_init_modulepaths(ec), init_modpaths)
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'HierarchicalMNS'
init_config(build_options=build_options)
# format: easyconfig_file: (short_mod_name, mod_subdir, modpath_exts, user_modpath_exts, init_modpaths)
iccver = '2013.5.192-GCC-4.8.3'
impi_ec = 'impi-4.1.3.049-iccifort-2013.5.192-GCC-4.8.3.eb'
imkl_ec = 'imkl-11.1.2.144-iimpi-5.5.3-GCC-4.8.3.eb'
test_ecs = {
'GCC-4.7.2.eb': ('GCC/4.7.2', 'Core', ['Compiler/GCC/4.7.2'],
['Compiler/GCC/4.7.2'], ['Core']),
'OpenMPI-1.6.4-GCC-4.7.2.eb': ('OpenMPI/1.6.4', 'Compiler/GCC/4.7.2', ['MPI/GCC/4.7.2/OpenMPI/1.6.4'],
['MPI/GCC/4.7.2/OpenMPI/1.6.4'], ['Core']),
'gzip-1.5-goolf-1.4.10.eb': ('gzip/1.5', 'MPI/GCC/4.7.2/OpenMPI/1.6.4', [],
[], ['Core']),
'goolf-1.4.10.eb': ('goolf/1.4.10', 'Core', [],
[], ['Core']),
'icc-2013.5.192-GCC-4.8.3.eb': ('icc/%s' % iccver, 'Core', ['Compiler/intel/%s' % iccver],
['Compiler/intel/%s' % iccver], ['Core']),
'ifort-2013.3.163.eb': ('ifort/2013.3.163', 'Core', ['Compiler/intel/2013.3.163'],
['Compiler/intel/2013.3.163'], ['Core']),
'CUDA-5.5.22-GCC-4.8.2.eb': ('CUDA/5.5.22', 'Compiler/GCC/4.8.2', ['Compiler/GCC-CUDA/4.8.2-5.5.22'],
['Compiler/GCC-CUDA/4.8.2-5.5.22'], ['Core']),
impi_ec: ('impi/4.1.3.049', 'Compiler/intel/%s' % iccver, ['MPI/intel/%s/impi/4.1.3.049' % iccver],
['MPI/intel/%s/impi/4.1.3.049' % iccver], ['Core']),
imkl_ec: ('imkl/11.1.2.144', 'MPI/intel/%s/impi/4.1.3.049' % iccver, [],
[], ['Core']),
}
for ecfile, mns_vals in test_ecs.items():
test_ec(ecfile, *mns_vals)
# impi with dummy toolchain, which doesn't make sense in a hierarchical context
ec = EasyConfig(os.path.join(ecs_dir, 'impi-4.1.3.049.eb'))
self.assertErrorRegex(EasyBuildError, 'No compiler available.*MPI lib', ActiveMNS().det_modpath_extensions, ec)
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'CategorizedHMNS'
init_config(build_options=build_options)
# format: easyconfig_file: (short_mod_name, mod_subdir, modpath_exts, user_modpath_exts)
test_ecs = {
'GCC-4.7.2.eb': ('GCC/4.7.2', 'Core/compiler',
['Compiler/GCC/4.7.2/%s' % c for c in moduleclasses],
['Compiler/GCC/4.7.2']),
'OpenMPI-1.6.4-GCC-4.7.2.eb': ('OpenMPI/1.6.4', 'Compiler/GCC/4.7.2/mpi',
['MPI/GCC/4.7.2/OpenMPI/1.6.4/%s' % c for c in moduleclasses],
['MPI/GCC/4.7.2/OpenMPI/1.6.4']),
'gzip-1.5-goolf-1.4.10.eb': ('gzip/1.5', 'MPI/GCC/4.7.2/OpenMPI/1.6.4/tools',
[], []),
'goolf-1.4.10.eb': ('goolf/1.4.10', 'Core/toolchain',
[], []),
'icc-2013.5.192-GCC-4.8.3.eb': ('icc/%s' % iccver, 'Core/compiler',
['Compiler/intel/%s/%s' % (iccver, c) for c in moduleclasses],
['Compiler/intel/%s' % iccver]),
'ifort-2013.3.163.eb': ('ifort/2013.3.163', 'Core/compiler',
['Compiler/intel/2013.3.163/%s' % c for c in moduleclasses],
['Compiler/intel/2013.3.163']),
'CUDA-5.5.22-GCC-4.8.2.eb': ('CUDA/5.5.22', 'Compiler/GCC/4.8.2/system',
['Compiler/GCC-CUDA/4.8.2-5.5.22/%s' % c for c in moduleclasses],
['Compiler/GCC-CUDA/4.8.2-5.5.22']),
impi_ec: ('impi/4.1.3.049', 'Compiler/intel/%s/mpi' % iccver,
['MPI/intel/%s/impi/4.1.3.049/%s' % (iccver, c) for c in moduleclasses],
['MPI/intel/%s/impi/4.1.3.049' % iccver]),
imkl_ec: ('imkl/11.1.2.144', 'MPI/intel/%s/impi/4.1.3.049/numlib' % iccver,
[], []),
}
for ecfile, mns_vals in test_ecs.items():
test_ec(ecfile, *mns_vals, init_modpaths = ['Core/%s' % c for c in moduleclasses])
# impi with dummy toolchain, which doesn't make sense in a hierarchical context
ec = EasyConfig(os.path.join(ecs_dir, 'impi-4.1.3.049.eb'))
self.assertErrorRegex(EasyBuildError, 'No compiler available.*MPI lib', ActiveMNS().det_modpath_extensions, ec)
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'CategorizedModuleNamingScheme'
init_config(build_options=build_options)
test_ecs = {
'GCC-4.7.2.eb': ('compiler/GCC/4.7.2', '', [], [], []),
'OpenMPI-1.6.4-GCC-4.7.2.eb': ('mpi/OpenMPI/1.6.4-GCC-4.7.2', '', [], [], []),
'gzip-1.5-goolf-1.4.10.eb': ('tools/gzip/1.5-goolf-1.4.10', '', [], [], []),
'goolf-1.4.10.eb': ('toolchain/goolf/1.4.10', '', [], [], []),
'impi-4.1.3.049.eb': ('mpi/impi/4.1.3.049', '', [], [], []),
}
for ecfile, mns_vals in test_ecs.items():
test_ec(ecfile, *mns_vals)
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = self.orig_module_naming_scheme
init_config(build_options=build_options)
test_ecs = {
'GCC-4.7.2.eb': ('GCC/4.7.2', '', [], [], []),
'OpenMPI-1.6.4-GCC-4.7.2.eb': ('OpenMPI/1.6.4-GCC-4.7.2', '', [], [], []),
'gzip-1.5-goolf-1.4.10.eb': ('gzip/1.5-goolf-1.4.10', '', [], [], []),
'goolf-1.4.10.eb': ('goolf/1.4.10', '', [], [], []),
'impi-4.1.3.049.eb': ('impi/4.1.3.049', '', [], [], []),
}
for ecfile, mns_vals in test_ecs.items():
test_ec(ecfile, *mns_vals)
class TclModuleGeneratorTest(ModuleGeneratorTest):
"""Test for module_generator module for Tcl syntax."""
MODULE_GENERATOR_CLASS = ModuleGeneratorTcl
class LuaModuleGeneratorTest(ModuleGeneratorTest):
"""Test for module_generator module for Tcl syntax."""
MODULE_GENERATOR_CLASS = ModuleGeneratorLua
def suite():
""" returns all the testcases in this module """
suite = TestSuite()
suite.addTests(TestLoaderFiltered().loadTestsFromTestCase(TclModuleGeneratorTest, sys.argv[1:]))
suite.addTests(TestLoaderFiltered().loadTestsFromTestCase(LuaModuleGeneratorTest, sys.argv[1:]))
return suite
if __name__ == '__main__':
#logToScreen(enable=True)
#setLogLevelDebug()
TextTestRunner(verbosity=1).run(suite())
|
Caylo/easybuild-framework
|
test/framework/module_generator.py
|
Python
|
gpl-2.0
| 34,555
|
[
"NetCDF"
] |
bc2eb1ca7fadcf512f452a4d19da9fcea497c829bb637d7535f046186302b51b
|
__all__ = ["Galaxy", "Star", "SpeculativeTrade", "HexMap", "SubsectorMap2", "StatCalculation"]
|
makhidkarun/traveller_pyroute
|
PyRoute/__init__.py
|
Python
|
mit
| 95
|
[
"Galaxy"
] |
9fbccabe42792e0f10fcef71c3b5b3e58de4ddcc0439dba25db47c6a1b26e8d1
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudconnectoruser
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CloudConnectorUser Avi RESTful Object
description:
- This module is used to configure CloudConnectorUser object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- Name of the object.
required: true
private_key:
description:
- Private_key of cloudconnectoruser.
public_key:
description:
- Public_key of cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a Cloud connector user that is used for integration into cloud platforms
avi_cloudconnectoruser:
controller: ''
name: root
password: ''
private_key: |
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----'
public_key: 'ssh-rsa ...'
tenant_ref: admin
username: ''
'''
RETURN = '''
obj:
description: CloudConnectorUser (api/cloudconnectoruser) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
name=dict(type='str', required=True),
private_key=dict(type='str', no_log=True,),
public_key=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudconnectoruser',
set(['private_key']))
if __name__ == '__main__':
main()
|
tszym/ansible
|
lib/ansible/modules/network/avi/avi_cloudconnectoruser.py
|
Python
|
gpl-3.0
| 3,676
|
[
"VisIt"
] |
428ae1d324b0408d00b33057b4cfe5c524c893f184dd6a1d61acdb9222bb0b0d
|
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.netcdf._load_cube` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import iris.fileformats.cf
import mock
import netCDF4
import numpy as np
from iris.fileformats.netcdf import _load_cube
class TestFillValue(tests.IrisTest):
def setUp(self):
name = 'iris.fileformats.netcdf._assert_case_specific_facts'
patch = mock.patch(name)
patch.start()
self.addCleanup(patch.stop)
self.engine = mock.Mock()
self.cf = None
self.filename = 'DUMMY'
def _make_cf_var(self, dtype):
variable = mock.Mock(spec=netCDF4.Variable, dtype=dtype)
cf_var = mock.MagicMock(spec=iris.fileformats.cf.CFVariable,
cf_data=variable, cf_name='DUMMY_VAR',
cf_group=mock.Mock(), dtype=dtype,
shape=mock.MagicMock())
return cf_var
def _test(self, cf_var, expected_fill_value):
cube = _load_cube(self.engine, self.cf, cf_var, self.filename)
self.assertEqual(cube._my_data.fill_value, expected_fill_value)
def test_from_attribute_dtype_f4(self):
# A _FillValue attribute on the netCDF variable should end up as
# the fill_value for the cube.
dtype = np.dtype('f4')
cf_var = self._make_cf_var(dtype)
cf_var.cf_data._FillValue = mock.sentinel.FILL_VALUE
self._test(cf_var, mock.sentinel.FILL_VALUE)
def test_from_default_dtype_f4(self):
# Without an explicit _FillValue attribute on the netCDF
# variable, the fill value should be selected from the default
# netCDF fill values.
dtype = np.dtype('f4')
cf_var = self._make_cf_var(dtype)
self._test(cf_var, netCDF4.default_fillvals['f4'])
def test_from_attribute_dtype_i4(self):
# A _FillValue attribute on the netCDF variable should end up as
# the fill_value for the cube.
dtype = np.dtype('i4')
cf_var = self._make_cf_var(dtype)
cf_var.cf_data._FillValue = mock.sentinel.FILL_VALUE
self._test(cf_var, mock.sentinel.FILL_VALUE)
def test_from_default_dtype_i4(self):
# Without an explicit _FillValue attribute on the netCDF
# variable, the fill value should be selected from the default
# netCDF fill values.
dtype = np.dtype('i4')
cf_var = self._make_cf_var(dtype)
self._test(cf_var, netCDF4.default_fillvals['i4'])
def test_from_attribute_with_scale_offset(self):
# The _FillValue attribute still takes priority even when an
# offset/scale transformation takes place on the data.
dtype = np.dtype('i2')
cf_var = self._make_cf_var(dtype)
cf_var.scale_factor = np.float64(1.5)
cf_var.cf_data._FillValue = mock.sentinel.FILL_VALUE
self._test(cf_var, mock.sentinel.FILL_VALUE)
def test_from_default_with_scale_offset(self):
# The fill value should be related to the *non-scaled* dtype.
dtype = np.dtype('i2')
cf_var = self._make_cf_var(dtype)
cf_var.scale_factor = np.float64(1.5)
self._test(cf_var, netCDF4.default_fillvals['i2'])
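# --- Illustrative sketch (not part of the original tests) ---
# The defaults exercised above come straight from the netCDF4 module: a hedged
# peek at the mapping from dtype codes to fill values that these tests rely on.
def _example_default_fillvals():
    return {code: netCDF4.default_fillvals[code] for code in ('f4', 'i4', 'i2')}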
if __name__ == "__main__":
tests.main()
|
scollis/iris
|
lib/iris/tests/unit/fileformats/netcdf/test__load_cube.py
|
Python
|
gpl-3.0
| 4,057
|
[
"NetCDF"
] |
198dd7408748374eba2b733471fb1d61e383f6d78dd50b4d1536840adbcd52ec
|
# -*- coding: utf-8 -*-
# Copyright 2017 by Rob Gilmore and Shaurita Hutchins. All rights reserved.
# Based on ClustalOmega wrapper copyright 2011 by Andreas Wilm.
#
# Wrapper for IQTree by Rob Gilmore (2017). http://www.iqtree.org/
# Used _ClustalOmega.py as template.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for IQ-Tree
Key Features:
# Efficient search algorithm: Fast and effective stochastic algorithm to reconstruct phylogenetic trees by maximum likelihood.
IQ-TREE compares favorably to RAxML and PhyML in terms of likelihood while requiring similar amount of computing time
(Nguyen et al., 2015).
# Ultrafast bootstrap: An ultrafast bootstrap approximation (UFBoot) to assess branch supports. UFBoot is 10 to 40 times
faster than RAxML rapid bootstrap and obtains less biased support values (Minh et al., 2013).
# Ultrafast model selection: An ultrafast and automatic model selection (ModelFinder) which is 10 to 100 times faster than
jModelTest and ProtTest. ModelFinder also finds best-fit partitioning scheme like PartitionFinder.
# Big Data Analysis: Supporting huge datasets with thousands of sequences or millions of alignment sites via checkpointing,
safe numerical and low memory mode. Multicore CPUs and parallel MPI system are utilized to speedup analysis.
# Phylogenetic testing: Several fast branch tests like SH-aLRT and aBayes test (Anisimova et al., 2011) and tree topology
tests like the approximately unbiased (AU) test (Shimodaira, 2002).
The strength of IQ-TREE is the availability of a wide variety of phylogenetic models:
# Common models: All common substitution models for DNA, protein, codon, binary and morphological data with rate
heterogeneity among sites and ascertainment bias correction for e.g. SNP data.
# Partition models: Allowing individual models for different genomic loci (e.g. genes or codon positions), mixed data types,
mixed rate heterogeneity types, linked or unlinked branch lengths between partitions.
# Mixture models: fully customizable mixture models and empirical protein mixture models.
# Polymorphism-aware models: Accounting for incomplete lineage sorting to infer species tree from genome-wide population
data (Schrempf et al., 2016).
"""
from __future__ import print_function
from Bio.Application import _Option, AbstractCommandline
class IQTreeCommandline(AbstractCommandline):
u"""Command line wrapper for GUIDANCE2.
http://guidance.tau.ac.il/ver2/
Example:
--------
\>>> from Bio.Align.Applications import IQTreeCommandline
You would typically run the command line with clustalomega_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Citation:
---------
To maintain IQ-TREE, support users and secure fundings, it is important
for us that you cite the following papers, whenever the corresponding
features were applied for your analysis.
Example 1: We obtained branch supports with the ultrafast bootstrap (Minh et al., 2013) implemented in the
IQ-TREE software (Nguyen et al., 2015).
Example 2: We inferred the maximum-likelihood tree using the edge-linked partition model in
IQ-TREE (Chernomor et al., 2016; Nguyen et al., 2015).
################################################################################################################
# If you used ModelFinder please cite:
S. Kalyaanamoorthy, B.Q. Minh, T.K.F. Wong, A. von Haeseler, and L.S. Jermiin (2017) ModelFinder: Fast Model
Selection for Accurate Phylogenetic Estimates, Nature Methods, 14:587–589.
# If you performed tree reconstruction please cite:
L.-T. Nguyen, H.A. Schmidt, A. von Haeseler, and B.Q. Minh (2015) IQ-TREE: A fast and effective stochastic
algorithm for estimating maximum likelihood phylogenies. Mol. Biol. Evol., 32:268-274. DOI: 10.1093/molbev/msu300
# If you used partition models e.g., for phylogenomic analysis please cite:
O. Chernomor, A. von Haeseler, and B.Q. Minh (2016) Terrace aware data structure for phylogenomic inference
from supermatrices. Syst. Biol., 65:997-1008. DOI: 10.1093/sysbio/syw037
# If you performed the ultrafast bootstrap (UFBoot) please cite:
B.Q. Minh, M.A.T. Nguyen, and A. von Haeseler (2013) Ultrafast approximation for phylogenetic bootstrap.
Mol. Biol. Evol., 30:1188-1195. DOI: 10.1093/molbev/mst024
# If you used the polymorphism-aware models please cite:
D. Schrempf, B.Q. Minh, N. De Maio, A. von Haeseler, and C. Kosiol (2016) Reversible polymorphism-aware phylogenetic
models and their application to tree inference. J. Theor. Biol., 407:362–370. DOI: 10.1016/j.jtbi.2016.07.042
# If you used the IQ-TREE web server please cite:
J. Trifinopoulos, L.-T. Nguyen, A. von Haeseler, and B.Q. Minh (2016) W-IQ-TREE: a fast online phylogenetic tool
for maximum likelihood analysis. Nucleic Acids Res., 44 (W1):W232-W235. DOI: 10.1093/nar/gkw256"""
def __init__(self, cmd="iqtree", **kwargs):
self.parameters = \
[
_Option(['-s', 'alignment'],
"Input alignment in PHYLIP/FASTA/NEXUS/CLUSTAL/MSF format",
filename=True, equate=False,
is_required=True
),
_Option(['-st', 'dataType'],
"BIN, DNA, AA, NT2AA, CODON, MORPH (default: auto-detect)",
equate=False,
checker_function=lambda x: x in ['BIN', 'DNA', 'AA',
'NT2AA', 'CODON',
'MORPH', 'auto-detect']),
_Option(['', 'opts'],
"A placeholder to set additional parameters."
"e.g. -m <model-name> -o <outgroup_taxon> -quiet"
"-safe -mem RAM",
equate=False)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
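# --- Illustrative sketch (not part of the original wrapper) ---
# A hedged example of building (not running) the command line; the alignment
# file name is an assumption for illustration only.
if __name__ == "__main__":
    iqtree_cline = IQTreeCommandline(alignment="example_alignment.phy",
                                     dataType="DNA")
    # str(iqtree_cline) should read "iqtree -s example_alignment.phy -st DNA"
    # and can then be executed via subprocess, as is usual for
    # AbstractCommandline-based wrappers.
    print(iqtree_cline)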
|
datasnakes/Datasnakes-Scripts
|
OrthoEvol/Orthologs/Phylogenetics/IQTree/iqtree.py
|
Python
|
mit
| 6,467
|
[
"Biopython"
] |
730124bd0b175f86d3218cb082f6045130eed993a4694b01aeec6279a4c282cc
|
# -*- coding: utf-8 -*-
"""methods_utils.py:
Some non-standard functions generic to moose.
This library may not be exposed to end-users. Intended for development by
the maintainer of this file.
Last modified: Sat Jan 18, 2014 05:01PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import re
objPathPat = re.compile(r'(\/\w+\[\d+\])+?$')
def idPathToObjPath( idPath ):
""" Append a [0] if missing from idPath.
Id-paths do not have [0] at their end. This does not allow one to do
algebra properly.
"""
m = objPathPat.match( idPath )
if m: return idPath
else:
return '{}[0]'.format(idPath)
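def _example_idPathToObjPath():
    """Illustrative sketch (not part of the original module).

    An id-path without a trailing index gets '[0]' appended, while a path that
    already ends in an index is returned unchanged.
    """
    assert idPathToObjPath('/model[0]/soma[1]') == '/model[0]/soma[1]'
    assert idPathToObjPath('/model/soma') == '/model/soma[0]'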
def main():
p1 = '/cable[0]/comp_[1]/a'
p2 = '/cab[1]/comp/com'
p3 = '/cab[1]/p[2]/c[3]'
p4 = '/ca__b[1]/_p[2]/c[122]'
for p in [p1, p2, p3, p4]:
m = objPathPat.match(p)
if m:
print(m.group(0))
else:
print(("{} is invalid Obj path in moose".format( p )))
if __name__ == '__main__':
main()
|
dharmasam9/moose-core
|
python/moose/methods_utils.py
|
Python
|
gpl-3.0
| 1,301
|
[
"MOOSE"
] |
bd74becfba23e18d0880866b63c207302588e8310d39b4788d6889e531731d1f
|
# -*- coding: utf-8 -*-
#
# Instant Press. Instant sites. CMS developed in Web2py Framework
# Site: http://www.instant2press.com
#
# Copyright (c) 2010 Mulone, Pablo Martín
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# http://groups.google.com/group/web2py-usuarios
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
def index():
#TODO Check page
try:
page = int(request.args[0])
except:
page = 1
if page>1:
response.front = False
else:
response.front = True
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
return dict(page=page)
def page_by_id():
try:
post_id = request.args[0]
except:
e_message = T("Problem with some submitted values")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
response.view='default/view.html'
post = i2p.articles.get_article_view_by_id(post_id)
if not post:
e_message = T("Sorry, but this article doesn't exist!")
e_title = T("Error 404!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(404, http_page)
return dict(post = post)
def page():
try:
name = request.args[0]
except:
e_message = T("Problem with some submitted values")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
response.view='default/view.html'
post_id = i2p.articles.get_article_id_from_name(name)
post = i2p.articles.get_article_view_by_id(post_id)
if not post:
e_message = T("Sorry, but this article doesn't exist!")
e_title = T("Error 404!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(404, http_page)
return dict(post = post)
def post():
try:
year = request.args[0]
month = request.args[1]
day = request.args[2]
name = request.args[3]
except:
e_message = T("Problem with some submitted values")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
response.view='default/view.html'
post_id = i2p.articles.get_article_id_from_date_name(year,month,day,name)
post = i2p.articles.get_article_view_by_id(post_id)
if not post:
e_message = T("Sorry, but this article doesn't exist!")
e_title = T("Error 404!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(404, http_page)
return dict(post = post)
def view():
try:
post_id = int(request.args[0])
except:
e_message = T("Problem with id value")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
post = i2p.articles.get_article_view_by_id(post_id)
if not post:
e_message = T("Sorry, but this article doesn't exist!")
e_title = T("Error 404!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(404, http_page)
return dict(post = post)
def preview():
if not check_credentials_is_admin(): #admin only
return
try:
post_id = int(request.args[0])
except:
e_message = T("Problem with id value")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
response.view='default/view.html'
post = i2p.articles.get_article_view_by_id(post_id,preview=True)
if not post:
e_message = T("Sorry, but this article doesn't exist!")
e_title = T("Error 404!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(404, http_page)
return dict(post = post)
def category():
try:
subarea = request.args[0]
except:
e_message = T("This function doesn't exist!")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
if subarea=="by_id":
try:
category = int(request.args[1])
except:
e_message = T("Problem with categorie id value!")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
try:
page = int(request.vars.page)
except:
page = 1
(posts, count_posts) = i2p.articles.get_last_posts_with_cat_id(page,category)
xml_posts = i2p.articles.get_xml_results_from_posts(posts)
xml_pages = i2p.articles.pagination_last_post_cat(page, count_posts, category)
return dict(page=page, category=category, posts=posts, count_posts=count_posts, \
xml_posts=xml_posts, xml_pages=xml_pages)
else:
e_message = T("This function doesn't exist!")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
def tag():
try:
tag = request.args[0]
except:
e_message = T("This function doesn't exist!")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
try:
page = int(request.vars.page)
except:
page = 1
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
(posts, count_posts) = i2p.articles.get_last_posts_with_tag_name(page,tag)
xml_posts = i2p.articles.get_xml_results_from_posts(posts)
xml_pages = i2p.articles.pagination_last_post_tag(page, count_posts, tag)
return dict(page=page, tag=tag, posts=posts, count_posts=count_posts, \
xml_posts=xml_posts, xml_pages=xml_pages)
def archives():
import datetime
try:
year = int(request.args[0])
month = int(request.args[1])
d_lower=datetime.date(year,month,1)
except:
e_message = T("There was a problem with values of Year - Month")
e_title = T("Error 400!")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
try:
page = int(request.vars.page)
except:
page = 1
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
(posts, count_posts) = i2p.articles.get_last_posts_archives_monthyear(page,year,month)
xml_posts = i2p.articles.get_xml_results_from_posts(posts)
xml_pages = i2p.articles.pagination_archive_monthyear(page, count_posts, year, month)
return dict(year=year, month=month, page=page, posts=posts, count_posts=count_posts, \
xml_posts=xml_posts, xml_pages=xml_pages)
def search():
try:
qvalue = request.vars.q
except:
e_title = T("Error 400!")
e_message = T("You need to submit your search text.")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
try:
page = int(request.vars.page)
except:
page = 1
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
(posts, count_posts) = i2p.articles.get_last_posts_with_search(page,qvalue)
xml_posts = i2p.articles.get_xml_results_from_posts(posts)
xml_pages = i2p.articles.pagination_last_post_search(page, count_posts, qvalue)
return dict(page=page, qvalue=qvalue, posts=posts, count_posts=count_posts, \
xml_posts=xml_posts, xml_pages=xml_pages)
################
## FEEDS ######
###############
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_PAGES, cache_model=cache.ram)
def feed_articles():
#begin load custom modules
i2p.load_mod_siteinfo()
i2p.load_mod_articles()
i2p.define_siteinfo()
i2p.define_articles()
#end load custom modules
return response.render(i2p.articles.generate_rss_last_posts())
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_PAGES, cache_model=cache.ram)
def feed_comments():
#begin load custom modules
i2p.load_mod_users()
i2p.load_mod_siteinfo()
i2p.load_mod_articles()
i2p.load_mod_comments()
i2p.define_siteinfo()
i2p.define_articles()
i2p.define_comments()
#end load custom modules
return response.render(i2p.articles.generate_rss_last_comments())
###########################
#json controllers
############################
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_JSON, cache_model=cache.ram)
def json_get_sidebar_categories():
session.forget()
#begin load custom modules
i2p.load_mod_categories()
i2p.load_mod_articles()
i2p.load_mod_widgets()
i2p.define_categories()
i2p.define_articles()
#end load custom modules
info={}
info['html']=i2p.widgets.load_categories()
import gluon.contrib.simplejson as sj
return sj.dumps(info)
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_JSON, cache_model=cache.ram)
def json_get_sidebar_last_posts():
session.forget()
#begin load custom modules
i2p.load_mod_articles()
i2p.load_mod_widgets()
i2p.define_articles()
#end load custom modules
info={}
info['html']=i2p.widgets.load_last_posts()
import gluon.contrib.simplejson as sj
return sj.dumps(info)
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_JSON, cache_model=cache.ram)
def json_get_sidebar_tags():
session.forget()
#begin load custom modules
i2p.load_mod_articles()
i2p.define_articles()
#end load custom modules
info={}
info['html']=i2p.articles.get_popular_tags()
import gluon.contrib.simplejson as sj
return sj.dumps(info)
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_JSON, cache_model=cache.ram)
def json_get_sidebar_pages():
session.forget()
#begin load custom modules
i2p.load_mod_articles()
i2p.load_mod_widgets()
i2p.define_articles()
#end load custom modules
info={}
info['html']=i2p.widgets.get_pages()
import gluon.contrib.simplejson as sj
return sj.dumps(info)
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_JSON, cache_model=cache.ram)
def json_get_sidebar_archive():
session.forget()
#begin load custom modules
i2p.load_mod_articles()
i2p.define_articles()
#end load custom modules
info={}
info['html']=i2p.articles.get_list_archives()
import gluon.contrib.simplejson as sj
return sj.dumps(info)
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_JSON, cache_model=cache.ram)
def json_get_sidebar_links():
session.forget()
#begin load custom modules
i2p.load_mod_articles()
i2p.load_mod_links()
i2p.define_links()
#end load custom modules
info={}
info['html']=i2p.articles.get_list_links()
import gluon.contrib.simplejson as sj
return sj.dumps(info)
@cache(request.env.path_info, time_expire=CACHE_TIME_EXPIRE_JSON, cache_model=cache.ram)
def json_get_sidebar_last_comments():
session.forget()
#begin load custom modules
i2p.load_mod_users()
i2p.load_mod_articles()
i2p.load_mod_comments()
i2p.load_mod_widgets()
i2p.load_mod_images()
i2p.define_articles()
i2p.define_comments()
i2p.define_avatars()
#end load custom modules
info={}
info['html']=i2p.widgets.load_last_comments()
import gluon.contrib.simplejson as sj
return sj.dumps(info)
def json_get_comments_count():
session.forget()
#begin load custom modules
i2p.load_mod_comments()
i2p.define_comments()
#end load custom modules
try:
id = int(request.vars.id)
except:
return json_response(message= T("The comment id doesn't exist"),\
success=0,alert=2)
else:
return i2p.comments.count(id)
def json_get_comments_title():
session.forget()
#begin load custom modules
i2p.load_mod_articles()
i2p.load_mod_comments()
i2p.define_articles()
i2p.define_comments()
#end load custom modules
try:
id = int(request.vars.id)
except:
return json_response(message= T("The article id or page number doesn't exist"),\
success=0,alert=2)
else:
return i2p.comments.generate_title(id)
def json_get_comments_form():
#begin load custom modules
i2p.load_mod_users()
i2p.load_mod_comments()
#end load custom modules
try:
id = int(request.vars.id)
except:
return json_response(message= T("The article id doesn't exist"),\
success=0,alert=2)
else:
return i2p.comments.generate_reply(id)
def json_get_comments_from_post():
session.forget()
#begin load custom modules
i2p.load_mod_users()
i2p.load_mod_articles()
i2p.load_mod_comments()
i2p.load_mod_widgets()
i2p.load_mod_images()
i2p.define_articles()
i2p.define_comments()
i2p.define_avatars()
#end load custom modules
try:
id = int(request.vars.id)
page = int(request.vars.page)
except:
return json_response(message= T("The article id or page number doesn't exist"),\
success=0,alert=2)
else:
return i2p.comments.get_all(id,page)
def json_get_comments_from_post_admin():
#begin load custom modules
i2p.load_mod_users()
i2p.load_mod_articles()
i2p.load_mod_comments()
i2p.load_mod_widgets()
i2p.load_mod_images()
i2p.define_articles()
i2p.define_comments()
i2p.define_avatars()
#end load custom modules
if not check_credentials_is_admin():
return _common_json_response(message= T("You need to sign in as an admin"),\
success=0,alert=2)
try:
id = int(request.vars.id)
page = int(request.vars.page)
except:
return json_response(message= T("The article id or page number doesn't exist"),\
success=0,alert=2)
else:
return i2p.comments.get_all(id,page,True)
def json_check_user_is_log_in():
value = False
if is_user_logged_in():
value = True
info={}
info['value']=value
import gluon.contrib.simplejson as sj
return sj.dumps(info)
@auth.requires_login()
def json_new_comment():
#begin load custom modules
i2p.load_mod_articles()
i2p.load_mod_comments()
i2p.define_articles()
i2p.define_comments()
#end load custom modules
try:
id_reply = int(request.vars.idreply)
id_post = int(request.vars.idpost)
value = request.vars.value
except:
return json_response(message= T("The article id, page number, or reply doesn't exist"), \
success=0,alert=2)
else:
return i2p.comments.add(id_reply,id_post,value)
##################################
###### USER ACTIONS ##############
##################################
#this change the avatar user and generate the current thumbnail of the image
@auth.requires_login()
def change_avatar():
import datetime
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
if not i2p.config.avatars_enabled:
e_title = T("Error 400!")
e_message = T("Avatars are disable.")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
user_id = session.auth.user.id
avatars = db(db.avatars.user_id == user_id).select()
if not avatars:
newid = db.avatars.insert(user_id=user_id)
avatars = db(db.avatars.user_id == user_id).select()
if avatars:
avatar = avatars[0]
form = SQLFORM(db.avatars, avatar, upload=URL('download'), showid=False)
if form.accepts(request.vars, session):
response.flash = T('Avatar uploaded')
redirect(URL('index'))
return dict(form=form)
else:
e_title = T("Error 400!")
e_message = T("Problem with avatars")
http_page = pretty_exceptions(e_title,e_message)
raise HTTP(400, http_page)
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
i2p.load_mod_images()
i2p.define_images()
i2p.define_avatars()
return response.download(request,db)
def fast_download():
i2p.load_mod_images()
i2p.define_images()
i2p.define_avatars()
# very basic security (only allow fast_download on your_table.upload_field):
    if not request.args(0).startswith(("images.image", "images.thumb",
                                       "avatars.image", "avatars.thumb")):
        return download()
# remove/add headers that prevent/favors client-side caching
del response.headers['Cache-Control']
del response.headers['Pragma']
del response.headers['Expires']
filename = os.path.join(request.folder,'uploads',request.args(0))
# send last modified date/time so client browser can enable client-side caching
response.headers['Last-Modified'] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime(os.path.getmtime(filename)))
return response.stream(open(filename,'rb'))
def user():
#begin load all base modules
i2p.load_mod_common() #load common modules
i2p.db_definitions() #define tables
#end load all base modules
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
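# A minimal illustration (hypothetical action, not part of this controller) of the
# access-control decorators listed in the user() docstring above:
#
#     @auth.requires_login()
#     def members_only():
#         return dict(message=T("Only logged-in users can see this page."))
#
# web2py's Auth redirects unauthenticated requests to the user/login action.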
|
LispyAriaro/instant-press
|
controllers/default.py
|
Python
|
gpl-2.0
| 21,688
|
[
"VisIt"
] |
7146d5f1f71bc79d4f1cc0d0c0238b49baaa9ddbb4040ac475fc50bdd03374cf
|
"""Asap utility functions
This module defines the following functions:
PrintVersion
"""
__docformat__ = "restructuredtext en"
from asap3.Internal.Builtins import _asap, get_version, get_short_version
from asap3 import __file__ as _asapfile
import ase
import sys
import os
def print_version(level = 0):
"""Print the version number of the loaded version of Asap.
If the optional argument is 1, also prints the pathnames of the
most important files.
"""
try:
compiledfile = _asap.__file__
except AttributeError:
compiledfile = "<built-in>"
print get_version()
if level >= 1:
print " Python module:", _asapfile
print " C++ module: ", compiledfile
print " ase module: ", ase.__file__
def DebugOutput(filename, stdout=1, nomaster=False, sync=True):
"""Debugging output on each node goes to a different file.
Redirect stderr to a different file on each node. The filename should
contain %d, which is replaced by the node number. The file is opened
with minimal buffering, and stderr (standard error) is redirected to
it (also for C/C++ extensions). If the optional argument stdout is
true (the default), Python's sys.stdout is also redirected to the
same file. Standard output for C/C++ extensions is never touched.
This is mainly useful for parallel simulations.
"""
if stdout:
sys.stdout = sys.stderr
try:
import asap3.mpi
node = asap3.mpi.world.rank
except (AttributeError, ImportError):
node = 0
if nomaster and node == 0:
return
flag = os.O_WRONLY|os.O_CREAT|os.O_TRUNC
if sync:
flag = flag|os.O_SYNC
newerror = os.open((filename % (node,)), flag, 0660)
os.dup2(newerror, sys.stderr.fileno())
# This Python file must NOT go away. Attach it to the sys module.
sys._AsapStandardError = newerror
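# A minimal usage sketch; the log-file pattern is an assumption, and the import
# path is inferred from this module's location (Asap may also re-export it at
# package level):
#
#     from asap3.Internal.UtilityFunctions import DebugOutput
#     DebugOutput("asap-debug-%d.log", stdout=1, nomaster=False)
#
# Each MPI rank then writes its stderr (and stdout) to its own file.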
def print_memory(txt, a=None):
import asap3.mpi
procfile = open("/proc/self/status")
vmsize = vmpeak = vmdata = vmrss = -1
for line in procfile:
words = line.split()
if words[0] == "VmSize:":
vmsize = int(words[1])
elif words[0] == "VmPeak:":
vmpeak = int(words[1])
elif words[0] == "VmData:":
vmdata = int(words[1])
elif words[0] == "VmRSS:":
vmrss = int(words[1])
print >>sys.stderr, "Memory [proc %d '%s']: %d MB total (%d MB peak, %d MB data, %d MB rss)" % (
asap3.mpi.world.rank, txt, (vmsize+512) / 1024,
(vmpeak+512) / 1024, (vmdata+512) / 1024, (vmrss+512)/1024)
procfile.close()
if a is not None:
memory_usage(a)
def memory_usage(obj, total=True):
"""Print the memory usage of some kinds of objects.
Supported objects are: atoms, EMT calculators and neighbor lists.
"""
mem = 0
if hasattr(obj, "arrays"):
mem += _memory_usage_atoms(obj)
try:
calc = obj.get_calculator()
except AttributeError:
calc = None
if calc is not None:
mem += memory_usage(calc, total=False)
elif hasattr(obj, "print_memory"):
mem += obj.print_memory()
else:
print "*MEM* Memory usage of this object is not supported:", obj
return 0
if total:
print "*MEM* Total %d MB." % (mem,)
return mem
def _memory_usage_atoms(atoms):
arr = atoms.arrays
mem = 0
nat = len(atoms)
nvar = 0
megabyte = 1024*1024
for k in arr.keys():
mem += arr[k].size * arr[k].itemsize
nvar += 1
gmem = 0
gvar = 0
if hasattr(atoms, "ghosts"):
arr = atoms.ghosts
for k in arr.keys():
gmem += arr[k].size * arr[k].itemsize
gvar += 1
mem = (mem + gmem + megabyte/2) / megabyte
gmem = (gmem + megabyte/2) / megabyte
print "*MEM* Atoms %d MB. [ %d atoms, %d arrays, %d gh_arr of %d MB ]" % (
mem, nat, nvar, gvar, gmem)
return mem
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/Internal/UtilityFunctions.py
|
Python
|
mit
| 3,989
|
[
"ASE"
] |
e10f33e93564e93356351b9fa1357ff81807e8404a58a97574102e192e645b1c
|
"""This demo program solves Poisson's equation
- div C grad u(x, y) = f(x, y)
on the unit square with source f given by
f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02)
and boundary conditions given by
u(x, y) = 0 for x = 0 or x = 1
du/dn(x, y) = 0 for y = 0 or y = 1
The conductivity C is a symmetric 2 x 2 matrix which
varies throughout the domain. In the left part of the
domain, the conductivity is
C = ((1, 0.3), (0.3, 2))
and in the right part it is
C = ((3, 0.5), (0.5, 4))
The data files where these values are stored are generated
by the program generate_data.py
This demo is dedicated to BF and Marius... ;-)
"""
# Copyright (C) 2009-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2009-12-16
# Last changed: 2011-06-28
# Begin demo
from dolfin import *
# Read mesh from file and create function space
mesh = Mesh("../unitsquare_32_32.xml.gz")
V = FunctionSpace(mesh, "Lagrange", 1)
# Define Dirichlet boundary (x = 0 or x = 1)
def boundary(x):
return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, boundary)
# Code for C++ evaluation of conductivity
conductivity_code = """
class Conductivity : public Expression
{
public:
// Create expression with 3 components
Conductivity() : Expression(3) {}
// Function for evaluating expression on each cell
void eval(Array<double>& values, const Array<double>& x, const ufc::cell& cell) const
{
const uint D = cell.topological_dimension;
const uint cell_index = cell.index;
values[0] = (*c00)[cell_index];
values[1] = (*c01)[cell_index];
values[2] = (*c11)[cell_index];
}
// The data stored in mesh functions
std::shared_ptr<MeshFunction<double> > c00;
std::shared_ptr<MeshFunction<double> > c01;
std::shared_ptr<MeshFunction<double> > c11;
};
"""
# Define conductivity expression and matrix
c00 = MeshFunction("double", mesh, "../unitsquare_32_32_c00.xml.gz")
c01 = MeshFunction("double", mesh, "../unitsquare_32_32_c01.xml.gz")
c11 = MeshFunction("double", mesh, "../unitsquare_32_32_c11.xml.gz")
c = Expression(cppcode=conductivity_code)
c.c00 = c00
c.c01 = c01
c.c11 = c11
C = as_matrix(((c[0], c[1]), (c[1], c[2])))
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)")
a = inner(C*grad(u), grad(v))*dx
L = f*v*dx
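# The two forms above are the weak formulation of the problem stated in the
# module docstring: find u in V with u = 0 on the Dirichlet boundary such that
#
#     a(u, v) = \int_\Omega (C grad u) . grad v dx  =  \int_\Omega f v dx = L(v)
#
# for all test functions v that vanish on the Dirichlet boundary. The zero
# Neumann condition is natural and contributes no boundary term.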
# Compute solution
u = Function(V)
solve(a == L, u, bc)
# Save solution in VTK format
file = File("poisson.pvd")
file << u
# Plot solution
plot(u, interactive=True)
|
MiroK/dolfin
|
demo/documented/tensor-weighted-poisson/python/demo_tensor-weighted-poisson.py
|
Python
|
gpl-3.0
| 3,252
|
[
"VTK"
] |
81e6cf1f34756675cfcb14d29ef6f65a985616d7b62377e0c4fea74f4e07a915
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author:
- "Jeroen Hoekx (@jhoekx)"
- "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
resizing is not supported with percentages. Float values must begin
with a digit.
state:
choices: [ "present", "absent" ]
default: present
description:
- Control if the logical volume exists. If C(present) the C(size) option
is required.
required: false
force:
version_added: "1.5"
choices: [ "yes", "no" ]
default: "no"
description:
    - Shrink or remove operations of volumes require this switch. Ensures that
      filesystems never get corrupted/destroyed by mistake.
required: false
opts:
version_added: "2.0"
description:
- Free-form options to be passed to the lvcreate command
snapshot:
version_added: "2.1"
description:
- The name of the snapshot volume
required: false
notes:
- Filesystems on top of the volume are not resized.
'''
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol: vg=firefly lv=test size=512
# Create a logical volume of 512g.
- lvol: vg=firefly lv=test size=512g
# Create a logical volume the size of all remaining space in the volume group
- lvol: vg=firefly lv=test size=100%FREE
# Create a logical volume with special options
- lvol: vg=firefly lv=test size=512g opts="-r 16"
# Extend the logical volume to 1024m.
- lvol: vg=firefly lv=test size=1024
# Reduce the logical volume to 512m
- lvol: vg=firefly lv=test size=512 force=yes
# Remove the logical volume.
- lvol: vg=firefly lv=test state=absent force=yes
# Create a snapshot volume of the test logical volume.
- lvol: vg=firefly lv=test snapshot=snap1 size=100m
'''
import re
decimal_point = re.compile(r"(\.|,)")
def mkversion(major, minor, patch):
return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
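# For example, mkversion(2, 2, 99) == 2002099, so whole LVM versions can be
# compared as plain integers (see the --yes capability check in main()).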
def parse_lvs(data):
lvs = []
for line in data.splitlines():
parts = line.strip().split(';')
lvs.append({
'name': parts[0],
'size': int(decimal_point.split(parts[1])[0]),
})
return lvs
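# A sketch of the input parse_lvs() expects -- the semicolon-separated output of
# "lvs --noheadings --nosuffix -o lv_name,size" (names and sizes below are made up):
#
#     "  test;512.00\n  snap1;100.00"
#
# parses to [{'name': 'test', 'size': 512}, {'name': 'snap1', 'size': 100}].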
def get_lvm_version(module):
ver_cmd = module.get_bin_path("lvm", required=True)
rc, out, err = module.run_command("%s version" % (ver_cmd))
if rc != 0:
return None
m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
if not m:
return None
return mkversion(m.group(1), m.group(2), m.group(3))
def main():
module = AnsibleModule(
argument_spec=dict(
vg=dict(required=True),
lv=dict(required=True),
size=dict(type='str'),
opts=dict(type='str'),
state=dict(choices=["absent", "present"], default='present'),
force=dict(type='bool', default='no'),
snapshot=dict(type='str', default=None),
),
supports_check_mode=True,
)
# Determine if the "--yes" option should be used
version_found = get_lvm_version(module)
if version_found == None:
module.fail_json(msg="Failed to get LVM version number")
version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
if version_found >= version_yesopt:
yesopt = "--yes"
else:
yesopt = ""
vg = module.params['vg']
lv = module.params['lv']
size = module.params['size']
opts = module.params['opts']
state = module.params['state']
force = module.boolean(module.params['force'])
size_opt = 'L'
size_unit = 'm'
snapshot = module.params['snapshot']
if opts is None:
opts = ""
if size:
# LVCREATE(8) -l --extents option with percentage
if '%' in size:
size_parts = size.split('%', 1)
size_percent = int(size_parts[0])
if size_percent > 100:
module.fail_json(msg="Size percentage cannot be larger than 100%")
size_whole = size_parts[1]
if size_whole == 'ORIGIN':
module.fail_json(msg="Snapshot Volumes are not supported")
elif size_whole not in ['VG', 'PVS', 'FREE']:
module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
size_opt = 'l'
size_unit = ''
if not '%' in size:
# LVCREATE(8) -L --size option unit
if size[-1].lower() in 'bskmgtpe':
size_unit = size[-1].lower()
size = size[0:-1]
try:
float(size)
if not size[0].isdigit(): raise ValueError()
except ValueError:
module.fail_json(msg="Bad size specification of '%s'" % size)
# when no unit, megabytes by default
if size_opt == 'l':
unit = 'm'
else:
unit = size_unit
lvs_cmd = module.get_bin_path("lvs", required=True)
rc, current_lvs, err = module.run_command(
"%s --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
changed = False
lvs = parse_lvs(current_lvs)
if snapshot is None:
check_lv = lv
else:
check_lv = snapshot
for test_lv in lvs:
if test_lv['name'] == check_lv:
this_lv = test_lv
break
else:
this_lv = None
if state == 'present' and not size:
if this_lv is None:
module.fail_json(msg="No size given.")
else:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
msg = ''
if this_lv is None:
if state == 'present':
### create LV
if module.check_mode:
changed = True
else:
lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
if snapshot is not None:
cmd = "%s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
else:
cmd = "%s %s -n %s -%s %s%s %s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg)
rc, _, err = module.run_command(cmd)
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
else:
if state == 'absent':
### remove LV
if module.check_mode:
module.exit_json(changed=True)
if not force:
module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
lvremove_cmd = module.get_bin_path("lvremove", required=True)
rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=True)
else:
module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
elif size_opt == 'l':
module.exit_json(changed=False, msg="Resizing extents with percentage not supported.")
else:
### resize LV
tool = None
if int(size) > this_lv['size']:
tool = module.get_bin_path("lvextend", required=True)
elif int(size) < this_lv['size']:
if not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
tool = module.get_bin_path("lvreduce", required=True)
tool = '%s %s' % (tool, '--force')
if tool:
if module.check_mode:
changed = True
else:
cmd = "%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name'])
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
elif "matches existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
module.exit_json(changed=changed, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
pmazurek/ansible-modules-extras
|
system/lvol.py
|
Python
|
gpl-3.0
| 10,210
|
[
"Firefly"
] |
eae97e6d8ba2c1ff86bb7b609388929f1669b252d8d589e351476e03ab66c106
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import ast
import inspect
import itertools
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from types import FrameType, ModuleType
from typing import (
Any,
Callable,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
get_type_hints,
)
from pants.engine.engine_aware import SideEffecting
from pants.engine.goal import Goal
from pants.engine.internals.selectors import AwaitableConstraints
from pants.engine.internals.selectors import Effect as Effect # noqa: F401
from pants.engine.internals.selectors import Get as Get # noqa: F401
from pants.engine.internals.selectors import MultiGet as MultiGet # noqa: F401
from pants.engine.unions import UnionRule
from pants.option.subsystem import Subsystem
from pants.util.collections import assert_single_element
from pants.util.logging import LogLevel
from pants.util.memo import memoized
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
class _RuleVisitor(ast.NodeVisitor):
"""Pull `Get` calls out of an @rule body."""
def __init__(self, *, resolve_type: Callable[[str], Type[Any]], source_file_name: str) -> None:
super().__init__()
self.source_file_name = source_file_name
self.resolve_type = resolve_type
self.awaitables: List[AwaitableConstraints] = []
def visit_Call(self, call_node: ast.Call) -> None:
signature = AwaitableConstraints.signature_from_call_node(
call_node, source_file_name=self.source_file_name
)
if signature is not None:
product_str, subject_str, effect = signature
awaitable = AwaitableConstraints(
self.resolve_type(product_str), self.resolve_type(subject_str), effect
)
self.awaitables.append(awaitable)
# Ensure we descend into e.g. MultiGet(Get(...)...) calls.
self.generic_visit(call_node)
# NB: This violates Python naming conventions of using snake_case for functions. This is because
# SubsystemRule behaves very similarly to UnionRule and RootRule, and we want to use the same
# naming scheme.
#
# We could refactor this to be a class with __call__() defined, but we would lose the `@memoized`
# decorator.
@memoized
def SubsystemRule(subsystem: Type[Subsystem]) -> TaskRule:
"""Returns a TaskRule that constructs an instance of the subsystem."""
return TaskRule(**subsystem.signature())
def _get_starting_indent(source):
"""Used to remove leading indentation from `source` so ast.parse() doesn't raise an
exception."""
if source.startswith(" "):
return sum(1 for _ in itertools.takewhile(lambda c: c in {" ", b" "}, source))
return 0
class RuleType(Enum):
rule = "rule"
goal_rule = "goal_rule"
uncacheable_rule = "_uncacheable_rule"
def _make_rule(
func_id: str,
rule_type: RuleType,
return_type: Type,
parameter_types: Iterable[Type],
*,
cacheable: bool,
canonical_name: str,
desc: Optional[str],
level: LogLevel,
) -> Callable[[Callable], Callable]:
"""A @decorator that declares that a particular static function may be used as a TaskRule.
:param rule_type: The specific decorator used to declare the rule.
:param return_type: The return/output type for the Rule. This must be a concrete Python type.
:param parameter_types: A sequence of types that matches the number and order of arguments to
the decorated function.
:param cacheable: Whether the results of executing the Rule should be cached as keyed by all of
its inputs.
"""
is_goal_cls = issubclass(return_type, Goal)
if rule_type == RuleType.rule and is_goal_cls:
raise TypeError(
"An `@rule` that returns a `Goal` must instead be declared with `@goal_rule`."
)
if rule_type == RuleType.goal_rule and not is_goal_cls:
raise TypeError("An `@goal_rule` must return a subclass of `engine.goal.Goal`.")
def wrapper(func):
if not inspect.isfunction(func):
raise ValueError("The @rule decorator must be applied innermost of all decorators.")
owning_module = sys.modules[func.__module__]
source = inspect.getsource(func) or "<string>"
source_file = inspect.getsourcefile(func)
beginning_indent = _get_starting_indent(source)
if beginning_indent:
source = "\n".join(line[beginning_indent:] for line in source.split("\n"))
module_ast = ast.parse(source)
def resolve_type(name):
resolved = getattr(owning_module, name, None) or owning_module.__builtins__.get(
name, None
)
if resolved is None:
raise ValueError(
f"Could not resolve type `{name}` in top level of module "
f"{owning_module.__name__} defined in {source_file}"
)
elif not isinstance(resolved, type):
raise ValueError(
f"Expected a `type` constructor for `{name}`, but got: {resolved} (type "
f"`{type(resolved).__name__}`) in {source_file}"
)
return resolved
rule_func_node = assert_single_element(
node
for node in ast.iter_child_nodes(module_ast)
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
and node.name == func.__name__
)
parents_table = {}
for parent in ast.walk(rule_func_node):
for child in ast.iter_child_nodes(parent):
parents_table[child] = parent
rule_visitor = _RuleVisitor(source_file_name=source_file, resolve_type=resolve_type)
rule_visitor.visit(rule_func_node)
awaitables = FrozenOrderedSet(rule_visitor.awaitables)
validate_requirements(func_id, parameter_types, awaitables, cacheable)
# Set our own custom `__line_number__` dunder so that the engine may visualize the line number.
func.__line_number__ = func.__code__.co_firstlineno
func.rule = TaskRule(
return_type,
parameter_types,
func,
input_gets=awaitables,
canonical_name=canonical_name,
desc=desc,
level=level,
cacheable=cacheable,
)
return func
return wrapper
class InvalidTypeAnnotation(TypeError):
"""Indicates an incorrect type annotation for an `@rule`."""
class UnrecognizedRuleArgument(TypeError):
"""Indicates an unrecognized keyword argument to a `@rule`."""
class MissingTypeAnnotation(TypeError):
"""Indicates a missing type annotation for an `@rule`."""
class MissingReturnTypeAnnotation(InvalidTypeAnnotation):
"""Indicates a missing return type annotation for an `@rule`."""
class MissingParameterTypeAnnotation(InvalidTypeAnnotation):
"""Indicates a missing parameter type annotation for an `@rule`."""
def _ensure_type_annotation(
*,
type_annotation: Optional[Type],
name: str,
raise_type: Type[InvalidTypeAnnotation],
) -> Type:
if type_annotation is None:
raise raise_type(f"{name} is missing a type annotation.")
if not isinstance(type_annotation, type):
raise raise_type(
f"The annotation for {name} must be a type, got {type_annotation} of type {type(type_annotation)}."
)
return type_annotation
PUBLIC_RULE_DECORATOR_ARGUMENTS = {"canonical_name", "desc", "level"}
# We don't want @rule-writers to use 'rule_type' or 'cacheable' as kwargs directly,
# but rather set them implicitly based on the rule annotation.
# So we leave it out of PUBLIC_RULE_DECORATOR_ARGUMENTS.
IMPLICIT_PRIVATE_RULE_DECORATOR_ARGUMENTS = {"rule_type", "cacheable"}
def rule_decorator(func, **kwargs) -> Callable:
if not inspect.isfunction(func):
raise ValueError("The @rule decorator expects to be placed on a function.")
if (
len(
set(kwargs)
- PUBLIC_RULE_DECORATOR_ARGUMENTS
- IMPLICIT_PRIVATE_RULE_DECORATOR_ARGUMENTS
)
!= 0
):
raise UnrecognizedRuleArgument(
f"`@rule`s and `@goal_rule`s only accept the following keyword arguments: {PUBLIC_RULE_DECORATOR_ARGUMENTS}"
)
rule_type: RuleType = kwargs["rule_type"]
cacheable: bool = kwargs["cacheable"]
func_id = f"@rule {func.__module__}:{func.__name__}"
type_hints = get_type_hints(func)
return_type = _ensure_type_annotation(
type_annotation=type_hints.get("return"),
name=f"{func_id} return",
raise_type=MissingReturnTypeAnnotation,
)
parameter_types = tuple(
_ensure_type_annotation(
type_annotation=type_hints.get(parameter),
name=f"{func_id} parameter {parameter}",
raise_type=MissingParameterTypeAnnotation,
)
for parameter in inspect.signature(func).parameters
)
is_goal_cls = issubclass(return_type, Goal)
    # If a canonical name is not explicitly provided, default it to the module and name of the
    # function that implements it. This is used as the workunit name.
effective_name = kwargs.get("canonical_name", f"{func.__module__}.{func.__name__}")
# Set a default description, which is used in the dynamic UI and stacktraces.
effective_desc = kwargs.get("desc")
if effective_desc is None and is_goal_cls:
effective_desc = f"`{return_type.name}` goal"
effective_level = kwargs.get("level", LogLevel.TRACE)
if not isinstance(effective_level, LogLevel):
raise ValueError(
"Expected to receive a value of type LogLevel for the level "
f"argument, but got: {effective_level}"
)
return _make_rule(
func_id,
rule_type,
return_type,
parameter_types,
cacheable=cacheable,
canonical_name=effective_name,
desc=effective_desc,
level=effective_level,
)(func)
def validate_requirements(
func_id: str,
parameter_types: Tuple[Type, ...],
awaitables: Tuple[AwaitableConstraints, ...],
cacheable: bool,
) -> None:
if not cacheable:
return
# TODO: Technically this will also fire for an @_uncacheable_rule, but we don't expose those as
# part of the API, so it's OK for these errors not to mention them.
for ty in parameter_types:
if cacheable and issubclass(ty, SideEffecting):
raise ValueError(
f"A `@rule` that is not a @goal_rule ({func_id}) may not have "
f"a side-effecting parameter: {ty}."
)
for awaitable in awaitables:
input_type_side_effecting = issubclass(awaitable.input_type, SideEffecting)
if input_type_side_effecting and not awaitable.is_effect:
raise ValueError(
f"A `Get` may not request a side-effecting type ({awaitable.input_type}). "
f"Use `Effect` instead: `{awaitable}`."
)
if not input_type_side_effecting and awaitable.is_effect:
raise ValueError(
f"An `Effect` should not be used with a pure type ({awaitable.input_type}). "
f"Use `Get` instead: `{awaitable}`."
)
if cacheable and awaitable.is_effect:
raise ValueError(
f"A `@rule` that is not a @goal_rule ({func_id}) may not use an "
f"Effect: `{awaitable}`."
)
def inner_rule(*args, **kwargs) -> Callable:
if len(args) == 1 and inspect.isfunction(args[0]):
return rule_decorator(*args, **kwargs)
else:
def wrapper(*args):
return rule_decorator(*args, **kwargs)
return wrapper
def rule(*args, **kwargs) -> Callable:
return inner_rule(*args, **kwargs, rule_type=RuleType.rule, cacheable=True)
def goal_rule(*args, **kwargs) -> Callable:
if "level" not in kwargs:
kwargs["level"] = LogLevel.DEBUG
return inner_rule(*args, **kwargs, rule_type=RuleType.goal_rule, cacheable=False)
# This has a "private" name, as we don't (yet?) want it to be part of the rule API, at least
# until we figure out the implications, and have a handle on the semantics and use-cases.
def _uncacheable_rule(*args, **kwargs) -> Callable:
return inner_rule(*args, **kwargs, rule_type=RuleType.uncacheable_rule, cacheable=False)
class Rule(ABC):
"""Rules declare how to produce products for the product graph.
A rule describes what dependencies must be provided to produce a particular product. They also
act as factories for constructing the nodes within the graph.
"""
@property
@abstractmethod
def output_type(self):
"""An output `type` for the rule."""
def collect_rules(*namespaces: Union[ModuleType, Mapping[str, Any]]) -> Iterable[Rule]:
"""Collects all @rules in the given namespaces.
If no namespaces are given, collects all the @rules in the caller's module namespace.
"""
if not namespaces:
currentframe = inspect.currentframe()
assert isinstance(currentframe, FrameType)
caller_frame = currentframe.f_back
caller_module = inspect.getmodule(caller_frame)
assert isinstance(caller_module, ModuleType)
namespaces = (caller_module,)
def iter_rules():
for namespace in namespaces:
mapping = namespace.__dict__ if isinstance(namespace, ModuleType) else namespace
for name, item in mapping.items():
if not callable(item):
continue
rule = getattr(item, "rule", None)
if isinstance(rule, TaskRule):
for input in rule.input_selectors:
if issubclass(input, Subsystem):
yield SubsystemRule(input)
if issubclass(rule.output_type, Goal):
yield SubsystemRule(rule.output_type.subsystem_cls)
yield rule
return list(iter_rules())
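# A minimal sketch (hypothetical rule, not part of this module) of how @rule and
# collect_rules() fit together in a plugin module:
#
#     @rule
#     async def int_to_str(value: int) -> str:
#         return str(value)
#
#     def rules():
#         return collect_rules()  # picks up int_to_str via its `.rule` attribute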
@frozen_after_init
@dataclass(unsafe_hash=True)
class TaskRule(Rule):
"""A Rule that runs a task function when all of its input selectors are satisfied.
NB: This API is not meant for direct consumption. To create a `TaskRule` you should always
prefer the `@rule` constructor.
"""
_output_type: Type
input_selectors: Tuple[Type, ...]
input_gets: Tuple[AwaitableConstraints, ...]
func: Callable
cacheable: bool
canonical_name: str
desc: Optional[str]
level: LogLevel
def __init__(
self,
output_type: Type,
input_selectors: Iterable[Type],
func: Callable,
input_gets: Iterable[AwaitableConstraints],
canonical_name: str,
desc: Optional[str] = None,
level: LogLevel = LogLevel.TRACE,
cacheable: bool = True,
) -> None:
self._output_type = output_type
self.input_selectors = tuple(input_selectors)
self.input_gets = tuple(input_gets)
self.func = func
self.cacheable = cacheable
self.canonical_name = canonical_name
self.desc = desc
self.level = level
def __str__(self):
return "(name={}, {}, {!r}, {}, gets={})".format(
getattr(self, "name", "<not defined>"),
self.output_type.__name__,
self.input_selectors,
self.func.__name__,
self.input_gets,
)
@property
def output_type(self):
return self._output_type
@frozen_after_init
@dataclass(unsafe_hash=True)
class QueryRule(Rule):
"""A QueryRule declares that a given set of Params will be used to request an output type.
Every callsite to `Scheduler.product_request` should have a corresponding QueryRule to ensure
that the relevant portions of the RuleGraph are generated.
"""
_output_type: Type
input_types: Tuple[Type, ...]
def __init__(self, output_type: Type, input_types: Sequence[Type]) -> None:
self._output_type = output_type
self.input_types = tuple(input_types)
@property
def output_type(self):
return self._output_type
@dataclass(frozen=True)
class RuleIndex:
"""Holds a normalized index of Rules used to instantiate Nodes."""
rules: FrozenOrderedSet[TaskRule]
queries: FrozenOrderedSet[QueryRule]
union_rules: FrozenOrderedSet[UnionRule]
@classmethod
def create(cls, rule_entries: Iterable[Rule | UnionRule]) -> RuleIndex:
"""Creates a RuleIndex with tasks indexed by their output type."""
rules: OrderedSet[TaskRule] = OrderedSet()
queries: OrderedSet[QueryRule] = OrderedSet()
union_rules: OrderedSet[UnionRule] = OrderedSet()
for entry in rule_entries:
if isinstance(entry, TaskRule):
rules.add(entry)
elif isinstance(entry, UnionRule):
union_rules.add(entry)
elif isinstance(entry, QueryRule):
queries.add(entry)
elif hasattr(entry, "__call__"):
rule = getattr(entry, "rule", None)
if rule is None:
raise TypeError(f"Expected function {entry} to be decorated with @rule.")
rules.add(rule)
else:
raise TypeError(
f"Rule entry {entry} had an unexpected type: {type(entry)}. Rules either "
"extend Rule or UnionRule, or are static functions decorated with @rule."
)
return RuleIndex(
rules=FrozenOrderedSet(rules),
queries=FrozenOrderedSet(queries),
union_rules=FrozenOrderedSet(union_rules),
)
|
patricklaw/pants
|
src/python/pants/engine/rules.py
|
Python
|
apache-2.0
| 18,044
|
[
"VisIt"
] |
67a41033cc0e1544b91e1b51e6b2acf7120236dba57a6d7477b4ae07655c2f32
|
"""Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.deprecation import deprecated
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations.
This can also prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
If an array is passed, it must have the same number of entries as the
data used for fitting and is used as datapoint-dependent noise level.
Note that this is equivalent to adding a WhiteKernel with c=alpha.
Allowing to specify the noise level directly as a parameter is mainly
for convenience and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
@property
@deprecated("Attribute rng was deprecated in version 0.19 and "
"will be removed in 0.21.")
def rng(self):
return self._rng
@property
@deprecated("Attribute y_train_mean was deprecated in version 0.19 and "
"will be removed in 0.21.")
def y_train_mean(self):
return self._y_train_mean
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self._y_train_mean
else:
self._y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=True) # Line 2
# self.L_ changed, self._K_inv needs to be recomputed
self._K_inv = None
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its
        standard deviation (return_std=True) or covariance (return_cov=True)
        can also be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
if self.kernel is None:
kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
else:
kernel = self.kernel
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = kernel(X)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self._y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# cache result of K_inv computation
if self._K_inv is None:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T,
np.eye(self.L_.shape[0]))
self._K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ij->i",
np.dot(K_trans, self._K_inv), K_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the
random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
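if __name__ == '__main__':
    # Minimal usage sketch appended for illustration only; it is not part of the
    # upstream module. It assumes the enclosing class is GaussianProcessRegressor
    # (as in scikit-learn) and is meant to be run as
    # ``python -m sklearn.gaussian_process.gpr`` so the relative imports resolve.
    # The toy data and hyper-parameter values below are made up.
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(0., 5., size=(20, 1))
    y_demo = np.sin(X_demo).ravel() + 0.1 * rng.randn(20)
    gp = GaussianProcessRegressor(kernel=C(1.0) * RBF(1.0), alpha=0.1,
                                  n_restarts_optimizer=2, random_state=0)
    gp.fit(X_demo, y_demo)
    mean, std = gp.predict(X_demo[:5], return_std=True)   # mean and per-point std
    draws = gp.sample_y(X_demo[:5], n_samples=3)           # draws from the posterior
    print(gp.log_marginal_likelihood(gp.kernel_.theta))    # LML at the fitted theta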
|
herilalaina/scikit-learn
|
sklearn/gaussian_process/gpr.py
|
Python
|
bsd-3-clause
| 20,571
|
[
"Gaussian"
] |
97caaa579b3035c325d407f9c548d4f41d934b6c88ac01dea2aeab73d61ae864
|
import pytest
import smartsheet
from datetime import datetime
from dateutil.tz import *
import json
import os
import six
@pytest.fixture(scope="module")
def smart_setup(request):
# set up a test session folder with basic starting points
smart = smartsheet.Smartsheet(max_retry_time=60)
now = datetime.now(tzlocal()).strftime("%Y-%m-%d %H:%M:%S")
users = os.environ.get('SMARTSHEET_FIXTURE_USERS', None)
if users is None:
pytest.exit('Environment not setup correctly...aborting')
users = json.loads(users)
fixusers = {}
for nick,info in six.iteritems(users):
profile = smart.Users.get_user(info['id'])
assert isinstance(profile, smart.models.UserProfile)
fixusers[nick] = profile
action = smart.Groups.list_groups(include_all=True)
assert isinstance(action, smart.models.IndexResult)
grps = action.result
groups = {}
need_exec = True
for gp in grps:
groups[gp.name] = gp
if gp.name == 'exec':
need_exec = False
if need_exec:
group = smart.models.Group({
'name': 'exec',
'members': [
smart.models.GroupMember({
'email': fixusers['moe'].email
}),
smart.models.GroupMember({
'email': fixusers['admin'].email
})
]
})
action = smart.Groups.create_group(group)
assert action.message == 'SUCCESS'
# test run base folders
folder_name = 'pytest ' + now
action = smart.Home.create_folder(folder_name)
assert action.message == 'SUCCESS'
test_folder = action.result
# add a sheet to mess around with
sheet = smart.models.Sheet({
'name': 'pytest_fixture_sheet ' + now,
'columns': [{
'title': 'The First Column',
'primary': True,
'type': 'TEXT_NUMBER'
}, {
'title': 'Favorite',
'type': 'CHECKBOX',
'symbol': 'STAR'
}, {
'title': 'Disposable',
'type': 'TEXT_NUMBER'
}]
})
action = smart.Folders.create_sheet_in_folder(test_folder.id, sheet)
assert action.message == 'SUCCESS'
sheet = action.result
# get primary column id
for idx, col in enumerate(sheet.columns):
if col.primary:
break
sheet_primary_col = col
# add a row
action = sheet.add_rows([smart.models.Row({
'to_top': True,
'cells': [{
'column_id': sheet_primary_col.id,
'value': 'The first column of the first row.'
}]
})])
assert action.message == 'SUCCESS'
sheet = smart.Sheets.get_sheet(sheet.id)
assert isinstance(sheet, smart.models.Sheet)
sheet_b = smart.models.Sheet({
'name': 'pytest_fixture_sheetB ' + now,
'columns': [{
'title': 'Brand',
'primary': True,
'type': 'TEXT_NUMBER'
}]
})
action = smart.Folders.create_sheet_in_folder(test_folder.id, sheet_b)
assert action.message == 'SUCCESS'
sheet_b = action.result
for idx, col in enumerate(sheet_b.columns):
if col.primary:
break
sheet_b_primary_col = col
action = sheet_b.add_rows([
smart.models.Row({
'to_top': True,
'cells': [{
'column_id': sheet_b_primary_col.id,
'value': 'Nike'
}]
}),
smart.models.Row({
'to_top': True,
'cells': [{
'column_id': sheet_b_primary_col.id,
'value': 'Google'
}]
}),
smart.models.Row({
'to_top': True,
'cells': [{
'column_id': sheet_b_primary_col.id,
'value': 'Adidas'
}]
}),
smart.models.Row({
'to_top': True,
'cells': [{
'column_id': sheet_b_primary_col.id,
'value': 'Keen'
}]
})])
assert action.message == 'SUCCESS'
sheet_b = smart.Sheets.get_sheet(sheet_b.id)
assert isinstance(sheet_b, smart.models.Sheet)
fixture = {
'smart': smart,
'folder': test_folder,
'sheet': sheet,
'sheet_primary_col': sheet_primary_col,
'sheet_b': sheet_b,
'sheet_b_primary_col': sheet_b_primary_col,
'now': now,
'users': fixusers,
'groups': groups
}
def smart_teardown():
action = fixture['smart'].Sheets.delete_sheet(fixture['sheet'].id)
assert action.message == 'SUCCESS'
print("deleted fixture sheet")
action = fixture['smart'].Sheets.delete_sheet(fixture['sheet_b'].id)
assert action.message == 'SUCCESS'
print("deleted fixture sheet_b")
action = fixture['smart'].Folders.delete_folder(fixture['folder'].id)
assert action.message == 'SUCCESS'
print("deleted fixture folder")
if 'folder_b' in fixture:
action = fixture['smart'].Folders.delete_folder(fixture['folder_b'].id)
assert action.message == 'SUCCESS'
print("deleted fixture folder_b")
request.addfinalizer(smart_teardown)
return fixture
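# Illustrative sketch appended for clarity; it is not part of the original
# conftest. A test module in this directory would consume the fixture simply by
# naming it as an argument -- the dictionary keys follow the ``fixture`` dict
# returned above, and the one-row assertion reflects the single row added to the
# fixture sheet during setup (kept as a comment so test collection is unaffected):
#
#     def test_fixture_sheet_has_one_row(smart_setup):
#         smart = smart_setup['smart']
#         sheet = smart.Sheets.get_sheet(smart_setup['sheet'].id)
#         assert len(sheet.rows) == 1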
|
smartsheet-platform/smartsheet-python-sdk
|
tests/integration/conftest.py
|
Python
|
apache-2.0
| 5,275
|
[
"MOE"
] |
f18c6e7df63c84e67536d53b9b0f79f87ea2e7070287124891eb7120ee8eac3d
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test the io/__init__.py module.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests # isort:skip
from io import BytesIO
import iris.fileformats as iff
import iris.io
class TestDecodeUri(tests.IrisTest):
def test_decode_uri(self):
tests = {
"/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp": (
"file",
"/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp",
),
r"C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp": (
"file",
r"C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp",
),
"file:///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp": (
"file",
"///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp",
),
"http://www.somehost.com:8080/resource/thing.grib": (
"http",
"//www.somehost.com:8080/resource/thing.grib",
),
"/data/local/someDir/2013-11-25T13:49:17.632797": (
"file",
"/data/local/someDir/2013-11-25T13:49:17.632797",
),
}
for uri, pair in tests.items():
self.assertEqual(pair, iris.io.decode_uri(uri))
class TestFileFormatPicker(tests.IrisTest):
def test_known_formats(self):
self.assertString(
str(iff.FORMAT_AGENT),
tests.get_result_path(("file_load", "known_loaders.txt")),
)
@tests.skip_data
def test_format_picker(self):
# ways to test the format picker = list of (format-name, file-spec)
test_specs = [
(
"NetCDF",
["NetCDF", "global", "xyt", "SMALL_total_column_co2.nc"],
),
(
"NetCDF 64 bit offset format",
["NetCDF", "global", "xyt", "SMALL_total_column_co2.nc.k2"],
),
(
"NetCDF_v4",
["NetCDF", "global", "xyt", "SMALL_total_column_co2.nc4.k3"],
),
(
"NetCDF_v4",
["NetCDF", "global", "xyt", "SMALL_total_column_co2.nc4.k4"],
),
("UM Fieldsfile (FF) post v5.2", ["FF", "n48_multi_field"]),
(
"GRIB",
["GRIB", "grib1_second_order_packing", "GRIB_00008_FRANX01"],
),
("GRIB", ["GRIB", "jpeg2000", "file.grib2"]),
("UM Post Processing file (PP)", ["PP", "simple_pp", "global.pp"]),
(
"UM Post Processing file (PP) little-endian",
["PP", "little_endian", "qrparm.orog.pp"],
),
(
"UM Fieldsfile (FF) ancillary",
["FF", "ancillary_fixed_length_header"],
),
# ('BUFR',
# ['BUFR', 'mss', 'BUFR_Samples',
# 'JUPV78_EGRR_121200_00002501']),
(
"NIMROD",
[
"NIMROD",
"uk2km",
"WO0000000003452",
"201007020900_u1096_ng_ey00_visibility0180_screen_2km",
],
),
# ('NAME',
# ['NAME', '20100509_18Z_variablesource_12Z_VAAC',
# 'Fields_grid1_201005110000.txt']),
]
# test that each filespec is identified as the expected format
for (expected_format_name, file_spec) in test_specs:
test_path = tests.get_data_path(file_spec)
with open(test_path, "rb") as test_file:
a = iff.FORMAT_AGENT.get_spec(test_path, test_file)
self.assertEqual(a.name, expected_format_name)
def test_format_picker_nodata(self):
# The following is to replace the above at some point as no real files
# are required.
# (Used binascii.unhexlify() to convert from hex to binary)
# Packaged grib, magic number offset by set length, this length is
# specific to WMO bulletin headers
header_lengths = [21, 80, 41, 42]
for header_length in header_lengths:
binary_string = header_length * b"\x00" + b"GRIB" + b"\x00" * 100
with BytesIO(b"rw") as bh:
bh.write(binary_string)
bh.name = "fake_file_handle"
a = iff.FORMAT_AGENT.get_spec(bh.name, bh)
self.assertEqual(a.name, "GRIB")
def test_open_dap(self):
# tests that *ANY* http or https URL is seen as an OPeNDAP service.
# This may need to change in the future if other protocols are
# supported.
DAP_URI = "http://geoport.whoi.edu/thredds/dodsC/bathy/gom15"
a = iff.FORMAT_AGENT.get_spec(DAP_URI, None)
self.assertEqual(a.name, "NetCDF OPeNDAP")
if __name__ == "__main__":
tests.main()
|
rcomer/iris
|
lib/iris/tests/test_io_init.py
|
Python
|
lgpl-3.0
| 5,202
|
[
"NetCDF"
] |
ccaca962fb40272d93ad3fec57303f7fe222221d350924f2e1e48ef1ed2af69a
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""CP2K atomic wavefunctions"""
import numpy as np
from horton.gbasis.iobas import str_to_shell_types
from horton.gbasis.cext import GOBasis, fac2
from horton.meanfield.orbitals import Orbitals
__all__ = ['load_atom_cp2k']
def _get_cp2k_norm_corrections(l, alphas):
"""Compute the corrections for the normalization of the basis functions.
This correction is needed because the CP2K atom code works with non-normalized basis
functions. HORTON assumes Gaussian primitives are always normalized.
Parameters
----------
l : int
The angular momentum of the (pure) basis function. (s=0, p=1, ...)
alphas : float or np.ndarray
The exponent or exponents of the Gaussian primitives for which the correction
is to be computed.
Returns
-------
corrections : float or np.ndarray
The scale factor for the expansion coefficients of the wavefunction in
terms of primitive Gaussians. The inverse of this correction can be
applied to the contraction coefficients.
"""
expzet = 0.25*(2*l + 3)
prefac = np.sqrt(np.sqrt(np.pi)/2.0**(l + 2)*fac2(2*l + 1))
zeta = 2.0*alphas
return zeta**expzet/prefac
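# Note added for illustration (not in the original module): the readers below apply this
# correction by *dividing* CP2K's raw coefficients by it. For an assumed s-type primitive
# (l=0) with exponent alpha=0.5 the factor works out to
# (2*0.5)**0.75 / np.sqrt(np.sqrt(np.pi)/4), i.e. roughly 1.5.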
def _read_cp2k_contracted_obasis(f):
"""Read a contracted basis set from an open CP2K ATOM output file.
Parameters
----------
f : file
An open readable file object.
Returns
-------
obasis : GOBasis
The orbital basis read from the file.
"""
# Load the relevant data from the file
basis_desc = []
for line in f:
if line.startswith(' *******************'):
break
elif line[3:12] == 'Functions':
shell_type = str_to_shell_types(line[1:2], pure=True)[0]
a = [] # exponents (alpha)
c = [] # contraction coefficients
basis_desc.append((shell_type, a, c))
else:
values = [float(w) for w in line.split()]
a.append(values[0]) # one exponent per line
c.append(values[1:]) # many contraction coefficients per line
# Convert the basis into HORTON format
shell_map = []
shell_types = []
nprims = []
alphas = []
con_coeffs = []
for shell_type, a, c in basis_desc:
# get correction to contraction coefficients. CP2K uses different normalization
# conventions.
corrections = _get_cp2k_norm_corrections(abs(shell_type), np.array(a))
c = np.array(c)/corrections.reshape(-1, 1)
# fill in arrays
for col in c.T:
shell_map.append(0)
shell_types.append(shell_type)
nprims.append(len(col))
alphas.extend(a)
con_coeffs.extend(col)
# Create the basis object
coordinates = np.zeros((1, 3))
shell_map = np.array(shell_map)
nprims = np.array(nprims)
shell_types = np.array(shell_types)
alphas = np.array(alphas)
con_coeffs = np.array(con_coeffs)
obasis = GOBasis(coordinates, shell_map, nprims, shell_types, alphas, con_coeffs)
return obasis
def _read_cp2k_uncontracted_obasis(f):
"""Read an uncontracted basis set from an open CP2K ATOM output file.
Parameters
----------
f : file
An open readable file object.
Returns
-------
obasis : GOBasis
The orbital basis read from the file.
"""
# Load the relevant data from the file
basis_desc = []
shell_type = None
for line in f:
if line.startswith(' *******************'):
break
elif line[3:13] == 'Exponents:':
shell_type = str_to_shell_types(line[1:2], pure=True)[0]
words = line.split()
if len(words) >= 2:
# read the exponent
alpha = float(words[-1])
basis_desc.append((shell_type, alpha))
# Convert the basis into HORTON format
shell_map = []
shell_types = []
nprims = []
alphas = []
con_coeffs = []
# fill in arrays
for shell_type, alpha in basis_desc:
correction = _get_cp2k_norm_corrections(abs(shell_type), alpha)
shell_map.append(0)
shell_types.append(shell_type)
nprims.append(1)
alphas.append(alpha)
con_coeffs.append(1.0 / correction)
# Create the basis object
centers = np.zeros((1, 3))
shell_map = np.array(shell_map)
nprims = np.array(nprims)
shell_types = np.array(shell_types)
alphas = np.array(alphas)
con_coeffs = np.array(con_coeffs)
obasis = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
return obasis
def _read_cp2k_obasis(f):
"""Read a basis set from an open CP2K ATOM output file."""
next(f) # Skip empty line
line = next(f) # Check for contracted versus uncontracted
if line == ' ********************** Contracted Gaussian Type Orbitals '\
'**********************\n':
return _read_cp2k_contracted_obasis(f)
elif line == ' ********************* Uncontracted Gaussian Type Orbitals '\
'*********************\n':
return _read_cp2k_uncontracted_obasis(f)
else:
raise IOError('Could not find basis set in CP2K ATOM output.')
def _read_cp2k_occupations_energies(f, restricted):
"""Read orbital occupation numbers and energies from an open CP2K ATOM output file.
Parameters
----------
f : file
An open readable file object.
restricted : bool
Is wavefunction restricted or unrestricted?
Returns
-------
oe_alpha, oe_beta : list
A list with orbital properties. Each element is a tuple with the
following info: (angular_momentum l, spin component: 'alpha' or
'beta', occupation number, orbital energy).
"""
oe_alpha = []
oe_beta = []
empty = 0
while empty < 2:
line = next(f)
words = line.split()
if len(words) == 0:
empty += 1
continue
empty = 0
s = int(words[0])
l = int(words[2 - restricted])
occ = float(words[3 - restricted])
ener = float(words[4 - restricted])
if restricted or words[1] == 'alpha':
oe_alpha.append((l, s, occ, ener))
else:
oe_beta.append((l, s, occ, ener))
return oe_alpha, oe_beta
def _read_cp2k_orbital_coeffs(f, oe):
"""Read the expansion coefficients of the orbital from an open CP2K ATOM output.
Parameters
----------
f : file
An open readable file object.
oe : list
The orbital occupation numbers and energies read with
``_read_cp2k_occupations_energies``.
Returns
-------
result : dict
Key is an (l, s) pair and value is an array with orbital coefficients.
"""
coeffs = {}
next(f)
while len(coeffs) < len(oe):
line = next(f)
assert line.startswith(" ORBITAL L =")
words = line.split()
l = int(words[3])
s = int(words[6])
c = []
while True:
line = next(f)
if len(line.strip()) == 0:
break
c.append(float(line))
coeffs[(l, s)] = np.array(c)
return coeffs
def _get_norb_nel(oe):
"""Return number of orbitals and electrons.
Parameters
----------
oe : list
The orbital occupation numbers and energies read with
``_read_cp2k_occupations_energies``.
"""
norb = 0
nel = 0
for row in oe:
norb += 2*row[0] + 1
nel += row[2]
return norb, nel
def _fill_orbitals(orb, oe, coeffs, shell_types, restricted):
"""Fill in orbital coefficients, energies and occupation numbers in ``orb``.
Parameters
----------
orb : Orbitals
An object to represent the orbitals
oe : list
The orbital occupation numbers and energies read with
``_read_cp2k_occupations_energies``.
coeffs : dict
The orbital coefficients read with ``_read_cp2k_orbital_coeffs``.
shell_types : np.ndarray
The array with shell types of the GOBasis instance.
restricted : bool
Is wavefunction restricted or unrestricted?
"""
# Find the offsets for each angular momentum
offset = 0
offsets = []
ls = abs(shell_types)
for l in sorted(set(ls)):
offsets.append(offset)
offset += (2*l + 1)*(l == ls).sum()
del offset
# Fill in the coefficients
iorb = 0
for l, s, occ, ener in oe:
cs = coeffs.get((l, s))
stride = 2*l + 1
for m in range(-l, l+1):
im = m + l
orb.energies[iorb] = ener
orb.occupations[iorb] = occ/float((restricted + 1)*(2*l + 1))
for ic in range(len(cs)):
orb.coeffs[offsets[l] + stride*ic + im, iorb] = cs[ic]
iorb += 1
def load_atom_cp2k(filename):
"""Load data from a CP2K ATOM computation.
Parameters
---------
filename : str
The name of the cp2k out file
Returns
-------
results : dict
Contains: ``obasis``, ``orb_alpha``, ``coordinates``, ``numbers``, ``energy``,
``pseudo_numbers``. May contain: ``orb_beta``.
Notes
-----
This function assumes that the following subsections are present in the CP2K
ATOM input file, in the section ``ATOM%PRINT``:
.. code-block:: text
&PRINT
&POTENTIAL
&END POTENTIAL
&BASIS_SET
&END BASIS_SET
&ORBITALS
&END ORBITALS
&END PRINT
"""
with open(filename) as f:
# Find the element number
number = None
for line in f:
if line.startswith(' Atomic Energy Calculation'):
number = int(line[-5:-1])
break
if number is None:
raise IOError('Could not find atomic number in CP2K ATOM output: %s.' % filename)
# Go to the all-electron basis set and read it.
for line in f:
if line.startswith(' All Electron Basis'):
break
ae_obasis = _read_cp2k_obasis(f)
# Go to the pseudo basis set and read it.
for line in f:
if line.startswith(' Pseudopotential Basis'):
break
pp_obasis = _read_cp2k_obasis(f)
# Search for (un)restricted
restricted = None
for line in f:
if line.startswith(' METHOD |'):
if 'U' in line:
restricted = False
break
elif 'R' in line:
restricted = True
break
# Search for the core charge (pseudo number)
pseudo_number = None
for line in f:
if line.startswith(' Core Charge'):
pseudo_number = float(line[70:])
assert pseudo_number == int(pseudo_number)
break
elif line.startswith(' Electronic structure'):
pseudo_number = float(number)
break
if pseudo_number is None:
raise IOError('Could not find effective core charge in CP2K ATOM output:'
' %s' % filename)
# Select the correct basis
if pseudo_number == number:
obasis = ae_obasis
else:
obasis = pp_obasis
# Search for energy
for line in f:
if line.startswith(' Energy components [Hartree] Total Energy ::'):
energy = float(line[60:])
break
# Read orbital energies and occupations
for line in f:
if line.startswith(' Orbital energies'):
break
next(f)
oe_alpha, oe_beta = _read_cp2k_occupations_energies(f, restricted)
# Read orbital expansion coefficients
line = next(f)
if (line != " Atomic orbital expansion coefficients [Alpha]\n") and \
(line != " Atomic orbital expansion coefficients []\n"):
raise IOError('Could not find orbital coefficients in CP2K ATOM output: '
'%s' % filename)
coeffs_alpha = _read_cp2k_orbital_coeffs(f, oe_alpha)
if not restricted:
line = next(f)
if line != " Atomic orbital expansion coefficients [Beta]\n":
raise IOError('Could not find beta orbital coefficient in CP2K ATOM '
'output: %s' % filename)
coeffs_beta = _read_cp2k_orbital_coeffs(f, oe_beta)
# Turn orbital data into a HORTON orbital expansions
if restricted:
norb, nel = _get_norb_nel(oe_alpha)
assert nel % 2 == 0
orb_alpha = Orbitals(obasis.nbasis, norb)
orb_beta = None
_fill_orbitals(orb_alpha, oe_alpha, coeffs_alpha, obasis.shell_types, restricted)
else:
norb_alpha = _get_norb_nel(oe_alpha)[0]
norb_beta = _get_norb_nel(oe_beta)[0]
assert norb_alpha == norb_beta
orb_alpha = Orbitals(obasis.nbasis, norb_alpha)
orb_beta = Orbitals(obasis.nbasis, norb_beta)
_fill_orbitals(orb_alpha, oe_alpha, coeffs_alpha, obasis.shell_types, restricted)
_fill_orbitals(orb_beta, oe_beta, coeffs_beta, obasis.shell_types, restricted)
result = {
'obasis': obasis,
'orb_alpha': orb_alpha,
'coordinates': obasis.centers,
'numbers': np.array([number]),
'energy': energy,
'pseudo_numbers': np.array([pseudo_number]),
}
if orb_beta is not None:
result['orb_beta'] = orb_beta
return result
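if __name__ == '__main__':
    # Minimal usage sketch appended for illustration only; it is not part of the
    # original module. Pass the path of a CP2K ATOM output file (one containing the
    # PRINT subsections listed in the docstring above) on the command line; the keys
    # printed below follow the dictionary returned by load_atom_cp2k.
    import sys
    if len(sys.argv) == 2:
        data = load_atom_cp2k(sys.argv[1])
        print('element number  : %s' % data['numbers'][0])
        print('pseudo number   : %s' % data['pseudo_numbers'][0])
        print('total energy    : %s' % data['energy'])
        print('alpha occupation: %s' % data['orb_alpha'].occupations.sum())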
|
theochem/horton
|
horton/io/cp2k.py
|
Python
|
gpl-3.0
| 14,579
|
[
"CP2K",
"Gaussian"
] |
adf404e440a33bd9191e414f8377209391769f3d34694ac8049230dd8943d7fe
|
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.extmath import norm
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
k=None, eigen_tol=0.0,
assign_labels='kmeans',
mode=None):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
            - symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters: integer, optional
Number of clusters to extract.
n_components: integer, optional, default is k
Number of eigen vectors to use for the spectral embedding
eigen_solver: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init: int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization.
Returns
-------
labels: array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
    if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
    if k is not None:
warnings.warn("'k' was renamed to n_clusters and will "
"be removed in 0.15.",
DeprecationWarning)
n_clusters = k
    if mode is not None:
warnings.warn("'mode' was renamed to eigen_solver "
"and will be removed in 0.15.",
DeprecationWarning)
eigen_solver = mode
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either the
    Gaussian (aka RBF) kernel of the euclidean distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity: string, 'nearest_neighbors', 'rbf' or 'precomputed'
gamma: float
Scaling factor of Gaussian (rbf) affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
n_neighbors: integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
Attributes
----------
`affinity_matrix_` : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
`labels_` :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10, k=None,
eigen_tol=0.0, assign_labels='kmeans', mode=None):
if k is not None:
warnings.warn("'k' was renamed to n_clusters and "
"will be removed in 0.15.",
DeprecationWarning)
n_clusters = k
if mode is not None:
warnings.warn("'mode' was renamed to eigen_solver and "
"will be removed in 0.15.",
DeprecationWarning)
eigen_solver = mode
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
def fit(self, X):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'rbf':
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma)
elif self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
raise ValueError("Invalid 'affinity'. Expected 'rbf', "
"'nearest_neighbors' or 'precomputed', got '%s'."
% self.affinity)
self.random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=self.random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
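if __name__ == '__main__':
    # Minimal usage sketch appended for illustration only; it is not part of the
    # upstream module. Run it as ``python -m sklearn.cluster.spectral`` so the
    # relative imports above resolve; the toy data and parameter values are made up.
    from sklearn.datasets import make_circles
    X_demo, _ = make_circles(n_samples=200, factor=0.5, noise=0.05)
    model = SpectralClustering(n_clusters=2, affinity='rbf', gamma=100.,
                               assign_labels='discretize', random_state=0)
    print(model.fit(X_demo).labels_)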
|
mrshu/scikit-learn
|
sklearn/cluster/spectral.py
|
Python
|
bsd-3-clause
| 17,408
|
[
"Brian",
"Gaussian"
] |
a3564d66c8f1468a5cfb60a5454f86d89e3f0175e6b4bba35b57952bcb9a3a02
|
import unittest
import datetime
import json
from mock import MagicMock
from DIRAC.DataManagementSystem.Agent.RequestOperations.ReplicateAndRegister import ReplicateAndRegister
from DIRAC.RequestManagementSystem.Client.File import File
class ReqOpsTestCase( unittest.TestCase ):
""" Base class for the clients test cases
"""
def setUp( self ):
fcMock = MagicMock()
    ftsMock = MagicMock()
self.rr = ReplicateAndRegister()
self.rr.fc = fcMock
self.rr.ftsClient = ftsMock
def tearDown( self ):
pass
#############################################################################
class ReplicateAndRegisterSuccess( ReqOpsTestCase ):
def test__addMetadataToFiles( self ):
resMeta = {'OK': True,
'Value': {'Failed': {},
'Successful': {'/lhcb/1.dst': {'ChecksumType': 'AD',
'Checksum': '123456',
'CreationDate': datetime.datetime( 2013, 12, 11, 20, 20, 21 ),
'GUID': '92F9CE97-7A62-E311-8401-0025907FD430',
'Mode': 436,
'ModificationDate': datetime.datetime( 2013, 12, 11, 20, 20, 21 ),
'NumberOfLinks': 1,
'Size': 5846023777,
'Status': '-'},
'/lhcb/2.dst': {'ChecksumType': 'AD',
'Checksum': '987654',
'CreationDate': datetime.datetime( 2013, 12, 12, 6, 26, 52 ),
'GUID': 'DAE4933A-C162-E311-8A6B-003048FEAF04',
'Mode': 436,
'ModificationDate': datetime.datetime( 2013, 12, 12, 6, 26, 52 ),
'NumberOfLinks': 1,
'Size': 5893396937,
'Status': '-'}}}}
self.rr.fc.getFileMetadata.return_value = resMeta
file1 = File()
file1.LFN = '/lhcb/1.dst'
file2 = File()
file2.LFN = '/lhcb/2.dst'
toSchedule = {'/lhcb/1.dst': [file1, ['SE1'], ['SE2', 'SE3']],
'/lhcb/2.dst': [file2, ['SE4'], ['SE5', 'SE6']]}
res = self.rr._addMetadataToFiles( toSchedule )
self.assert_( res['OK'] )
self.assertEqual( json.loads( res['Value'][0][0] )['LFN'], resMeta['Value']['Successful'].keys()[0] )
self.assertEqual( json.loads( res['Value'][0][0] )['Size'], resMeta['Value']['Successful'].values()[0]['Size'] )
self.assertEqual( json.loads( res['Value'][1][0] )['LFN'], resMeta['Value']['Successful'].keys()[1] )
self.assertEqual( json.loads( res['Value'][1][0] )['Size'], resMeta['Value']['Successful'].values()[1]['Size'] )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( ReqOpsTestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ReplicateAndRegisterSuccess ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
vmendez/DIRAC
|
DataManagementSystem/Agent/RequestOperations/test/test_RequestOperations.py
|
Python
|
gpl-3.0
| 3,274
|
[
"DIRAC"
] |
69653d5e64d34069f9cc126586b02c79b1e41c6fa3743839a7206e54da8b4f17
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 14:48:24 2017
@author: thomas
"""
#from layers import Latent_Layer
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from tfutils.helpers import repeat_v2
from tfutils.distributions import logsumexp, discretized_logistic
from layers import Latent_Layer
class Network(object):
''' VAE template '''
def __init__(self,hps):
# placeholders
self.x = x = tf.placeholder("float32", shape=[None,1])
self.y = y = tf.placeholder("float32", shape=[None,1])
self.is_training = is_training = tf.placeholder("bool") # if True: sample from q, else sample from p
self.k = k = tf.placeholder('int32') # number of importance samples
self.temp = temp = tf.Variable(5.0,name='temperature',trainable=False) # Temperature for discrete latents
self.lamb = lamb = tf.Variable(1.0,name="lambda",trainable=False) # Lambda for KL annealing
# Importance sampling: repeats along second dimension
x_rep = repeat_v2(x,k)
y_rep = repeat_v2(y,k)
# Encoder x,y --> h
xy = tf.concat([x_rep,y_rep],1) # concatenate along last dim
h_up = slim.fully_connected(xy,hps.h_size,tf.nn.relu)
# Initialize ladders
layers = []
for i in range(hps.depth):
layers.append(Latent_Layer(hps=hps,var_type=hps.var_type[i],depth=i,is_top=(i==(hps.depth-1))))
# Ladder up
for i,layer in enumerate(layers):
h_up = layer.up(h_up)
# Prior x --> p_z
h_down = slim.fully_connected(x_rep,hps.h_size,tf.nn.relu)
kl_sum = 0.0
kl_sample = 0.0
# Ladder down
for i,layer in reversed(list(enumerate(layers))):
h_down, kl_cur, kl_sam = layer.down(h_down,is_training,temp,lamb)
kl_sum += kl_cur
kl_sample += kl_sam
# Decoder: x,z --> y
xz = tf.concat([slim.flatten(h_down),x_rep],1)
dec1 = slim.fully_connected(xz,50,tf.nn.relu)
dec2 = slim.fully_connected(dec1,50,tf.nn.relu)
dec3 = slim.fully_connected(dec2,50,activation_fn=None)
mu_y = slim.fully_connected(dec3,1,activation_fn=None)
if hps.ignore_sigma_outcome:
log_dec_noise = tf.zeros(tf.shape(mu_y))
else:
log_dec_noise = slim.fully_connected(dec3,1,activation_fn=None)
# p(y|x,z)
if hps.out_lik == 'normal':
dec_noise = tf.exp(tf.clip_by_value(log_dec_noise,-10,10))
outdist = tf.contrib.distributions.Normal(mu_y,dec_noise)
self.log_py_x = log_py_x = tf.reduce_sum(outdist.log_prob(y_rep),axis=1)
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(log_py_x - kl_sample,[-1,k])) - tf.log(tf.to_float(k)))
elif hps.out_lik == 'discretized_logistic':
self.log_py_x = log_py_x = tf.reduce_sum(discretized_logistic(mu_y,log_dec_noise,binsize=1,sample=y_rep),axis=1)
outdist = tf.contrib.distributions.Logistic(loc=mu_y,scale = tf.exp(log_dec_noise))
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(tf.reduce_sum(outdist.log_prob(y_rep),axis=1) - kl_sample,[-1,k]))- tf.log(tf.to_float(k)))
elif hps.out_lik == 'squared_error':
hps.ignore_sigma_outcome = True
self.log_py_x = log_py_x = -tf.reduce_sum(tf.pow(mu_y - y_rep, 2),axis=1) # Gaussian loglik has minus in front
self.nats = tf.zeros([1])
# sample y
if hps.ignore_sigma_outcome:
self.y_sample = mu_y
else:
self.y_sample = outdist.sample()
# To display KL
self.kl = tf.reduce_mean(kl_sum)
# ELBO
log_divergence = tf.reshape(log_py_x - kl_sum,[-1,k]) # shape [batch_size,k]
if np.abs(hps.alpha-1.0)>1e-3: # prevent zero division
log_divergence = log_divergence * (1-hps.alpha)
logF = logsumexp(log_divergence)
self.elbo = elbo = tf.reduce_mean(logF - tf.log(tf.to_float(k)))/ (1-hps.alpha)
else:
logF = logsumexp(log_divergence)
self.elbo = elbo = tf.reduce_mean(logF - tf.log(tf.to_float(k)))
self.loss = loss = -elbo
### Optimizer
self.lr = lr = tf.Variable(0.001,name="learning_rate",trainable=False)
global_step = tf.Variable(0,name='global_step',trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
self.train_op = optimizer.minimize(loss,global_step=global_step)
self.init_op=tf.global_variables_initializer()
#gvs = optimizer.compute_gradients(loss)
#if hps.grad_clip > 0: # gradient clipping
# gvs = [(tf.clip_by_value(grad, -hps.grad_clip, hps.grad_clip), var) for grad, var in gvs]
#self.train_op = optimizer.apply_gradients(gvs)
|
tmoer/multimodal_varinf
|
networks/toy_vae.py
|
Python
|
mit
| 5,171
|
[
"Gaussian"
] |
6df7b782d9445eeee7e0d6a07a2512494d753a5c81e41009cf7833d9ec63cbba
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import PHOENIX_tools as pt
from fft_interpolate import gauss_taper
from scipy.signal import resample, hann, kaiser, boxcar
from scipy.special import i0, iv
from scipy.integrate import trapz
from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift
from sinc_interpolate import Sinc_w
#fftshift and ifftshift have different behaviours depending on whether N is odd or even. When even, they behave the
# same. The distinction only matters when you are shifting an odd-length array back and
# forth immediately, not when you are shifting back from an FFT.
#fftshift(fftfreq()) is the correct one to use
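# Small illustration of the notes above (added for clarity, not in the original script);
# kept commented out so the behaviour of the script is unchanged:
#_even, _odd = np.arange(6), np.arange(7)
#assert np.array_equal(fftshift(_even), ifftshift(_even))      # even N: the two shifts agree
#assert not np.array_equal(fftshift(_odd), ifftshift(_odd))    # odd N: they differ by one slot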
w_full = pt.w_full
#Shrink to just Dave's Order
ind = (w_full > 5120) & (w_full < 5220)
f_full = pt.load_flux_full(5900, 3.5, True)[ind]
w_full = w_full[ind]
c_kms = 2.99792458e5 #km s^-1
@np.vectorize
def L(x, a=4):
if np.abs(x) < a:
return np.sinc(x) * np.sinc(x / a)
else:
return 0.
@np.vectorize
def window(x, name, a=2, alpha=5):
if np.abs(x) <= a:
if name == 'lanczos':
return np.sinc(x / a)
if name == 'hann':
return 0.5 * (1 + np.cos(np.pi * x / a))
if name == 'kaiser':
return i0(np.pi * alpha * np.sqrt(1 - (x / a) ** 2)) / i0(np.pi * alpha)
if name == None or window == 'boxcar':
return 1.
else:
return 0.
def plot_windows():
windows = ['lanczos', 'hann', 'kaiser']
fig = plt.figure()
ax = fig.add_subplot(111)
xs = np.linspace(-2, 2)
ax.plot(xs, window(xs, 'lanczos'), label='Lanczos')
ax.plot(xs, window(xs, 'hann'), label='Hann')
ax.plot(xs, window(xs, 'kaiser', alpha=2), label='Kaiser=2')
ax.plot(xs, window(xs, 'kaiser', alpha=10), label='Kaiser=10')
ax.legend()
ax.set_xlabel(r"$\pm a$")
fig.savefig("plots/windows.png")
def kaiser_discrete(alpha):
N = 29
M = N - 1
ns = np.arange(M + 0.1)
z = 2 * ns / M - 1
wn = i0(np.pi * alpha * np.sqrt(1 - (z) ** 2)) / i0(np.pi * alpha)
return wn
def plot_kaiser_discrete():
plt.plot(kaiser_discrete(3), label="3")
plt.plot(kaiser_discrete(5), label="5")
plt.plot(kaiser_discrete(14), label="14")
plt.show()
def sinc_w(x, name='lanczos', a=2, alpha=5):
'''Return a windowed sinc (for interpolation) using window and scale parameter (in pixels) of a.'''
w0 = np.sinc(x)
return w0 * window(x, name=name, a=a, alpha=alpha)
def get_db_response(xs, name, a=2, alpha=5, n=400):
F = fft(ifftshift(sinc_w(xs, name, a=a, alpha=alpha)))
F = F / F[0]
#Fs = fftshift(F)
return 10 * np.log10(np.abs(F))
def sinc_w_interp(lam, wl, fl, name='lanczos', a=2, alpha=5):
'''lam is the spot to interpolate to, while wl and fl are the distcrete wavelengths and fluxes.'''
#find starting index for floor(lam)
floor_ind = np.argwhere(lam > wl)[-1][0]
b_i = floor_ind - a + 1
e_i = floor_ind + a + 1 #+1 for slicing
wls = wl[b_i:e_i]
dlam = wl[floor_ind] - wl[floor_ind - 1]
fls = fl[b_i:e_i]
fl = np.sum(fls * sinc_w((lam - wls) / dlam, name, a, alpha))
return fl
#wl = np.arange(10,20.1,0.5)
#fl = wl
#print(sinc_w_interp(15.25, wl, fl, a=5))
#print(sinc_w_interp(15.25, wl, fl, name='hann', a=10))
def plot_sinc_windows():
fig, ax = plt.subplots(nrows=2, figsize=(8, 8))
xs = np.linspace(-2, 2., num=200)
xs4 = np.linspace(-5, 5, num=500)
y2s = sinc_w(xs, 'lanczos')
y4s = sinc_w(xs4, 'lanczos', a=5)
yks = sinc_w(xs4, 'kaiser', a=5, alpha=5)
yks2 = sinc_w(xs, 'kaiser', a=2, alpha=5)
ax[0].plot(xs, y2s, "b", label='Lanczos, a=2')
ax[0].plot(xs4, y4s, 'g', label='Lanczos, a=5')
ax[0].plot(xs4, yks, "r", label='Kaiser 5, a=5')
ax[0].plot(xs, yks2, "c", label='Kaiser 5, a=2')
#ax[0].plot(xs,sinc_w(xs, 'hann'),label='Hann')
#ax[0].plot(xs,sinc_w(xs, 'kaiser',alpha=5),label='Kaiser=5')
#ax[0].plot(xs,sinc_w(xs, 'kaiser',alpha=10),label='Kaiser=10')
#xs4 = np.linspace(-4,4,num=100)
#ax[0].plot(xs4,sinc_w(xs4, 'lanczos', a = 4), label='Lanczos,a=4')
ax[0].legend()
ax[0].set_xlabel(r"$\pm a$")
#n=400 #zeropadd FFT
#freqs2 = fftfreq(len(y2s),d=xs[1]-xs[0])
#freqs4 =fftfreq(400,d=xs4[1]-xs4[0])
ysh = ifftshift(y2s)
pady = np.concatenate((ysh[:100], np.zeros((1000,)), ysh[100:]))
freq2 = fftshift(fftfreq(len(pady), d=0.02))
ys4h = ifftshift(y4s)
pad4y = np.concatenate((ys4h[:250], np.zeros((2000,)), ys4h[250:]))
freq4 = fftshift(fftfreq(len(pad4y), d=0.02))
fpady = fft(pady)
fpad4y = fft(pad4y)
ax[1].plot(freq2, 10 * np.log10(np.abs(fftshift(fpady / fpady[0]))))
ax[1].plot(freq4, 10 * np.log10(np.abs(fftshift(fpad4y / fpad4y[0]))))
ysk = ifftshift(yks)
padk = np.concatenate((ysk[:250], np.zeros((2000,)), ysk[250:]))
fpadk = fft(padk)
ax[1].plot(freq4, 10 * np.log10(np.abs(fftshift(fpadk / fpadk[0]))))
ysk2 = ifftshift(yks2)
padk2 = np.concatenate((ysk2[:100], np.zeros((1000,)), ysk2[100:]))
fpadk2 = fft(padk2)
ax[1].plot(freq2, 10 * np.log10(np.abs(fftshift(fpadk2 / fpadk2[0]))))
#ax[1].plot(freqs4, fft(ifftshift(
#ax[1].plot(freqs, get_db_response(xs, 'hann'),label='Hann')
#ax[1].plot(freqs, get_db_response(xs, 'kaiser',alpha=5),label='Kaiser=5')
#ax[1].plot(freqs, get_db_response(xs, 'kaiser',alpha=10),label='Kaiser=10')
#ax[1].legend()
ax[1].set_ylabel("dB")
ax[1].set_xlabel("cycles/a")
plt.show()
#fig.savefig("plots/sinc_windows.png")
##can try with different windows and see the effect on a spectrum.
# take PHOENIX spectrum linear in lambda over some narrow wavelength, FFT, convolve with Gaussian. Then see how
# different windows recover features of the spectrum.
#Might consider Hann, Kaiser
#How can we get a handle on what is the problem -- is it spectral leakage or is it blurring out frequencies? Zoom in
# on actual FT to see?
def plot_truncation():
n_wl = len(w_full)
if n_wl % 2 == 0:
print("Full Even")
else:
print("Full Odd")
out = fft(ifftshift(f_full))
w0 = ifftshift(w_full)[0]
freqs = fftfreq(len(f_full), d=0.01) # spacing, Ang
#sfreqs = fftshift(freqs)
taper = gauss_taper(freqs, sigma=0.0496) #Ang, corresponds to 2.89 km/s at 5150A.
tout = out * taper
#ignore all samples higher than 5.0 km/s = 0.5/0.0429 cycles/Ang
sc = 0.5 / 0.0429
ind = np.abs(freqs) <= sc
ttrim = tout[ind]
if len(ttrim) % 2 == 0:
print("Trim Even")
else:
print("Trim Odd")
f_restored = fftshift(ifft(tout))
np.save("PH6.8kms_0.01ang.npy", np.array([w_full, np.abs(f_restored)]))
scale_factor = len(ttrim) / n_wl
f_restored2 = scale_factor * fftshift(ifft(ttrim))
d = freqs[1]
print(d)
# must keep track of which index!! AND which ifftshift vs fftshift
raw_wl = fftshift(fftfreq(len(ttrim), d=d))
wl_restored = raw_wl + w0
np.save("PH2.5kms.npy", np.array([wl_restored, np.abs(f_restored2)]))
plt.plot(w_full, f_restored)
plt.plot(wl_restored, f_restored2, "go")
plt.show()
return wl_restored
def plot_FFTs():
fig, ax = plt.subplots(nrows=4, figsize=(15, 8))
#n = 100000
##Take FFT of f_grid
#w_full = w_full[:-1]
#f_full = f_full[:-1]
out = np.fft.fft(np.fft.fftshift(f_full))
freqs = fftfreq(len(f_full), d=0.01) # spacing, Ang
sfreqs = fftshift(freqs)
taper = gauss_taper(freqs, sigma=0.0496) #Ang, corresponds to 2.89 km/s at 5150A.
tout = out * taper
ax[0].plot(sfreqs, np.fft.fftshift(out))
ax[1].plot(tout)
#ax[1].plot(sfreqs,np.fft.fftshift(taper)*tout[0])
ax[1].set_xlabel(r"cycles/$\lambda$")
n_wl = len(w_full)
print(n_wl)
#trucate at 3 Sigma = 3.21 cycles/Ang
#ind = np.abs(freqs) < 3.21
#Pad tout and window
f_restored = ifftshift(ifft(tout))
#ax[2].plot(w_full,f_full)
ax[2].plot(w_full, f_restored, "bo")
#where to pad zeros? In the middle near high frequencies, so it goes +, zeros, -
zeros = np.zeros((n_wl,))
    nyq = int(np.ceil(n_wl / 2))  # find where the nyquist frequency is stored in the array
t_pack = np.concatenate((tout[:nyq], zeros, tout[nyq:]))
wl0 = w_full[nyq]
scale_factor = len(t_pack) / n_wl
f_restored2 = scale_factor * ifftshift(ifft(t_pack))
wls = ifftshift(fftfreq(len(t_pack), d=0.01)) + wl0
#ax[2].plot(wls,f_restored2)
#print(np.sum(f_restored),np.sum(f_restored2))
#print(trapz(f_restored,w_full),trapz(f_restored2,wls))
#sample at an offset in phase
half_shift = tout * np.exp(-2j * np.pi * freqs * 0.0248)
f_restored_shift = ifftshift(ifft(half_shift))
ax[2].plot(w_full - 0.0248, f_restored_shift, "go")
plt.show()
def plot_pixel_effect():
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(8, 8))
#fig = plt.figure()
#ax = fig.add_subplot(111)
out = fft(ifftshift(f_full))
freqs = fftfreq(len(f_full), d=0.01) # spacing, Ang
sfreqs = fftshift(freqs)
taper = gauss_taper(freqs, sigma=0.0496) #Ang, corresponds to 2.89 km/s at 5150A.
tout = out * taper
for ax in axs[:, 0]:
ax.plot(sfreqs, fftshift(tout) / tout[0])
ax.plot(sfreqs, fftshift(taper))
ax.plot(sfreqs, 0.0395 * np.sinc(0.0395 * sfreqs))
ax.plot(sfreqs, 0.0472 * np.sinc(0.0472 * sfreqs))
for ax in axs[:, 1]:
ax.plot(sfreqs, 10 * np.log10(np.abs(fftshift(tout) / tout[0])))
ax.plot(sfreqs, 10 * np.log10(np.abs(fftshift(taper))))
ax.plot(sfreqs, 10 * np.log10(np.abs(0.0395 * np.sinc(0.0395 * sfreqs))))
ax.plot(sfreqs, 10 * np.log10(np.abs(0.0472 * np.sinc(0.0472 * sfreqs))))
axs[0, 0].set_ylabel("Norm amp")
axs[1, 0].set_ylabel("Norm amp")
axs[0, 1].set_ylabel("dB")
axs[1, 1].set_ylabel("dB")
for ax in axs.flatten():
ax.set_xlabel(r"cycles/$\lambda$")
plt.show()
def compare_interpolated_spectrum():
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
out = fft(ifftshift(f_full))
freqs = fftfreq(len(f_full), d=0.01) # spacing, Ang
sfreqs = fftshift(freqs)
taper = gauss_taper(freqs, sigma=0.0496) #Ang, corresponds to 2.89 km/s at 5150A.
tout = out * taper
ax.plot(sfreqs, fftshift(tout))
wl_h, fl_h = np.abs(np.load("PH6.8kms_0.01ang.npy"))
wl_l, fl_l = np.abs(np.load("PH2.5kms.npy"))
#end edges
wl_he = wl_h[200:-200]
fl_he = fl_h[200:-200]
interp = Sinc_w(wl_l, fl_l, a=5, window='kaiser')
fl_hi = interp(wl_he)
d = wl_he[1] - wl_he[0]
out = fft(ifftshift(fl_hi))
freqs = fftfreq(len(out), d=d)
ax.plot(fftshift(freqs), fftshift(out))
plt.show()
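# Note added for clarity (not in the original script): lan_intp, plot_lan_intp and
# test_interpolate below rely on module-level ``xs``, ``ys`` and ``a`` that are never
# defined in this attic version of the file; define a sample grid first (for example
# xs = np.arange(10., 25.), ys = np.sin(xs), a = 2) before re-enabling the
# commented-out calls at the bottom of the file.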
@np.vectorize
def lan_intp(x):
x_floor = np.argwhere((xs < x))[-1][0] # x = 13, x_floor = 4
b_i = x_floor - a + 1
e_i = x_floor + a
ii = np.arange(b_i, e_i + 1)
s_x = np.sum(ys[ii] * L(x - xs[ii]))
return s_x
def plot_lan_intp():
fig = plt.figure()
ax = fig.add_subplot(111)
xs_fine = np.linspace(14.1, 20.9)
ax.plot(xs_fine, lan_intp(xs_fine))
ax.plot(xs, ys, "o")
plt.show()
def test_interpolate():
fig = plt.figure()
ax = fig.add_subplot(111)
xs_fine = np.linspace(10.1, 24.9)
interp = interp1d(xs, ys)
ax.plot(xs_fine, interp(xs_fine))
ax.plot(xs, ys, "o")
plt.show()
#Take 2.5kms sampled spectrum, then Lanczos interpolate to the original PHOENIX wl points
def test_lan_intp():
wl_h, fl_h = np.abs(np.load("PH6.8kms_0.01ang.npy"))
wl_l, fl_l = np.abs(np.load("PH2.5kms.npy"))
#end edges
wl_he = wl_h[200:-200]
fl_he = fl_h[200:-200]
interp = Sinc_w(wl_l, fl_l, a=5, window='kaiser')
fl_hi = interp(wl_he)
#print(np.where(np.isnan(fl_hi)==True))
#print(len(fl_hi))
bad_array = np.array([355, 3688, 7021])
print(wl_he[bad_array])
#print(interp(wl_he[bad_array]))
fig, ax = plt.subplots(nrows=2, figsize=(8, 8))
ax[0].plot(wl_h, fl_h)
ax[0].plot(wl_l, fl_l, "go")
ax[0].plot(wl_he, fl_hi, "r.")
ax[1].plot(wl_h, fl_h)
ax[1].plot(wl_l, fl_l, "go")
ax[1].plot(wl_he, fl_hi, "r.")
plt.show()
#test_interpolate()
#plot_lan_intp()
#plot_FFTs()
#plot_windows()
#plot_kaiser_discrete()
#plot_sinc_windows()
#plot_pixel_effect()
#wl = plot_truncation()
#test_lan_intp()
compare_interpolated_spectrum()
|
BrownDwarf/Starfish
|
attic/lanczos_interp.py
|
Python
|
bsd-3-clause
| 12,431
|
[
"Gaussian"
] |
1415034973d7bf46b1e30fc0f4ece3d482f709686d252157ad30a2fe50ec3d76
|
""" StratusLabClient
Detailed implementation of the StratusLab methods for the StratusLabClient
class. Uses the Libcloud API to connect to the StratusLab services.
Author: Charles Loomis
"""
import os
import tempfile
from ConfigParser import SafeConfigParser
from StringIO import StringIO
from contextlib import closing
# FIXME: Hack, hack, hack...
# ensures that StratusLab Libcloud driver is loaded before use
from libcloud.compute.providers import set_driver
set_driver( 'stratuslab',
'stratuslab.libcloud.compute_driver',
'StratusLabNodeDriver' )
from libcloud.compute.base import NodeAuthSSHKey
from libcloud.compute.providers import get_driver
# DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
# VMDIRAC
from VMDIRAC.WorkloadManagementSystem.Client.SshContextualize import SshContextualize
class StratusLabClient( object ):
""" Implementation of the StratusLabImage functionality. """
def __init__( self, endpointConfiguration, imageConfiguration ):
"""
Initializes this class with the applianceIdentifier (Stratuslab Marketplace
image identifier), the cloud infrastructure, and the resource requirements
(size) of the instances.
NOTE: This constructor will raise an exception if there is a problem with
any of the configuration, either when creating the Libcloud driver or with
the given parameters.
:Parameters:
**endpointConfiguration** - `StratusLabEndpointConfiguration`
object containing the configuration for a StratusLab endpoint; this
object must have been validated before calling this constructor
**imageConfiguration** - `ImageConfiguration`
object containing the configuration for the appliance (image) to be
instantiated; this object must have been validated before calling
this constructor
"""
self.log = gLogger.getSubLogger(self.__class__.__name__)
# Create the configuration file for the StratusLab driver.
self.endpointConfig = endpointConfiguration.config()
path = StratusLabClient._create_stratuslab_config( self.endpointConfig )
try:
# Obtain instance of StratusLab driver.
StratusLabDriver = get_driver( 'stratuslab' )
self._driver = StratusLabDriver( 'unused-key', stratuslab_user_config = path )
finally:
try:
os.remove(path)
except:
pass
self.imageConfig = imageConfiguration.config()
self.image = self._get_image( self.imageConfig[ 'bootImageName' ] )
self.size = self._get_size( self.imageConfig[ 'flavorName' ] )
self.context_method = self.imageConfig[ 'contextMethod' ]
self.context_config = self.imageConfig[ 'contextConfig' ]
self.location = self._get_location()
def check_connection(self):
"""
Checks the connection by trying to list the running machine instances (nodes).
Note that listing the running nodes is not a standard Libcloud function.
:return: S_OK | S_ERROR
"""
try:
_ = self._driver.list_nodes()
return S_OK()
except Exception, errmsg:
return S_ERROR( errmsg )
def create( self, vmdiracInstanceID = '' ):
"""
This creates a new virtual machine instance based on the appliance identifier
and cloud identifier defined when this object was created.
Successful creation returns a tuple with the node object returned from the
StratusLab Libcloud driver and the public IP address of the instance.
NOTE: The node object should be treated as an opaque identifier by the
caller and returned unmodified when calling the other methods of this class.
:return: S_OK( ( node, publicIP ) ) | S_ERROR
"""
# Get ssh key.
home = os.path.expanduser( '~' )
ssh_public_key_path = os.path.join( home, '.ssh', 'id_dsa.pub' )
with open(ssh_public_key_path) as f:
pubkey = NodeAuthSSHKey(f.read())
# Create the new instance, called a 'node' for Libcloud.
try:
node = self._driver.create_node( name = vmdiracInstanceID,
size = self.size,
location = self.location,
image = self.image,
auth = pubkey )
public_ips = node.public_ips
if len( public_ips ) > 0:
public_ip = public_ips[ 0 ]
else:
public_ip = None
return S_OK( ( node, public_ip ) )
except Exception, e:
return S_ERROR( e )
def status( self, node ):
"""
Return the state of the given node. This converts the Libcloud states (0-4)
to their DIRAC string equivalents. Note that this is not a reversible mapping.
:Parameters:
**node** - `string`
node object returned from the StratusLab Libcloud driver
:return: S_OK( status ) | S_ERROR
"""
state = node.state
# reversed from libcloud
STATE_MAP = { 0: 'RUNNING',
1: 'REBOOTING',
2: 'TERMINATED',
3: 'PENDING',
4: 'UNKNOWN' }
if state not in STATE_MAP:
return S_ERROR( 'invalid node state (%s) detected' % state )
return S_OK( STATE_MAP[ state ] )
def terminate( self, node, public_ip = '' ):
"""
Terminates the node with the given instanceId.
:Parameters:
**node** - `node`
node object returned from the StratusLab Libcloud driver
**public_ip** - `string`
parameter is ignored
:return: S_OK | S_ERROR
"""
try:
if node:
node.destroy()
return S_OK()
except Exception, e:
return S_ERROR( e )
def contextualize( self, node, public_ip ):
"""
Contextualize the given instance. This is currently a no-op.
This must return S_OK(node) on success!
:Parameters:
**node** - `node`
node object returned from the StratusLab Libcloud driver
**public_ip** - `string`
public IP assigned to the node if any
:return: S_OK(node) | S_ERROR
"""
self._driver.wait_until_running( [ node ] )
context_choices = {
'ssh' : self._ssh_contextualization,
'none' : self._noop_contextualization
}
try:
context_function = context_choices[ self.context_method ]
except KeyError, e:
return S_ERROR( 'invalid context method: %s' % self.context_method )
try:
result = context_function( node, public_ip )
if not result[ 'OK' ]:
return result
except Exception, e:
return S_ERROR( 'error running context function: %s' % e )
return S_OK( node )
def _get_location( self ):
locations = self._driver.list_locations()
if len( locations ) > 0:
return locations[ 0 ]
raise Exception( 'location cannot be found' )
def _get_image( self, applianceIdentifier ):
images = self._driver.list_images()
for image in images:
if image.id == applianceIdentifier:
return image
raise Exception( 'image for %s cannot be found' % applianceIdentifier )
def _get_size( self, sizeIdentifier ):
sizes = self._driver.list_sizes()
for size in sizes:
if size.id == sizeIdentifier:
return size
raise Exception( 'size for %s cannot be found' % sizeIdentifier )
@staticmethod
def _create_stratuslab_config( endpoint_params ):
"""
The argument, endpoint_params, is a dictionary with the configuration for
the StratusLab Libcloud API. These parameters are written to a
temporary file within a [default] section.
This function returns the name of the temporary file created. The
caller of this function is responsible for deleting this file.
"""
parser = SafeConfigParser()
for key, value in endpoint_params.items():
if key.startswith( 'ex_' ):
config_key = key.replace( 'ex_', '', 1 )
parser.set( None, config_key, value )
# Directly defining a 'default' section is not allowed, so
# get around this by replacing [DEFAULT] with [default].
with closing( StringIO() ) as mem_buffer:
parser.write( mem_buffer )
cfg = mem_buffer.getvalue().replace( '[DEFAULT]', '[default]', 1 )
_, path = tempfile.mkstemp( text = True )
with open(path, 'w') as f:
f.write( cfg )
return path
# cpuTime defaults to 0 because contextualize() above invokes this with only (node, public_ip).
def _ssh_contextualization(self, node, public_ip, cpuTime = 0 ):
return SshContextualize().contextualise( self.imageConfig, self.endpointConfig,
uniqueId = str( node ),
publicIp = public_ip,
cpuTime = cpuTime )
def _noop_contextualization( self, node, public_ip ):
return S_OK()
#...............................................................................
#EOF
|
myco/VMDIRAC
|
WorkloadManagementSystem/Client/StratusLabClient.py
|
Python
|
gpl-3.0
| 8,891
|
[
"DIRAC"
] |
e8c1e76ff2b68a2fc0c382de6f002ece6a8fca81023900820eeee2459bd4ea29
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
"""
import collections
import datetime
import logging
import os
import re
import shlex
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import io_stats_parser
try:
import pexpect
except:
pexpect = None
sys.path.append(os.path.join(
constants.CHROME_DIR, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start and end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
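# Illustrative sketch of what PEXPECT_LINE_RE captures; the sample output is made up.
def _example_pexpect_line_capture():
    sample = 'previous output\nD/Tag ( 1234): hello world\r\n'
    return PEXPECT_LINE_RE.search(sample).group(1)  # -> 'D/Tag ( 1234): hello world'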
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# Java properties file
LOCAL_PROPERTIES_PATH = '/data/local.prop'
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
MEMORY_INFO_RE = re.compile('^(?P<key>\w+):\s+(?P<usage_kb>\d+) kB$')
NVIDIA_MEMORY_INFO_RE = re.compile('^\s*(?P<user>\S+)\s*(?P<name>\S+)\s*'
'(?P<pid>\d+)\s*(?P<usage_bytes>\d+)$')
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
MD5SUM_DEVICE_PATH = '/data/local/tmp/md5sum_bin'
def GetEmulators():
"""Returns a list of emulators. Does not filter by status (e.g. offline).
Both devices starting with 'emulator' in the example output below would be returned:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
emulator-5558 device
"""
re_device = re.compile('^emulator-[0-9]+', re.MULTILINE)
devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
return devices
def GetAVDs():
"""Returns a list of AVDs."""
re_avd = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
avds = re_avd.findall(cmd_helper.GetCmdOutput(['android', 'list', 'avd']))
return avds
def GetAttachedDevices():
"""Returns a list of attached, online android devices.
If a preferred device has been set with ANDROID_SERIAL, it will be first in
the returned list.
Example output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
"""
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
preferred_device = os.environ.get('ANDROID_SERIAL')
if preferred_device in devices:
devices.remove(preferred_device)
devices.insert(0, preferred_device)
return devices
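# Illustrative sketch of the parsing done by GetAttachedDevices; the 'adb devices'
# output below is made up. Offline devices and banner lines are not matched.
def _example_attached_device_parsing():
    sample = ('List of devices attached\n'
              '027c10494100b4d7\tdevice\n'
              'emulator-5554\toffline\n')
    re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
    return re_device.findall(sample)  # -> ['027c10494100b4d7']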
def IsDeviceAttached(device):
return device in GetAttachedDevices()
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir) + 1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
def _ComputeFileListHash(md5sum_output):
"""Returns a list of MD5 strings from the provided md5sum output."""
return [line.split(' ')[0] for line in md5sum_output]
def _HasAdbPushSucceeded(command_output):
"""Returns whether adb push has succeeded from the provided output."""
if not command_output:
return False
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', command_output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + command_output)
return False
return True
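# Illustrative sketch of the success/failure outputs _HasAdbPushSucceeded distinguishes;
# both strings below are made up.
def _example_adb_push_output_check():
    ok = '3035 KB/s (12512056 bytes in 4.025s)'
    failed = 'failed to copy /tmp/foo to /data/foo: Permission denied'
    return _HasAdbPushSucceeded(ok), _HasAdbPushSucceeded(failed)  # -> (True, False)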
def GetLogTimestamp(log_line, year):
"""Returns the timestamp of the given |log_line| in the given year."""
try:
return datetime.datetime.strptime('%s-%s' % (year, log_line[:18]),
'%Y-%m-%d %H:%M:%S.%f')
except (ValueError, IndexError):
logging.critical('Error reading timestamp from ' + log_line)
return None
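# Illustrative sketch of GetLogTimestamp on a made-up logcat 'threadtime' line.
def _example_log_timestamp_parsing():
    line = '05-12 12:29:54.131  1234  1300 I chromium: hello'
    return GetLogTimestamp(line, '2013')
    # -> datetime.datetime(2013, 5, 12, 12, 29, 54, 131000)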
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb.
Args:
device: If given, adb commands are only send to the device of this ID.
Otherwise commands are sent to all attached devices.
"""
def __init__(self, device=None):
self._adb = adb_interface.AdbInterface()
if device:
self._adb.SetTargetSerial(device)
self._device = device
self._logcat = None
self.logcat_process = None
self._pushed_files = []
self._device_utc_offset = self.RunShellCommand('date +%z')[0]
self._md5sum_path = ''
self._external_storage = ''
self._util_wrapper = ''
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
return self._adb
def IsOnline(self):
"""Checks whether the device is online.
Returns:
True if device is in 'device' mode, False otherwise.
"""
out = self._adb.SendCommand('get-state')
return out.strip() == 'device'
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
def EnableAdbRoot(self):
"""Enables adb root on the device.
Returns:
True: if output from executing adb root was as expected.
False: otherwise.
"""
if self.GetBuildType() == 'user':
logging.warning("Can't enable root in production builds with type user")
return False
else:
return_value = self._adb.EnableAdbRoot()
# EnableAdbRoot inserts a call for wait-for-device only when adb logcat
# output matches what is expected. Just to be safe add a call to
# wait-for-device.
self._adb.SendCommand('wait-for-device')
return return_value
def GetDeviceYear(self):
"""Returns the year information of the date on device."""
return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
if not self._external_storage:
self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
assert self._external_storage, 'Unable to find $EXTERNAL_STORAGE'
return self._external_storage
def WaitForDevicePm(self):
"""Blocks until the device's package manager is available.
To work around http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm()
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s', e)
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot or not self.IsRootEnabled():
self._adb.SendCommand('reboot')
timeout = 300
else:
self.RestartShell()
timeout = 120
# To run tests we need at least the package manager and the sd card (or
# other external storage) to be ready.
self.WaitForDevicePm()
self.WaitForSdCardReady(timeout)
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
logging.info('>>> $' + uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
reinstall: Reinstall an existing apk, keeping the data.
Returns:
A status string returned by adb install
"""
assert os.path.isfile(package_file_path), ('<%s> is not a file' %
package_file_path)
install_cmd = ['install']
if reinstall:
install_cmd.append('-r')
install_cmd.append(package_file_path)
install_cmd = ' '.join(install_cmd)
logging.info('>>> $' + install_cmd)
return self._adb.SendCommand(install_cmd,
timeout_time=2 * 60,
retry_count=0)
def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
reboots_on_failure=2):
"""Installs specified package and reboots device on timeouts.
Args:
apk_path: Path to .apk file to install.
keep_data: Reinstalls instead of uninstalling first, preserving the
application data.
package_name: Package name (only needed if keep_data=False).
reboots_on_failure: number of time to reboot if package manager is frozen.
Returns:
A status string returned by adb install
"""
reboots_left = reboots_on_failure
while True:
try:
if not keep_data:
assert package_name
self.Uninstall(package_name)
install_status = self.Install(apk_path, reinstall=keep_data)
if 'Success' in install_status:
return install_status
except errors.WaitForResponseTimedOutError:
print '@@@STEP_WARNINGS@@@'
logging.info('Timeout on installing %s' % apk_path)
if reboots_left <= 0:
raise Exception('Install failure')
# Force a hard reboot on last attempt
self.Reboot(full_reboot=(reboots_left == 1))
reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
def RestartAdbServer(self):
"""Restart the adb server."""
self.KillAdbServer()
self.StartAdbServer()
def KillAdbServer(self):
"""Kill adb server."""
adb_cmd = ['adb', 'kill-server']
return cmd_helper.RunCmd(adb_cmd)
def StartAdbServer(self):
"""Start adb server."""
adb_cmd = ['adb', 'start-server']
return cmd_helper.RunCmd(adb_cmd)
def WaitForSystemBootCompleted(self, wait_time):
"""Waits for targeted system's boot_completed flag to be set.
Args:
wait_time: time in seconds to wait
Raises:
WaitForResponseTimedOutError if wait_time elapses and flag still not
set.
"""
logging.info('Waiting for system boot completed...')
self._adb.SendCommand('wait-for-device')
# Now the device is there, but system not boot completed.
# Query the sys.boot_completed flag with a basic command
boot_completed = False
attempts = 0
wait_period = 5
while not boot_completed and (attempts * wait_period) < wait_time:
output = self._adb.SendShellCommand('getprop sys.boot_completed',
retry_count=1)
output = output.strip()
if output == '1':
boot_completed = True
else:
# If 'error: xxx' returned when querying the flag, it means
# adb server lost the connection to the emulator, so restart the adb
# server.
if 'error:' in output:
self.RestartAdbServer()
time.sleep(wait_period)
attempts += 1
if not boot_completed:
raise errors.WaitForResponseTimedOutError(
'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
"""Wait for the SD card ready before pushing data into it."""
logging.info('Waiting for SD card ready...')
sdcard_ready = False
attempts = 0
wait_period = 5
external_storage = self.GetExternalStorage()
while not sdcard_ready and attempts * wait_period < timeout_time:
output = self.RunShellCommand('ls ' + external_storage)
if output:
sdcard_ready = True
else:
time.sleep(wait_period)
attempts += 1
if not sdcard_ready:
raise errors.WaitForResponseTimedOutError(
'SD card not ready after %s seconds' % timeout_time)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=False):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send. Must not include
the single quotes as we use them to escape the whole command.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
logging.info('>>> $' + command)
if "'" in command: logging.warning(command + " contains ' quotes")
result = self._adb.SendShellCommand(
"'%s'" % command, timeout_time).splitlines()
if ['error: device not found'] == result:
raise errors.DeviceUnresponsiveError('device not found')
if log_result:
logging.info('\n>>> '.join(result))
return result
def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
log_result=False):
"""See RunShellCommand() above.
Returns:
The tuple (exit code, list of output lines).
"""
lines = self.RunShellCommand(
command + '; echo %$?', timeout_time, log_result)
last_line = lines[-1]
status_pos = last_line.rfind('%')
assert status_pos >= 0
status = int(last_line[status_pos + 1:])
if status_pos == 0:
lines = lines[:-1]
else:
lines = lines[:-1] + [last_line[:status_pos]]
return (status, lines)
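# Illustrative sketch of the parsing above on made-up shell output: the '; echo %$?'
# suffix appends the exit status after a '%' marker, which is then split back out.
#   >>> lines = ['some output', 'ok%0']            # as RunShellCommand might return
#   >>> last_line = lines[-1]
#   >>> status = int(last_line[last_line.rfind('%') + 1:])
#   >>> status, lines[:-1] + [last_line[:last_line.rfind('%')]]
#   (0, ['some output', 'ok'])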
def KillAll(self, process):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
self.RunShellCommand('kill ' + ' '.join(pids))
return len(pids)
def KillAllBlocking(self, process, timeout_sec):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
return 0
return processes_killed
def _GetActivityCommand(self, package, activity, wait_for_completion, action,
category, data, extras, trace_file_name, force_stop):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
Returns:
the command to run on the target to start the activity
"""
cmd = 'am start -a %s' % action
if force_stop:
cmd += ' -S'
if wait_for_completion:
cmd += ' -W'
if category:
cmd += ' -c %s' % category
if package and activity:
cmd += ' -n %s/%s' % (package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
for key in extras:
value = extras[key]
if isinstance(value, str):
cmd += ' --es'
elif isinstance(value, bool):
cmd += ' --ez'
elif isinstance(value, int):
cmd += ' --ei'
else:
raise NotImplementedError(
'Need to teach StartActivity how to pass %s extras' % type(value))
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
return cmd
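# Illustrative sketch only; the package, data URL and extras below are made up. For
#   extras = {'mode': 'fast', 'debug': True, 'count': 3}
# the method above builds a command along the lines of (extras in dict order):
#   am start -a android.intent.action.VIEW -n com.example.app/.Main
#       -d "http://www.example.com/" --es mode fast --ez debug True --ei count 3
# String extras use --es, booleans --ez and integers --ei.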
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.google.android.apps.chrome').
activity: Name of activity (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
wait_for_completion: wait for the activity to finish launching (-W flag).
action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
category: string (e.g. "android.intent.category.HOME")
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity. Value types are significant: they
select the flag used (--es, --ez or --ei).
trace_file_name: If used, turns on and saves the trace to this file name.
force_stop: force stop the target app before starting the activity (-S
flag).
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop)
self.RunShellCommand(cmd)
def StartActivityTimed(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False):
"""Starts |package|'s activity on the device, returning the start time
Args - as for StartActivity
Returns:
a timestamp string for the time at which the activity started
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop)
self.StartMonitoringLogcat()
self.RunShellCommand('log starting activity; ' + cmd)
activity_started_re = re.compile('.*starting activity.*')
m = self.WaitForLogMatch(activity_started_re, None)
assert m
start_line = m.group(0)
return GetLogTimestamp(start_line, self.GetDeviceYear())
def GoHome(self):
"""Tell the device to return to the home screen. Blocks until completion."""
self.RunShellCommand('am start -W '
'-a android.intent.action.MAIN -c android.intent.category.HOME')
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g.
com.google.android.apps.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
# Check that the package exists before clearing it. Necessary because
# calling pm clear on a package that doesn't exist may never return.
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output only contains anything if and only if the package exists.
if pm_path_output:
self.CloseApplication(package)
self.RunShellCommand('pm clear ' + package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def PushIfNeeded(self, local_path, device_path):
"""Pushes |local_path| to |device_path|.
Works for files and directories. This method skips copying anything that
already exists on the device with the same hash.
All pushed files can be removed by calling RemovePushedFiles().
"""
assert os.path.exists(local_path), 'Local path not found %s' % local_path
if not self._md5sum_path:
default_build_type = os.environ.get('BUILD_TYPE', 'Debug')
md5sum_path = '%s/%s/md5sum_bin' % (cmd_helper.OutDirectory.get(),
default_build_type)
if not os.path.exists(md5sum_path):
md5sum_path = '%s/Release/md5sum_bin' % cmd_helper.OutDirectory.get()
assert os.path.exists(md5sum_path), 'Please build md5sum.'
command = 'push %s %s' % (md5sum_path, MD5SUM_DEVICE_PATH)
assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
self._md5sum_path = md5sum_path
self._pushed_files.append(device_path)
hashes_on_device = _ComputeFileListHash(
self.RunShellCommand(self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH +
' ' + device_path))
assert os.path.exists(local_path), 'Local path not found %s' % local_path
hashes_on_host = _ComputeFileListHash(
subprocess.Popen(
'%s_host %s' % (self._md5sum_path, local_path),
stdout=subprocess.PIPE, shell=True).stdout)
if hashes_on_device == hashes_on_host:
return
# They don't match, so remove everything first and then create it.
if os.path.isdir(local_path):
self.RunShellCommand('rm -r %s' % device_path, timeout_time=2 * 60)
self.RunShellCommand('mkdir -p %s' % device_path)
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout of
# 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (local_path, device_path)
logging.info('>>> $' + push_command)
output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
assert _HasAdbPushSucceeded(output)
def GetFileContents(self, filename, log_result=False):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('cat "%s" 2>/dev/null' % filename,
log_result=log_result)
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
_TEMP_FILE_BASE_FMT = 'temp_file_%d'
_TEMP_SCRIPT_FILE_BASE_FMT = 'temp_script_file_%d.sh'
def _GetDeviceTempFileName(self, base_name):
i = 0
while self.FileExistsOnDevice(
self.GetExternalStorage() + '/' + base_name % i):
i += 1
return self.GetExternalStorage() + '/' + base_name % i
def CanAccessProtectedFileContents(self):
"""Returns True if Get/SetProtectedFileContents would work via "su".
Devices running user builds don't have adb root, but may provide "su" which
can be used for accessing protected files.
"""
r = self.RunShellCommand('su -c cat /dev/null')
return r == [] or r[0].strip() == ''
def GetProtectedFileContents(self, filename, log_result=False):
"""Gets contents from the protected file specified by |filename|.
This is less efficient than GetFileContents, but will work for protected
files and device files.
"""
# Run the script as root
return self.RunShellCommand('su -c cat "%s" 2> /dev/null' % filename)
def SetProtectedFileContents(self, filename, contents):
"""Writes |contents| to the protected file specified by |filename|.
This is less efficient than SetFileContents, but will work for protected
files and device files.
"""
temp_file = self._GetDeviceTempFileName(AndroidCommands._TEMP_FILE_BASE_FMT)
temp_script = self._GetDeviceTempFileName(
AndroidCommands._TEMP_SCRIPT_FILE_BASE_FMT)
# Put the contents in a temporary file
self.SetFileContents(temp_file, contents)
# Create a script to copy the file contents to its final destination
self.SetFileContents(temp_script, 'cat %s > %s' % (temp_file, filename))
# Run the script as root
self.RunShellCommand('su -c sh %s' % temp_script)
# And remove the temporary files
self.RunShellCommand('rm ' + temp_file)
self.RunShellCommand('rm ' + temp_script)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2 * 60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- 1 user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self._device_utc_offset)
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
properties = file(temp_props_file.name).read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.RunShellCommand('getprop ' + JAVA_ASSERT_PROPERTY)
if was_set == enable:
return False
self.RunShellCommand('setprop %s "%s"' % (JAVA_ASSERT_PROPERTY,
enable and 'all' or ''))
return True
def GetBuildId(self):
"""Returns the build ID of the system (e.g. JRM79C)."""
build_id = self.RunShellCommand('getprop ro.build.id')[0]
assert build_id
return build_id
def GetBuildType(self):
"""Returns the build type of the system (e.g. eng)."""
build_type = self.RunShellCommand('getprop ro.build.type')[0]
assert build_type
return build_type
def GetProductModel(self):
"""Returns the namve of the product model (e.g. "Galaxy Nexus") """
model = self.RunShellCommand('getprop ro.product.model')[0]
assert model
return model
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
clear: If True the existing logcat output will be cleared, to avoid
matching historical output lurking in the log.
filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = []
if self._adb._target_arg:
args += shlex.split(self._adb._target_arg)
args += ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
if logfile:
logfile = NewLineNormalizer(logfile)
# Spawn logcat and synchronize with it.
for _ in range(4):
self._logcat = pexpect.spawn('adb', args, timeout=10, logfile=logfile)
self.RunShellCommand('log startup_sync')
if self._logcat.expect(['startup_sync', pexpect.EOF,
pexpect.TIMEOUT]) == 0:
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
"""Blocks until a matching line is logged or a timeout occurs.
Args:
success_re: A compiled re to search each line for.
error_re: A compiled re which, if found, terminates the search for
|success_re|. If None is given, no error condition will be detected.
clear: If True the existing logcat output will be cleared, defaults to
false.
timeout: Timeout in seconds to wait for a log match.
Raises:
pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
or |error_re|.
Returns:
The re match object if |success_re| is matched first or None if |error_re|
is matched first.
"""
logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
t0 = time.time()
while True:
if not self._logcat:
self.StartMonitoringLogcat(clear)
try:
while True:
# Note this will block for up to the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + timeout - time.time()
if time_remaining < 0: raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
if error_re:
error_match = error_re.search(line)
if error_match:
return None
success_match = success_re.search(line)
if success_match:
return success_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(timeout, success_re.pattern))
except pexpect.EOF:
# It seems that sometimes logcat can end unexpectedly. This seems
# to happen during Chrome startup after a reboot followed by a cache
# clean. I don't understand why this happens, but this code deals with
# getting EOF in logcat.
logging.critical('Found EOF in adb logcat. Restarting...')
# Rerun spawn with original arguments. Note that self._logcat.args[0] is
# the path of adb, so we don't want it in the arguments.
self._logcat = pexpect.spawn('adb',
self._logcat.args[1:],
timeout=self._logcat.timeout,
logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=['*:v']):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before some series of tests are run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
' '.join(filters))
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=subprocess.PIPE)
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
# Cannot evaluate directly as 0 is a possible value.
# Better to read self.logcat_process.stdout before killing it; otherwise
# communicate() may return incomplete output due to a broken pipe.
if self.logcat_process.poll() is None:
self.logcat_process.kill()
(output, _) = self.logcat_process.communicate()
self.logcat_process = None
return output
def SearchLogcatRecord(self, record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries representing matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
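# Illustrative sketch of the record format matched above; the logcat line is made up.
#   >>> reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$', re.MULTILINE)
#   >>> reg.findall('05-12 12:29:54.131  1234  1300 I chromium: Message we care about')
#   [('1234', '1300', 'I', 'chromium', ' Message we care about')]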
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
If the name of a process exactly matches the given name, the pid of
that process will be inserted to the front of the pid list.
"""
pids = []
for line in self.RunShellCommand('ps', log_result=False):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
if process_name == data[-1]:
pids.insert(0, data[1]) # PID is in the second column
else:
pids.append(data[1])
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
for line in self.GetFileContents('/proc/diskstats', log_result=False):
stats = io_stats_parser.ParseIoStatsLine(line)
if stats.device == 'mmcblk0':
return {
'num_reads': stats.num_reads_issued,
'num_writes': stats.num_writes_completed,
'read_ms': stats.ms_spent_reading,
'write_ms': stats.ms_spent_writing,
}
logging.warning('Could not find disk IO stats.')
return None
def GetMemoryUsageForPid(self, pid):
"""Returns the memory usage for given pid.
Args:
pid: The pid number of the specific process running on device.
Returns:
A tuple containing:
[0]: Dict of {metric:usage_kb}, for the process which has specified pid.
The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
Shared_Dirty, Private_Clean, Private_Dirty, Referenced, Swap,
KernelPageSize, MMUPageSize, Nvidia (tablet only).
[1]: Detailed /proc/[PID]/smaps information.
"""
usage_dict = collections.defaultdict(int)
smaps = collections.defaultdict(dict)
current_smap = ''
for line in self.GetProtectedFileContents('/proc/%s/smaps' % pid,
log_result=False):
items = line.split()
# See man 5 proc for more details. The format is:
# address perms offset dev inode pathname
if len(items) > 5:
current_smap = ' '.join(items[5:])
elif len(items) > 3:
current_smap = ' '.join(items[3:])
match = re.match(MEMORY_INFO_RE, line)
if match:
key = match.group('key')
usage_kb = int(match.group('usage_kb'))
usage_dict[key] += usage_kb
if key not in smaps[current_smap]:
smaps[current_smap][key] = 0
smaps[current_smap][key] += usage_kb
if not usage_dict or not any(usage_dict.values()):
# Presumably the process died between ps and calling this method.
logging.warning('Could not find memory usage for pid ' + str(pid))
for line in self.GetProtectedFileContents('/d/nvmap/generic-0/clients',
log_result=False):
match = re.match(NVIDIA_MEMORY_INFO_RE, line)
if match and match.group('pid') == pid:
usage_bytes = int(match.group('usage_bytes'))
usage_dict['Nvidia'] = int(round(usage_bytes / 1000.0)) # kB
break
return (usage_dict, smaps)
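# Illustrative sketch of the smaps parsing above; the line is made up.
#   >>> m = re.match(MEMORY_INFO_RE, 'Rss:                1024 kB')
#   >>> m.group('key'), int(m.group('usage_kb'))
#   ('Rss', 1024)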
def GetMemoryUsageForPackage(self, package):
"""Returns the memory usage for all processes whose name contains |pacakge|.
Args:
package: A string holding process name to lookup pid list for.
Returns:
A tuple containing:
[0]: Dict of {metric:usage_kb}, summed over all pids associated with
|name|.
The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
Shared_Dirty, Private_Clean, Private_Dirty, Referenced, Swap,
KernelPageSize, MMUPageSize, Nvidia (tablet only).
[1]: a list with detailed /proc/[PID]/smaps information.
"""
usage_dict = collections.defaultdict(int)
pid_list = self.ExtractPid(package)
smaps = collections.defaultdict(dict)
for pid in pid_list:
usage_dict_per_pid, smaps_per_pid = self.GetMemoryUsageForPid(pid)
smaps[pid] = smaps_per_pid
for (key, value) in usage_dict_per_pid.items():
usage_dict[key] += value
return usage_dict, smaps
def ProcessesUsingDevicePort(self, device_port):
"""Lists processes using the specified device port on loopback interface.
Args:
device_port: Port on device we want to check.
Returns:
A list of (pid, process_name) tuples using the specified port.
"""
tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
tcp_address = '0100007F:%04X' % device_port
pids = []
for single_connect in tcp_results:
connect_results = single_connect.split()
# Column 1 is the TCP port, and Column 9 is the inode of the socket
if connect_results[1] == tcp_address:
socket_inode = connect_results[9]
socket_name = 'socket:[%s]' % socket_inode
lsof_results = self.RunShellCommand('lsof', log_result=False)
for single_process in lsof_results:
process_results = single_process.split()
# Ignore the line if it has less than nine columns in it, which may
# be the case when a process stops while lsof is executing.
if len(process_results) <= 8:
continue
# Column 0 is the executable name
# Column 1 is the pid
# Column 8 is the Inode in use
if process_results[8] == socket_name:
pids.append((int(process_results[1]), process_results[0]))
break
logging.info('PidsUsingDevicePort: %s', pids)
return pids
def FileExistsOnDevice(self, file_name):
"""Checks whether the given file exists on the device.
Args:
file_name: Full path of file to check.
Returns:
True if the file exists, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -e "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
status = self._adb.SendShellCommand(
'\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
return int(status) == 0
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
def TakeScreenshot(self, host_file):
"""Saves a screenshot image to |host_file| on the host.
Args:
host_file: Absolute path to the image file to store on the host.
"""
host_dir = os.path.dirname(host_file)
if not os.path.exists(host_dir):
os.makedirs(host_dir)
device_file = '%s/screenshot.png' % self.GetExternalStorage()
self.RunShellCommand('/system/bin/screencap -p %s' % device_file)
assert self._adb.Pull(device_file, host_file)
assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
"""Sets a wrapper prefix to be used when running a locally-built
binary on the device (ex.: md5sum_bin).
"""
self._util_wrapper = util_wrapper
def RunInstrumentationTest(self, test, test_package, instr_args, timeout):
"""Runs a single instrumentation test.
Args:
test: Test class/method.
test_package: Package name of test apk.
instr_args: Extra key/value to pass to am instrument.
timeout: Timeout time in seconds.
Returns:
An instance of am_instrument_parser.TestResult object.
"""
instrumentation_path = ('%s/android.test.InstrumentationTestRunner' %
test_package)
args_with_filter = dict(instr_args)
args_with_filter['class'] = test
logging.info(args_with_filter)
(raw_results, _) = self._adb.StartInstrumentation(
instrumentation_path=instrumentation_path,
instrumentation_args=args_with_filter,
timeout_time=timeout)
assert len(raw_results) == 1
return raw_results[0]
def RunUIAutomatorTest(self, test, test_package, timeout):
"""Runs a single uiautomator test.
Args:
test: Test class/method.
test_package: Name of the test jar.
timeout: Timeout time in seconds.
Returns:
An instance of am_instrument_parser.TestResult object.
"""
cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
logging.info('>>> $' + cmd)
output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
# uiautomator doesn't fully conform to the instrumentation test runner
# convention and doesn't terminate with INSTRUMENTATION_CODE.
# Just assume the first result is valid.
(test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
return test_results[0]
class NewLineNormalizer(object):
"""A file-like object to normalize EOLs to '\n'.
Pexpect runs adb within a pseudo-tty device (see
http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
lines, the log ends up having '\r\r\n' at the end of each line. This
filter replaces the above with a single '\n' in the data stream.
"""
def __init__(self, output):
self._output = output
def write(self, data):
data = data.replace('\r\r\n', '\n')
self._output.write(data)
def flush(self):
self._output.flush()
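# Illustrative sketch (Python 2, like the rest of this module) of the rewrite
# NewLineNormalizer performs when wrapping a pexpect logfile; the line is made up.
def _example_newline_normalizer():
    from StringIO import StringIO
    buf = StringIO()
    normalizer = NewLineNormalizer(buf)
    normalizer.write('I/chromium( 123): hello\r\r\n')
    normalizer.flush()
    return buf.getvalue()  # -> 'I/chromium( 123): hello\n'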
|
timopulkkinen/BubbleFish
|
build/android/pylib/android_commands.py
|
Python
|
bsd-3-clause
| 47,266
|
[
"Galaxy"
] |
ffe387c29d6bddbf916e29ad926e32d84c461dc3286ae40e7438688ff9e1f552
|
# -*- coding: utf-8 -*-
"""
codegen
~~~~~~~
Extension to ast that allow ast -> python code generation.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
from ast import *
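# Note: the visitor below relies on the operator symbol tables BOOLOP_SYMBOLS,
# BINOP_SYMBOLS, CMPOP_SYMBOLS and UNARYOP_SYMBOLS, which are not defined in this
# copy of the module. A standard reconstruction of those mappings, as shipped with
# the original codegen module, is:
BOOLOP_SYMBOLS = {And: 'and', Or: 'or'}
BINOP_SYMBOLS = {Add: '+', Sub: '-', Mult: '*', Div: '/', FloorDiv: '//',
                 Mod: '%', Pow: '**', LShift: '<<', RShift: '>>',
                 BitOr: '|', BitXor: '^', BitAnd: '&'}
CMPOP_SYMBOLS = {Eq: '==', NotEq: '!=', Lt: '<', LtE: '<=', Gt: '>', GtE: '>=',
                 Is: 'is', IsNot: 'is not', In: 'in', NotIn: 'not in'}
UNARYOP_SYMBOLS = {Invert: '~', Not: 'not', UAdd: '+', USub: '-'}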
def to_source(node, indent_with=' ' * 4, add_line_information=False):
"""This function can convert a node tree back into python sourcecode.
This is useful for debugging purposes, especially if you're dealing with
custom asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. By default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
If `add_line_information` is set to `True` comments for the line numbers
of the nodes are added to the output. This can be used to spot wrong line
number information of statement nodes.
"""
generator = SourceGenerator(indent_with, add_line_information)
generator.visit(node)
return ''.join(generator.result)
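# Illustrative usage sketch: round-trip a tiny snippet through ast.parse and to_source.
# The exact output can vary across Python versions; on the versions this module
# targets it yields the original statement back.
def _example_to_source_roundtrip():
    import ast
    return to_source(ast.parse('x = 1 + 2'))  # -> 'x = 1 + 2'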
class SourceGenerator(NodeVisitor):
"""This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`to_source` function.
"""
def __init__(self, indent_with, add_line_information=False):
self.result = []
self.indent_with = indent_with
self.add_line_information = add_line_information
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, node=None, extra=0):
self.new_lines = max(self.new_lines, 1 + extra)
if node is not None and self.add_line_information:
self.write('# line: %s' % node.lineno)
self.new_lines = 1
def body(self, statements):
self.new_lines = max(self.new_lines, 1)
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline(decorator)
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline(node)
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline(node)
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline(node)
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.write(item)
def visit_Import(self, node):
self.newline(node)
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline(node)
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(extra=1)
self.decorators(node)
self.newline(node)
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(extra=2)
self.decorators(node)
self.newline(node)
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline(node)
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline(node)
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline(node)
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline(node)
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline(node)
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline(node)
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline(node)
self.write('del ')
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
self.newline(node)
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline(node)
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline(node)
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline(node)
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline(node)
self.write('break')
def visit_Continue(self, node):
self.newline(node)
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline(node)
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
        for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline(node)
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)
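# --- Editor's illustrative sketch (not part of the original codegen.py) ---
# The generator above is built on ast.NodeVisitor-style dispatch: each visit_<Type>
# method handles one AST node class and recurses into its children. A minimal,
# self-contained example of the same dispatch pattern using only the stdlib:
import ast

class _BinOpCounter(ast.NodeVisitor):
    """Count binary operations in a piece of source code (illustration only)."""
    def __init__(self):
        self.count = 0

    def visit_BinOp(self, node):
        self.count += 1
        self.generic_visit(node)  # keep walking into nested operands

def _count_binops(source):
    counter = _BinOpCounter()
    counter.visit(ast.parse(source))
    return counter.count  # e.g. _count_binops("a + b * c") returns 2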
|
laanwj/ast_pickler
|
codegen.py
|
Python
|
mit
| 15,495
|
[
"VisIt"
] |
fadc2c2ed032ffc7e5282d784a6dcc6a51fcf838fa952b2bd662e1665baea1f7
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
ddof=0):
"""Compute the weighted average and standard deviation along the
specified axis.
Parameters
----------
a : array_like
Calculate average and standard deviation of these values.
axis : int, optional
Axis along which the statistics are computed. The default is
to compute them on the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each
value in `a` contributes to the average according to its
associated weight. The weights array can either be 1-D (in
which case its length must be the size of `a` along the given
axis) or of the same shape as `a`. If `weights=None`, then all
data in `a` are assumed to have a weight equal to one.
with_mean : bool, optional, defaults to True
Compute average if True.
with_std : bool, optional, defaults to True
Compute standard deviation if True.
ddof : int, optional, defaults to 0
It means delta degrees of freedom. Variance is calculated by
dividing by `n - ddof` (where `n` is the number of
        elements). By default it computes the maximum likelihood
estimator.
Returns
-------
average, std
Return the average and standard deviation along the specified
        axis. If either was not requested, `None` is returned instead.
"""
if not (with_mean or with_std):
raise ValueError("Either the mean or standard deviation need to be"
" computed.")
a = np.asarray(a)
if weights is None:
avg = a.mean(axis=axis) if with_mean else None
std = a.std(axis=axis, ddof=ddof) if with_std else None
else:
avg = np.average(a, axis=axis, weights=weights)
if with_std:
if axis is None:
variance = np.average((a - avg)**2, weights=weights)
else:
# Make sure that the subtraction to compute variance works for
# multidimensional arrays
a_rolled = np.rollaxis(a, axis)
# Numpy doesn't have a weighted std implementation, but this is
# stable and fast
variance = np.average((a_rolled - avg)**2, axis=0,
weights=weights)
if ddof != 0: # Don't waste time if variance doesn't need scaling
if axis is None:
variance *= a.size / (a.size - ddof)
else:
variance *= a.shape[axis] / (a.shape[axis] - ddof)
std = np.sqrt(variance)
else:
std = None
avg = avg if with_mean else None
return avg, std
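# --- Editor's illustrative sketch (not part of the original module); defined but
# never called, so importing this file is unaffected.
def _example_weighted_stats():
    data = np.array([1.0, 2.0, 3.0, 4.0])
    weights = np.array([1.0, 1.0, 1.0, 5.0])
    avg, std = mean_and_std(data, weights=weights)
    # The heavily weighted 4.0 pulls the average above the unweighted mean of 2.5.
    assert avg == np.average(data, weights=weights)
    return avg, std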
@experimental(as_of="0.4.0")
def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
"""Scale array by columns to have weighted average 0 and standard
deviation 1.
Parameters
----------
a : array_like
2D array whose columns are standardized according to the
weights.
weights : array_like, optional
Array of weights associated with the columns of `a`. By
default, the scaling is unweighted.
with_mean : bool, optional, defaults to True
Center columns to have 0 weighted mean.
with_std : bool, optional, defaults to True
Scale columns to have unit weighted std.
ddof : int, optional, defaults to 0
If with_std is True, variance is calculated by dividing by `n
- ddof` (where `n` is the number of elements). By default it
        computes the maximum likelihood estimator.
copy : bool, optional, defaults to True
Whether to perform the standardization in place, or return a
new copy of `a`.
Returns
-------
2D ndarray
Scaled array.
Notes
-----
Wherever std equals 0, it is replaced by 1 in order to avoid
division by zero.
"""
if copy:
a = a.copy()
avg, std = mean_and_std(a, axis=0, weights=weights, with_mean=with_mean,
with_std=with_std, ddof=ddof)
if with_mean:
a -= avg
if with_std:
std[std == 0] = 1.0
a /= std
return a
@experimental(as_of="0.4.0")
def svd_rank(M_shape, S, tol=None):
"""Matrix rank of `M` given its singular values `S`.
See `np.linalg.matrix_rank` for a rationale on the tolerance
(we're not using that function because it doesn't let us reuse a
precomputed SVD)."""
if tol is None:
tol = S.max() * max(M_shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
@experimental(as_of="0.4.0")
def corr(x, y=None):
"""Computes correlation between columns of `x`, or `x` and `y`.
Correlation is covariance of (columnwise) standardized matrices,
so each matrix is first centered and scaled to have variance one,
and then their covariance is computed.
Parameters
----------
x : 2D array_like
Matrix of shape (n, p). Correlation between its columns will
be computed.
y : 2D array_like, optional
Matrix of shape (n, q). If provided, the correlation is
computed between the columns of `x` and the columns of
`y`. Else, it's computed between the columns of `x`.
Returns
-------
correlation
Matrix of computed correlations. Has shape (p, p) if `y` is
not provided, else has shape (p, q).
"""
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if y.shape[0] != x.shape[0]:
raise ValueError("Both matrices must have the same number of rows")
x, y = scale(x), scale(y)
else:
x = scale(x)
y = x
# Notice that scaling was performed with ddof=0 (dividing by n,
# the default), so now we need to remove it by also using ddof=0
# (dividing by n)
return x.T.dot(y) / x.shape[0]
@experimental(as_of="0.4.0")
def e_matrix(distance_matrix):
"""Compute E matrix from a distance matrix.
Squares and divides by -2 the input elementwise. Eq. 9.20 in
Legendre & Legendre 1998."""
return distance_matrix * distance_matrix / -2
def f_matrix(E_matrix):
"""Compute F matrix from E matrix.
Centring step: for each element, the mean of the corresponding
    row and column are subtracted, and the mean of the whole
matrix is added. Eq. 9.21 in Legendre & Legendre 1998."""
row_means = E_matrix.mean(axis=1, keepdims=True)
col_means = E_matrix.mean(axis=0, keepdims=True)
matrix_mean = E_matrix.mean()
return E_matrix - row_means - col_means + matrix_mean
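# --- Editor's illustrative sketch (not part of the original module) ---
# Quick demonstration of the helpers above; guarded so it only runs when this file
# is executed directly rather than imported.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = rng.normal(size=(50, 3))
    x_scaled = scale(x)  # columns now have mean ~0 and std ~1
    print(np.allclose(x_scaled.mean(axis=0), 0.0))
    print(np.allclose(corr(x).diagonal(), 1.0))  # each column correlates perfectly with itself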
|
SamStudio8/scikit-bio
|
skbio/stats/ordination/_utils.py
|
Python
|
bsd-3-clause
| 7,224
|
[
"scikit-bio"
] |
79819ce085d618603aac685f8946c6086390b5df444700d9e73af62ba51f15e1
|
import argparse
import numpy as np
import pandas as pd
import pyproj
from netCDF4 import Dataset
from make_proj_grids import read_ncar_map_file, make_proj_grids
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date", required=True, help="Start date in YYYY-MM-DD format")
parser.add_argument("-e", "--end_date", required=False, help="End date in YYYY-MM-DD format")
parser.add_argument("-o", "--out_path", required=True, help="Path to the destination of MESH verification data")
parser.add_argument("-m", "--map_file", required=True, help="Path to the ensemble mapfile")
args = parser.parse_args()
if args.end_date:
run_dates = pd.date_range(start=args.start_date, end=args.end_date, freq='1D').strftime("%y%m%d")
else:
run_dates = pd.date_range(start=args.start_date, end=args.start_date, freq='1D').strftime("%y%m%d")
out_path = args.out_path
mapfile = args.map_file
LSR_calibration_data(mapfile, out_path, run_dates)
return
def LSR_calibration_data(mapfile, out_path, run_dates, hours=[17, 19, 21], sector=None):
"""
Using the grid from input ML forecast (netcdf) data, SPC storm reports
with a 25 mile radius around the reports can be plotted.
The output file contains binary data, where any point
within the 25 mile radius is a 1, and all other points are 0.
Currently only supports netcdf files.
"""
hail_threshold = [25, 50]
lsr_dict = dict()
proj_dict, grid_dict = read_ncar_map_file(mapfile)
projection = pyproj.Proj(proj_dict)
mapping_data = make_proj_grids(proj_dict, grid_dict)
forecast_lons = np.array(mapping_data['lon'])
forecast_lats = np.array(mapping_data['lat'])
forecast_x = np.array(mapping_data['x'])
forecast_y = np.array(mapping_data['y'])
for date in run_dates:
print(date)
csv_file = 'https://www.spc.noaa.gov/climo/reports/{0}_rpts_hail.csv'.format(date)
try:
hail_reports = pd.read_csv(csv_file)
        except Exception:
print('Report CSV file could not be opened.')
continue
for threshold in hail_threshold:
# if os.path.exists(out_path+'{0}_{1}_lsr_mask.nc'.format(date,threshold)):
# print('>{0}mm file already exists'.format(threshold))
# continue
# print('Creating LSR mask >{0}mm'.format(threshold))
# Get size values from hail reports
inches_thresh = round((threshold) * 0.03937) * 100
report_size = hail_reports.loc[:, 'Size'].values
lsr_dict['full_day'] = np.zeros(forecast_lats.shape)
full_day_indices = np.where(report_size >= inches_thresh)[0]
if len(full_day_indices) < 1:
print('No >{0}mm LSRs found'.format(threshold))
continue
reports_lat_full = hail_reports.loc[full_day_indices, 'Lat'].values
reports_lon_full = hail_reports.loc[full_day_indices, 'Lon'].values
lsr_dict['full_day'] = calculate_distance(reports_lat_full, reports_lon_full, forecast_y, forecast_x,
projection)
# Get time values from hail reports
report_time = (hail_reports.loc[:, 'Time'].values) / 100
# Get lat/lon of different time periods and hail sizes
for start_hour in hours:
lsr_dict['{0}'.format(start_hour)] = np.zeros(forecast_lats.shape)
end_hour = (start_hour + 4) % 24
if end_hour > 12:
hour_indices = \
np.where((start_hour <= report_time) & (end_hour >= report_time) & (report_size >= inches_thresh))[
0]
else:
# Find reports before and after 0z
hour_before_0z = np.where((start_hour <= report_time) & (report_size >= inches_thresh))[0]
hour_after_0z = np.where((end_hour >= report_time) & (report_size >= inches_thresh))[0]
# Combine two arrays
hour_indices = np.hstack((hour_before_0z, hour_after_0z))
if len(hour_indices) < 1:
continue
reports_lat = hail_reports.loc[hour_indices, 'Lat'].values
reports_lon = hail_reports.loc[hour_indices, 'Lon'].values
lsr_dict['{0}'.format(start_hour)] = calculate_distance(reports_lat, reports_lon, forecast_y,
forecast_x, projection)
# Create netCDF file
if sector:
out_filename = out_path + '{0}_{1}_{2}_lsr_mask.nc'.format(date, threshold, sector)
else:
out_filename = out_path + '{0}_{1}_lsr_mask.nc'.format(date, threshold)
out_file = Dataset(out_filename, "w")
out_file.createDimension("x", forecast_lons.shape[0])
out_file.createDimension("y", forecast_lons.shape[1])
out_file.createVariable("Longitude", "f4", ("x", "y"))
out_file.createVariable("Latitude", "f4", ("x", "y"))
out_file.createVariable("24_Hour_All_12z_12z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_17z_21z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_19z_23z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_21z_25z", "f4", ("x", "y"))
out_file.variables["Longitude"][:, :] = forecast_lons
out_file.variables["Latitude"][:, :] = forecast_lats
out_file.variables["24_Hour_All_12z_12z"][:, :] = lsr_dict['full_day']
out_file.variables["4_Hour_All_17z_21z"][:, :] = lsr_dict['17']
out_file.variables["4_Hour_All_19z_23z"][:, :] = lsr_dict['19']
out_file.variables["4_Hour_All_21z_25z"][:, :] = lsr_dict['21']
out_file.close()
print("Writing to " + out_filename)
print()
return
def calculate_distance(obs_lat, obs_lon, forecast_y, forecast_x, projection):
"""
Calculate the difference between forecast data points and observed data.
Returns:
Binary array where ones are within a 40km radius
"""
x, y = projection(obs_lon, obs_lat)
mask_array = np.zeros(forecast_y.shape)
for index, point in enumerate(obs_lat):
y_diff = (y[index] - forecast_y) ** 2.0
x_diff = (x[index] - forecast_x) ** 2.0
total_dist = np.sqrt(y_diff + x_diff)
row, col = np.where(total_dist < 40234.0)
        mask_array[row, col] = 1.0
return mask_array
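# --- Editor's illustrative sketch (not called by this script) ---
# Shows the ~40 km (25 mile) masking idea from calculate_distance on a tiny synthetic
# grid. The Lambert conformal projection parameters are made up for illustration and
# do not come from a real map file.
def _example_distance_mask():
    proj = pyproj.Proj(proj="lcc", lat_1=30.0, lat_2=60.0, lat_0=40.0, lon_0=-95.0)
    # A coarse 100 km x 100 km grid of projected coordinates with 10 km spacing.
    grid = np.arange(0.0, 100000.0, 10000.0)
    grid_x, grid_y = np.meshgrid(grid, grid)
    # One synthetic "report" located near the grid origin.
    report_lon, report_lat = proj(np.array([5000.0]), np.array([5000.0]), inverse=True)
    return calculate_distance(report_lat, report_lon, grid_y, grid_x, proj)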
if __name__ == "__main__":
main()
|
djgagne/hagelslag
|
hagelslag/util/lsr_calibration_dataset.py
|
Python
|
mit
| 6,723
|
[
"NetCDF"
] |
ebc482acaa4b7781c8db027847ae2d126f717f0e90881472f6369becab20a2a1
|
import sys
from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
from scipy.optimize import leastsq
from lmfit import Parameters, Minimizer, report_fit
from lmfit.lineshapes import gaussian
try:
import pylab
HASPYLAB = True
except ImportError:
HASPYLAB = False
HASPYLAB = False
def residual(pars, x, sigma=None, data=None):
yg = gaussian(x, pars['amp_g'].value,
pars['cen_g'].value, pars['wid_g'].value)
slope = pars['line_slope'].value
offset = pars['line_off'].value
model = yg + offset + x * slope
if data is None:
return model
if sigma is None:
return (model - data)
return (model - data)/sigma
n = 201
xmin = 0.
xmax = 20.0
x = linspace(xmin, xmax, n)
p_true = Parameters()
p_true.add('amp_g', value=21.0)
p_true.add('cen_g', value=8.1)
p_true.add('wid_g', value=1.6)
p_true.add('line_off', value=-1.023)
p_true.add('line_slope', value=0.62)
data = (gaussian(x, p_true['amp_g'].value, p_true['cen_g'].value,
p_true['wid_g'].value) +
random.normal(scale=0.23, size=n) +
x*p_true['line_slope'].value + p_true['line_off'].value )
if HASPYLAB:
pylab.plot(x, data, 'r+')
p_fit = Parameters()
p_fit.add('amp_g', value=10.0)
p_fit.add('cen_g', value=9)
p_fit.add('wid_g', value=1)
p_fit.add('line_slope', value=0.0)
p_fit.add('line_off', value=0.0)
myfit = Minimizer(residual, p_fit,
fcn_args=(x,),
fcn_kws={'sigma':0.2, 'data':data})
myfit.prepare_fit()
#
for scale_covar in (True, False):
myfit.scale_covar = scale_covar
    print(' ==== scale_covar = ', myfit.scale_covar, ' ===')
for sigma in (0.1, 0.2, 0.23, 0.5):
myfit.userkws['sigma'] = sigma
p_fit['amp_g'].value = 10
p_fit['cen_g'].value = 9
p_fit['wid_g'].value = 1
        p_fit['line_slope'].value = 0.0
        p_fit['line_off'].value = 0.0
out = myfit.leastsq()
        print(' sigma = ', sigma)
        print(' chisqr = ', out.chisqr)
        print(' reduced_chisqr = ', out.redchi)
        report_fit(out.params, modelpars=p_true, show_correl=False)
        print(' ==============================')
# if HASPYLAB:
# fit = residual(p_fit, x)
# pylab.plot(x, fit, 'k-')
# pylab.show()
#
|
DiamondLightSource/auto_tomo_calibration-experimental
|
old_code_scripts/simulate_data/lmfit-py/examples/example_covar.py
|
Python
|
apache-2.0
| 2,319
|
[
"Gaussian"
] |
e2c862943ea1ac1836eec9f166aaea0c49c1e74b7d13b3e0918a0989039043c3
|
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for FHD_cal object."""
import pytest
import os
import numpy as np
from pyuvdata import UVCal
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
# set up FHD files
testdir = os.path.join(DATA_PATH, "fhd_cal_data/")
testfile_prefix = "1061316296_"
obs_testfile = os.path.join(testdir, testfile_prefix + "obs.sav")
cal_testfile = os.path.join(testdir, testfile_prefix + "cal.sav")
settings_testfile = os.path.join(testdir, testfile_prefix + "settings.txt")
settings_testfile_nodiffuse = os.path.join(
testdir, testfile_prefix + "nodiffuse_settings.txt"
)
layout_testfile = os.path.join(testdir, testfile_prefix + "layout.sav")
testdir2 = os.path.join(DATA_PATH, "fhd_cal_data/set2")
obs_file_multi = [
obs_testfile,
os.path.join(testdir2, testfile_prefix + "obs.sav"),
]
cal_file_multi = [
cal_testfile,
os.path.join(testdir2, testfile_prefix + "cal.sav"),
]
layout_file_multi = [layout_testfile, layout_testfile]
settings_file_multi = [
settings_testfile,
os.path.join(testdir2, testfile_prefix + "settings.txt"),
]
@pytest.mark.parametrize("raw", [True, False])
def test_read_fhdcal_write_read_calfits(raw, fhd_cal_raw, fhd_cal_fit, tmp_path):
"""
FHD cal to calfits loopback test.
Read in FHD cal files, write out as calfits, read back in and check for
object equality.
"""
if raw:
fhd_cal = fhd_cal_raw
else:
fhd_cal = fhd_cal_fit
calfits_cal = UVCal()
filelist = [cal_testfile, obs_testfile, layout_testfile, settings_testfile]
assert fhd_cal.filename == sorted(os.path.basename(file) for file in filelist)
assert np.max(fhd_cal.gain_array) < 2.0
outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
assert fhd_cal == calfits_cal
@pytest.mark.filterwarnings("ignore:Telescope location derived from obs lat/lon/alt")
@pytest.mark.parametrize("raw", [True, False])
def test_read_fhdcal_metadata(raw, fhd_cal_raw, fhd_cal_fit):
"""
Test FHD cal metadata only read.
"""
if raw:
fhd_cal_full = fhd_cal_raw
else:
fhd_cal_full = fhd_cal_fit
fhd_cal = UVCal()
fhd_cal.read_fhd_cal(
cal_testfile,
obs_testfile,
layout_file=layout_testfile,
settings_file=settings_testfile,
raw=raw,
read_data=False,
)
fhd_cal2 = fhd_cal_full.copy(metadata_only=True)
# this file set has a mismatch in Nsources between the cal file & settings
# file for some reason. I think it's just an issue with the files chosen
assert fhd_cal.Nsources != fhd_cal2.Nsources
fhd_cal.Nsources = fhd_cal2.Nsources
# there is a loss in precision for float auto scale values in the
# settings file vs the cal file
# first check that they are similar (extract from the string they are packed in)
assert np.allclose(
np.asarray(fhd_cal.extra_keywords["AUTOSCAL"][1:-1].split(", "), dtype=float),
np.asarray(fhd_cal2.extra_keywords["AUTOSCAL"][1:-1].split(", "), dtype=float),
)
# replace the strings to prevent errors
fhd_cal.extra_keywords["autoscal".upper()] = fhd_cal2.extra_keywords[
"autoscal".upper()
]
assert fhd_cal == fhd_cal2
fhd_cal.read_fhd_cal(
cal_testfile,
obs_testfile,
layout_file=layout_testfile,
settings_file=settings_testfile_nodiffuse,
raw=raw,
read_data=False,
)
fhd_cal2.diffuse_model = None
    assert fhd_cal == fhd_cal2
return
@pytest.mark.filterwarnings("ignore:Telescope location derived from obs lat/lon/alt")
def test_read_fhdcal_multimode():
"""
Read cal with multiple mode_fit values.
"""
fhd_cal = UVCal()
fhd_cal.read_fhd_cal(
os.path.join(testdir, testfile_prefix + "multimode_cal.sav"),
obs_testfile,
layout_file=layout_testfile,
settings_file=os.path.join(testdir, testfile_prefix + "multimode_settings.txt"),
raw=False,
)
assert fhd_cal.extra_keywords["MODE_FIT"] == "[90, 150, 230, 320, 400, 524]"
fhd_cal2 = fhd_cal.copy(metadata_only=True)
# check metadata only read
fhd_cal.read_fhd_cal(
os.path.join(testdir, testfile_prefix + "multimode_cal.sav"),
obs_testfile,
layout_file=layout_testfile,
settings_file=os.path.join(testdir, testfile_prefix + "multimode_settings.txt"),
raw=False,
read_data=False,
)
# this file set has a mismatch in Nsources between the cal file & settings
# file for some reason. I think it's just an issue with the files chosen
assert fhd_cal.Nsources != fhd_cal2.Nsources
fhd_cal.Nsources = fhd_cal2.Nsources
# there is a loss in precision for float auto scale values in the
# settings file vs the cal file
assert (
fhd_cal.extra_keywords["autoscal".upper()]
!= fhd_cal2.extra_keywords["autoscal".upper()]
)
fhd_cal.extra_keywords["autoscal".upper()] = fhd_cal2.extra_keywords[
"autoscal".upper()
]
assert fhd_cal == fhd_cal2
return
@pytest.mark.filterwarnings("ignore:Telescope location derived from obs lat/lon/alt")
@pytest.mark.parametrize(
"extra_history",
[
"Some extra history for testing\n",
["Some extra history for testing", "And some more history as well"],
],
)
def test_extra_history(extra_history, tmp_path):
"""Test that setting the extra_history keyword works."""
fhd_cal = UVCal()
calfits_cal = UVCal()
fhd_cal.read_fhd_cal(
cal_testfile,
obs_testfile,
layout_file=layout_testfile,
settings_file=settings_testfile,
extra_history=extra_history,
)
outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
assert fhd_cal == calfits_cal
for line in extra_history:
assert line in fhd_cal.history
return
@pytest.mark.filterwarnings("ignore:Telescope location derived from obs lat/lon/alt")
def test_flags_galaxy(tmp_path):
"""Test files with time, freq and tile flags and galaxy models behave."""
testdir = os.path.join(DATA_PATH, "fhd_cal_data/flag_set")
obs_testfile_flag = os.path.join(testdir, testfile_prefix + "obs.sav")
cal_testfile_flag = os.path.join(testdir, testfile_prefix + "cal.sav")
settings_testfile_flag = os.path.join(testdir, testfile_prefix + "settings.txt")
fhd_cal = UVCal()
calfits_cal = UVCal()
fhd_cal.read_fhd_cal(
cal_testfile_flag,
obs_testfile_flag,
layout_file=layout_testfile,
settings_file=settings_testfile_flag,
)
outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
assert fhd_cal == calfits_cal
def test_unknown_telescope():
fhd_cal = UVCal()
with uvtest.check_warnings(
UserWarning,
match=[
"Telescope location derived from obs lat/lon/alt values does not match "
"the location in the layout file. ",
"Telescope foo is not in known_telescopes.",
],
):
fhd_cal.read_fhd_cal(
cal_testfile,
os.path.join(testdir, testfile_prefix + "telescopefoo_obs.sav"),
layout_file=layout_testfile,
settings_file=settings_testfile,
)
assert fhd_cal.telescope_name == "foo"
@pytest.mark.parametrize(
"cal_file,obs_file,layout_file,settings_file,nfiles",
[
[cal_testfile, obs_testfile, layout_testfile, settings_testfile, 1],
[cal_file_multi, obs_file_multi, layout_file_multi, settings_file_multi, 2],
],
)
def test_break_read_fhdcal(cal_file, obs_file, layout_file, settings_file, nfiles):
"""Try various cases of missing files."""
fhd_cal = UVCal()
# check useful error message for metadata only read with no settings file
with pytest.raises(
ValueError, match="A settings_file must be provided if read_data is False."
):
fhd_cal.read_fhd_cal(
cal_file, obs_file, layout_file=layout_file, read_data=False
)
message_list = [
"No settings file",
"Telescope location derived from obs lat/lon/alt values does not match the "
"location in the layout file.",
]
if nfiles > 1:
message_list *= 2
message_list.append("UVParameter diffuse_model does not match")
with uvtest.check_warnings(UserWarning, message_list):
fhd_cal.read_fhd_cal(cal_file, obs_file, layout_file=layout_file)
# Check only pyuvdata version history with no settings file
expected_history = "\n" + fhd_cal.pyuvdata_version_str
if nfiles > 1:
expected_history += " Combined data along time axis using pyuvdata."
assert fhd_cal.history == expected_history
message_list = (
["No layout file, antenna_postions will not be defined."] * nfiles
+ ["UVParameter diffuse_model does not match"] * (nfiles - 1)
+ [
"The antenna_positions parameter is not set. It will be a required "
"parameter starting in pyuvdata version 2.3"
]
* (4 * nfiles - 3)
)
warning_list = [UserWarning] * (2 * nfiles - 1) + [DeprecationWarning] * (
4 * nfiles - 3
)
with uvtest.check_warnings(warning_list, message_list):
fhd_cal.read_fhd_cal(cal_file, obs_file, settings_file=settings_file)
# Check no antenna_positions
assert fhd_cal.antenna_positions is None
def test_read_multi(tmp_path):
"""Test reading in multiple files."""
fhd_cal = UVCal()
calfits_cal = UVCal()
with uvtest.check_warnings(
UserWarning,
[
"UVParameter diffuse_model does not match",
"Telescope location derived from obs lat/lon/alt values does not match the "
"location in the layout file.",
"Telescope location derived from obs lat/lon/alt values does not match the "
"location in the layout file.",
],
):
fhd_cal.read_fhd_cal(
cal_file_multi,
obs_file_multi,
settings_file=settings_file_multi,
layout_file=layout_file_multi,
)
outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
assert fhd_cal == calfits_cal
@pytest.mark.parametrize(
"cal_file,obs_file,layout_file,settings_file,message",
[
[
cal_file_multi[0],
obs_file_multi,
layout_file_multi,
settings_file_multi,
"Number of obs_files must match number of cal_files",
],
[
cal_file_multi,
obs_file_multi[0],
layout_file_multi,
settings_file_multi,
"Number of obs_files must match number of cal_files",
],
[
cal_file_multi,
obs_file_multi,
layout_file_multi[0],
settings_file_multi,
"Number of layout_files must match number of cal_files",
],
[
cal_file_multi,
obs_file_multi,
layout_file_multi,
settings_file_multi[0],
"Number of settings_files must match number of cal_files",
],
[
cal_file_multi,
obs_file_multi + obs_file_multi,
layout_file_multi,
settings_file_multi,
"Number of obs_files must match number of cal_files",
],
[
cal_file_multi,
obs_file_multi,
layout_file_multi + layout_file_multi,
settings_file_multi,
"Number of layout_files must match number of cal_files",
],
[
cal_file_multi,
obs_file_multi,
layout_file_multi,
settings_file_multi + settings_file_multi,
"Number of settings_files must match number of cal_files",
],
[
cal_file_multi[0],
obs_file_multi[0],
layout_file_multi,
settings_file_multi,
"Number of layout_files must match number of cal_files",
],
[
cal_file_multi[0],
obs_file_multi[0],
layout_file_multi[0],
settings_file_multi,
"Number of settings_files must match number of cal_files",
],
],
)
def test_break_read_multi(cal_file, obs_file, layout_file, settings_file, message):
"""Test errors for different numbers of files."""
fhd_cal = UVCal()
with pytest.raises(ValueError, match=message):
fhd_cal.read_fhd_cal(
cal_file, obs_file, layout_file=layout_file, settings_file=settings_file
)
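# --- Editor's illustrative sketch (not collected by pytest) ---
# The minimal read/convert pattern the tests above exercise, using the module-level
# file paths defined at the top of this file.
def _example_fhd_to_calfits(output_path):
    """Read one FHD calibration set and write it back out as a calfits file."""
    cal = UVCal()
    cal.read_fhd_cal(
        cal_testfile,
        obs_testfile,
        layout_file=layout_testfile,
        settings_file=settings_testfile,
    )
    cal.write_calfits(output_path, clobber=True)
    return cal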
|
HERA-Team/pyuvdata
|
pyuvdata/uvcal/tests/test_fhd_cal.py
|
Python
|
bsd-2-clause
| 13,088
|
[
"Galaxy"
] |
a516343a04405ee0e93947727ff7e5e7ae97ca1ed1f1a2779f3fc9d110f1a33a
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.show.component Contains the ShowComponent class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ..component.galaxy import GalaxyModelingComponent
from ...core.tools import filesystem as fs
# -----------------------------------------------------------------
class ShowComponent(GalaxyModelingComponent):
"""
This class...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(ShowComponent, self).__init__(*args, **kwargs)
# -- Attributes --
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ShowComponent, self).setup(**kwargs)
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/show/component.py
|
Python
|
agpl-3.0
| 1,503
|
[
"Galaxy"
] |
30f52e61d6f4bfd9ac185b704b4d92f0406e84fc8025fab2de825d5f7519ccca
|
"""
Utility functions for transcripts.
++++++++++++++++++++++++++++++++++
"""
import copy
import logging
import os
from functools import wraps
import requests
import simplejson as json
import six
from django.conf import settings
from lxml import etree
from opaque_keys.edx.locator import BundleDefinitionLocator
from pysrt import SubRipFile, SubRipItem, SubRipTime
from pysrt.srtexc import Error
from six import text_type
from six.moves import range, zip
from six.moves.html_parser import HTMLParser
from openedx.core.djangolib import blockstore_cache
from openedx.core.lib import blockstore_api
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from .bumper_utils import get_bumper_settings
try:
from edxval import api as edxval_api
except ImportError:
edxval_api = None
log = logging.getLogger(__name__)
NON_EXISTENT_TRANSCRIPT = 'non_existent_dummy_file_name'
class TranscriptException(Exception):
pass
class TranscriptsGenerationException(Exception):
pass
class GetTranscriptsFromYouTubeException(Exception):
pass
class TranscriptsRequestValidationException(Exception):
pass
def exception_decorator(func):
"""
    Re-raise TranscriptsGenerationException and UnicodeDecodeError as NotFoundError.
Args:
`func`: Input function
Returns:
'wrapper': Decorated function
"""
@wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except (TranscriptsGenerationException, UnicodeDecodeError) as ex:
log.exception(text_type(ex))
raise NotFoundError
return wrapper
def generate_subs(speed, source_speed, source_subs):
"""
Generate transcripts from one speed to another speed.
Args:
`speed`: float, for this speed subtitles will be generated,
`source_speed`: float, speed of source_subs
`source_subs`: dict, existing subtitles for speed `source_speed`.
Returns:
`subs`: dict, actual subtitles.
"""
if speed == source_speed:
return source_subs
coefficient = 1.0 * speed / source_speed
subs = {
'start': [
int(round(timestamp * coefficient)) for
timestamp in source_subs['start']
],
'end': [
int(round(timestamp * coefficient)) for
timestamp in source_subs['end']
],
'text': source_subs['text']}
return subs
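# --- Editor's illustrative sketch (not called anywhere in this module) ---
# Shows the timestamp scaling performed by generate_subs.
def _example_generate_subs():
    source = {'start': [0, 1000], 'end': [900, 2000], 'text': ['hi', 'there']}
    # Re-timing 1.0x subtitles for a 0.5x video halves every timestamp:
    # start -> [0, 500], end -> [450, 1000]; the text is left untouched.
    return generate_subs(0.5, 1.0, source)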
def save_to_store(content, name, mime_type, location):
"""
Save named content to store by location.
Returns location of saved content.
"""
content_location = Transcript.asset_location(location, name)
content = StaticContent(content_location, name, mime_type, content)
contentstore().save(content)
return content_location
def save_subs_to_store(subs, subs_id, item, language='en'):
"""
Save transcripts into `StaticContent`.
Args:
`subs_id`: str, subtitles id
`item`: video module instance
`language`: two chars str ('uk'), language of translation of transcripts
Returns: location of saved subtitles.
"""
filedata = json.dumps(subs, indent=2).encode('utf-8')
filename = subs_filename(subs_id, language)
return save_to_store(filedata, filename, 'application/json', item.location)
def youtube_video_transcript_name(youtube_text_api):
"""
Get the transcript name from available transcripts of video
with respect to language from youtube server
"""
utf8_parser = etree.XMLParser(encoding='utf-8')
transcripts_param = {'type': 'list', 'v': youtube_text_api['params']['v']}
lang = youtube_text_api['params']['lang']
# get list of transcripts of specific video
# url-form
# http://video.google.com/timedtext?type=list&v={VideoId}
youtube_response = requests.get('http://' + youtube_text_api['url'], params=transcripts_param)
if youtube_response.status_code == 200 and youtube_response.text:
youtube_data = etree.fromstring(youtube_response.text.encode('utf-8'), parser=utf8_parser)
# iterate all transcripts information from youtube server
for element in youtube_data:
# search specific language code such as 'en' in transcripts info list
if element.tag == 'track' and element.get('lang_code', '') == lang:
return element.get('name')
return None
def get_transcripts_from_youtube(youtube_id, settings, i18n, youtube_transcript_name=''):
"""
Gets transcripts from youtube for youtube_id.
Parses only utf-8 encoded transcripts.
Other encodings are not supported at the moment.
Returns (status, transcripts): bool, dict.
"""
_ = i18n.ugettext
utf8_parser = etree.XMLParser(encoding='utf-8')
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = youtube_id
# if the transcript name is not empty on youtube server we have to pass
# name param in url in order to get transcript
# example http://video.google.com/timedtext?lang=en&v={VideoId}&name={transcript_name}
youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
if youtube_transcript_name:
youtube_text_api['params']['name'] = youtube_transcript_name
data = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
if data.status_code != 200 or not data.text:
msg = _("Can't receive transcripts from Youtube for {youtube_id}. Status code: {status_code}.").format(
youtube_id=youtube_id,
status_code=data.status_code
)
raise GetTranscriptsFromYouTubeException(msg)
sub_starts, sub_ends, sub_texts = [], [], []
xmltree = etree.fromstring(data.content, parser=utf8_parser)
for element in xmltree:
if element.tag == "text":
start = float(element.get("start"))
duration = float(element.get("dur", 0)) # dur is not mandatory
text = element.text
end = start + duration
if text:
# Start and end should be ints representing the millisecond timestamp.
sub_starts.append(int(start * 1000))
sub_ends.append(int((end + 0.0001) * 1000))
sub_texts.append(text.replace('\n', ' '))
return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
def download_youtube_subs(youtube_id, video_descriptor, settings):
"""
Download transcripts from Youtube.
Args:
youtube_id: str, actual youtube_id of the video.
video_descriptor: video descriptor instance.
We save transcripts for 1.0 speed, as for other speed conversion is done on front-end.
Returns:
Serialized sjson transcript content, if transcripts were successfully downloaded and saved.
Raises:
GetTranscriptsFromYouTubeException, if fails.
"""
i18n = video_descriptor.runtime.service(video_descriptor, "i18n")
_ = i18n.ugettext
subs = get_transcripts_from_youtube(youtube_id, settings, i18n)
return json.dumps(subs, indent=2)
def remove_subs_from_store(subs_id, item, lang='en'):
"""
Remove from store, if transcripts content exists.
"""
filename = subs_filename(subs_id, lang)
Transcript.delete_asset(item.location, filename)
def generate_subs_from_source(speed_subs, subs_type, subs_filedata, item, language='en'):
"""Generate transcripts from source files (like SubRip format, etc.)
and save them to assets for `item` module.
    We expect that the speed of the source subs equals 1.
:param speed_subs: dictionary {speed: sub_id, ...}
:param subs_type: type of source subs: "srt", ...
:param subs_filedata:unicode, content of source subs.
:param item: module object.
:param language: str, language of translation of transcripts
:returns: True, if all subs are generated and saved successfully.
"""
_ = item.runtime.service(item, "i18n").ugettext
if subs_type.lower() != 'srt':
raise TranscriptsGenerationException(_("We support only SubRip (*.srt) transcripts format."))
try:
srt_subs_obj = SubRipFile.from_string(subs_filedata)
except Exception as ex:
msg = _("Something wrong with SubRip transcripts file during parsing. Inner message is {error_message}").format(
error_message=text_type(ex)
)
raise TranscriptsGenerationException(msg)
if not srt_subs_obj:
raise TranscriptsGenerationException(_("Something wrong with SubRip transcripts file during parsing."))
sub_starts = []
sub_ends = []
sub_texts = []
for sub in srt_subs_obj:
sub_starts.append(sub.start.ordinal)
sub_ends.append(sub.end.ordinal)
sub_texts.append(sub.text.replace('\n', ' '))
subs = {
'start': sub_starts,
'end': sub_ends,
'text': sub_texts}
for speed, subs_id in six.iteritems(speed_subs):
save_subs_to_store(
generate_subs(speed, 1, subs),
subs_id,
item,
language
)
return subs
def generate_srt_from_sjson(sjson_subs, speed):
"""Generate transcripts with speed = 1.0 from sjson to SubRip (*.srt).
:param sjson_subs: "sjson" subs.
:param speed: speed of `sjson_subs`.
:returns: "srt" subs.
"""
output = ''
equal_len = len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text'])
if not equal_len:
return output
sjson_speed_1 = generate_subs(speed, 1, sjson_subs)
for i in range(len(sjson_speed_1['start'])):
item = SubRipItem(
index=i,
start=SubRipTime(milliseconds=sjson_speed_1['start'][i]),
end=SubRipTime(milliseconds=sjson_speed_1['end'][i]),
text=sjson_speed_1['text'][i]
)
output += (six.text_type(item))
output += '\n'
return output
def generate_sjson_from_srt(srt_subs):
"""
    Generate sjson subs from SubRip (*.srt) subs.
Arguments:
srt_subs(SubRip): "SRT" subs object
Returns:
Subs converted to "SJSON" format.
"""
sub_starts = []
sub_ends = []
sub_texts = []
for sub in srt_subs:
sub_starts.append(sub.start.ordinal)
sub_ends.append(sub.end.ordinal)
sub_texts.append(sub.text.replace('\n', ' '))
sjson_subs = {
'start': sub_starts,
'end': sub_ends,
'text': sub_texts
}
return sjson_subs
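# --- Editor's illustrative sketch (not called anywhere in this module) ---
# SRT -> sjson with generate_sjson_from_srt; the subtitle text is a made-up example.
def _example_sjson_from_srt():
    srt_subs = SubRipFile.from_string(u"1\n00:00:01,000 --> 00:00:03,500\nHello\n")
    # Expected result: {'start': [1000], 'end': [3500], 'text': [u'Hello']}
    return generate_sjson_from_srt(srt_subs)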
def copy_or_rename_transcript(new_name, old_name, item, delete_old=False, user=None):
"""
Renames `old_name` transcript file in storage to `new_name`.
If `old_name` is not found in storage, raises `NotFoundError`.
If `delete_old` is True, removes `old_name` files from storage.
"""
filename = u'subs_{0}.srt.sjson'.format(old_name)
content_location = StaticContent.compute_location(item.location.course_key, filename)
transcripts = contentstore().find(content_location).data.decode('utf-8')
save_subs_to_store(json.loads(transcripts), new_name, item)
item.sub = new_name
item.save_with_metadata(user)
if delete_old:
remove_subs_from_store(old_name, item)
def get_html5_ids(html5_sources):
"""
    Helper method to parse out an HTML5 source into the ids
NOTE: This assumes that '/' are not in the filename
"""
html5_ids = [x.split('/')[-1].rsplit('.', 1)[0] for x in html5_sources]
return html5_ids
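# --- Editor's illustrative sketch (not called anywhere in this module); the URLs
# below are made-up examples.
def _example_html5_ids():
    sources = [
        'https://example.com/static/intro_video.mp4',
        'https://example.com/static/intro_video.webm',
    ]
    return get_html5_ids(sources)  # -> ['intro_video', 'intro_video']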
def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):
"""
Does some specific things, that can be done only on save.
Video player item has some video fields: HTML5 ones and Youtube one.
If value of `sub` field of `new_item` is cleared, transcripts should be removed.
`item` is video module instance with updated values of fields,
but actually have not been saved to store yet.
`old_metadata` contains old values of XFields.
# 1.
If value of `sub` field of `new_item` is different from values of video fields of `new_item`,
and `new_item.sub` file is present, then code in this function creates copies of
    `new_item.sub` file with new names. Those names are equal to the values of the video fields of `new_item`.
    After that, the `sub` field of `new_item` is changed to one of the values of the video fields.
    This whole action ensures that after a user changes the video fields, proper `sub` files, corresponding
    to the new values of the video fields, will be present in the system.
# 2. convert /static/filename.srt to filename.srt in self.transcripts.
(it is done to allow user to enter both /static/filename.srt and filename.srt)
# 3. Generate transcripts translation only when user clicks `save` button, not while switching tabs.
a) delete sjson translation for those languages, which were removed from `item.transcripts`.
Note: we are not deleting old SRT files to give user more flexibility.
b) For all SRT files in`item.transcripts` regenerate new SJSON files.
(To avoid confusing situation if you attempt to correct a translation by uploading
a new version of the SRT file with same name).
"""
_ = item.runtime.service(item, "i18n").ugettext
# # 1.
# html5_ids = get_html5_ids(item.html5_sources)
# # Youtube transcript source should always have a higher priority than html5 sources. Appending
# # `youtube_id_1_0` at the end helps achieve this when we read transcripts list.
# possible_video_id_list = html5_ids + [item.youtube_id_1_0]
# sub_name = item.sub
# for video_id in possible_video_id_list:
# if not video_id:
# continue
# if not sub_name:
# remove_subs_from_store(video_id, item)
# continue
# # copy_or_rename_transcript changes item.sub of module
# try:
# # updates item.sub with `video_id`, if it is successful.
# copy_or_rename_transcript(video_id, sub_name, item, user=user)
# except NotFoundError:
# # subtitles file `sub_name` is not presented in the system. Nothing to copy or rename.
# log.debug(
# "Copying %s file content to %s name is failed, "
# "original file does not exist.",
# sub_name, video_id
# )
# 2.
if generate_translation:
for lang, filename in item.transcripts.items():
item.transcripts[lang] = os.path.split(filename)[-1]
# 3.
if generate_translation:
old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()
new_langs = set(item.transcripts)
html5_ids = get_html5_ids(item.html5_sources)
possible_video_id_list = html5_ids + [item.youtube_id_1_0]
for lang in old_langs.difference(new_langs): # 3a
for video_id in possible_video_id_list:
if video_id:
remove_subs_from_store(video_id, item, lang)
reraised_message = ''
for lang in new_langs: # 3b
try:
generate_sjson_for_all_speeds(
item,
item.transcripts[lang],
{speed: subs_id for subs_id, speed in six.iteritems(youtube_speed_dict(item))},
lang,
)
except TranscriptException as ex:
                reraised_message += ' ' + text_type(ex)
if reraised_message:
item.save_with_metadata(user)
raise TranscriptException(reraised_message)
def youtube_speed_dict(item):
"""
Returns {speed: youtube_ids, ...} dict for existing youtube_ids
"""
yt_ids = [item.youtube_id_0_75, item.youtube_id_1_0, item.youtube_id_1_25, item.youtube_id_1_5]
yt_speeds = [0.75, 1.00, 1.25, 1.50]
youtube_ids = {p[0]: p[1] for p in zip(yt_ids, yt_speeds) if p[0]}
return youtube_ids
def subs_filename(subs_id, lang='en'):
"""
Generate proper filename for storage.
"""
if lang == 'en':
return u'subs_{0}.srt.sjson'.format(subs_id)
else:
return u'{0}_subs_{1}.srt.sjson'.format(lang, subs_id)
def generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, lang):
"""
Generates sjson from srt for given lang.
`item` is module object.
"""
_ = item.runtime.service(item, "i18n").ugettext
try:
srt_transcripts = contentstore().find(Transcript.asset_location(item.location, user_filename))
except NotFoundError as ex:
raise TranscriptException(_("{exception_message}: Can't find uploaded transcripts: {user_filename}").format(
exception_message=text_type(ex),
user_filename=user_filename
))
if not lang:
lang = item.transcript_language
# Used utf-8-sig encoding type instead of utf-8 to remove BOM(Byte Order Mark), e.g. U+FEFF
generate_subs_from_source(
result_subs_dict,
os.path.splitext(user_filename)[1][1:],
srt_transcripts.data.decode('utf-8-sig'),
item,
lang
)
def get_or_create_sjson(item, transcripts):
"""
Get sjson if already exists, otherwise generate it.
Generate sjson with subs_id name, from user uploaded srt.
Subs_id is extracted from srt filename, which was set by user.
Args:
        transcripts (dict): dictionary of (language: file) pairs.
Raises:
TranscriptException: when srt subtitles do not exist,
and exceptions from generate_subs_from_source.
`item` is module object.
"""
user_filename = transcripts[item.transcript_language]
user_subs_id = os.path.splitext(user_filename)[0]
source_subs_id, result_subs_dict = user_subs_id, {1.0: user_subs_id}
try:
sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
except NotFoundError: # generating sjson from srt
generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, item.transcript_language)
sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
return sjson_transcript
def get_video_ids_info(edx_video_id, youtube_id_1_0, html5_sources):
"""
Returns list internal or external video ids.
Arguments:
edx_video_id (unicode): edx_video_id
youtube_id_1_0 (unicode): youtube id
html5_sources (list): html5 video ids
Returns:
tuple: external or internal, video ids list
"""
clean = lambda item: item.strip() if isinstance(item, six.string_types) else item
external = not bool(clean(edx_video_id))
video_ids = [edx_video_id, youtube_id_1_0] + get_html5_ids(html5_sources)
# video_ids cleanup
video_ids = [item for item in video_ids if bool(clean(item))]
return external, video_ids
def clean_video_id(edx_video_id):
"""
Cleans an edx video ID.
Arguments:
edx_video_id(unicode): edx-val's video identifier
"""
return edx_video_id and edx_video_id.strip()
def get_video_transcript_content(edx_video_id, language_code):
"""
Gets video transcript content, only if the corresponding feature flag is enabled for the given `course_id`.
Arguments:
language_code(unicode): Language code of the requested transcript
edx_video_id(unicode): edx-val's video identifier
Returns:
A dict containing transcript's file name and its sjson content.
"""
transcript = None
edx_video_id = clean_video_id(edx_video_id)
if edxval_api and edx_video_id:
transcript = edxval_api.get_video_transcript_data(edx_video_id, language_code)
return transcript
def get_available_transcript_languages(edx_video_id):
"""
Gets available transcript languages for a video.
Arguments:
edx_video_id(unicode): edx-val's video identifier
Returns:
A list containing distinct transcript language codes against all the passed video ids.
"""
available_languages = []
edx_video_id = clean_video_id(edx_video_id)
if edxval_api and edx_video_id:
available_languages = edxval_api.get_available_transcript_languages(video_id=edx_video_id)
return available_languages
def convert_video_transcript(file_name, content, output_format):
"""
Convert video transcript into desired format
Arguments:
file_name: name of transcript file along with its extension
content: transcript content stream
output_format: the format in which transcript will be converted
Returns:
A dict containing the new transcript filename and the content converted into desired format.
"""
name_and_extension = os.path.splitext(file_name)
basename, input_format = name_and_extension[0], name_and_extension[1][1:]
filename = u'{base_name}.{ext}'.format(base_name=basename, ext=output_format)
converted_transcript = Transcript.convert(content, input_format=input_format, output_format=output_format)
return dict(filename=filename, content=converted_transcript)
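# --- Editor's illustrative sketch (not called anywhere in this module) ---
# Converting a tiny, made-up SRT transcript to plain text with convert_video_transcript.
def _example_convert_srt_to_txt():
    srt_content = u"1\n00:00:00,000 --> 00:00:02,000\nWelcome to the course.\n"
    # Expected result, roughly: {'filename': u'intro.txt', 'content': u'Welcome to the course.'}
    return convert_video_transcript(u'intro.srt', srt_content, Transcript.TXT)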
class Transcript(object):
"""
Container for transcript methods.
"""
SRT = u'srt'
TXT = u'txt'
SJSON = u'sjson'
mime_types = {
SRT: u'application/x-subrip; charset=utf-8',
TXT: u'text/plain; charset=utf-8',
SJSON: u'application/json',
}
@staticmethod
def convert(content, input_format, output_format):
"""
Convert transcript `content` from `input_format` to `output_format`.
Accepted input formats: sjson, srt.
Accepted output format: srt, txt, sjson.
Raises:
TranscriptsGenerationException: On parsing the invalid srt content during conversion from srt to sjson.
"""
assert input_format in ('srt', 'sjson')
assert output_format in ('txt', 'srt', 'sjson')
if input_format == output_format:
return content
if input_format == 'srt':
# Standardize content into bytes for later decoding.
if isinstance(content, text_type):
content = content.encode('utf-8')
if output_format == 'txt':
text = SubRipFile.from_string(content.decode('utf-8')).text
return HTMLParser().unescape(text)
elif output_format == 'sjson':
try:
srt_subs = SubRipFile.from_string(
# Skip byte order mark(BOM) character
content.decode('utf-8-sig'),
error_handling=SubRipFile.ERROR_RAISE
)
except Error as ex: # Base exception from pysrt
raise TranscriptsGenerationException(text_type(ex))
return json.dumps(generate_sjson_from_srt(srt_subs))
if input_format == 'sjson':
if output_format == 'txt':
text = json.loads(content)['text']
text_without_none = [line if line else '' for line in text]
return HTMLParser().unescape("\n".join(text_without_none))
elif output_format == 'srt':
return generate_srt_from_sjson(json.loads(content), speed=1.0)
@staticmethod
def asset(location, subs_id, lang='en', filename=None):
"""
Get asset from contentstore, asset location is built from subs_id and lang.
`location` is module location.
"""
# HACK Warning! this is temporary and will be removed once edx-val take over the
# transcript module and contentstore will only function as fallback until all the
# data is migrated to edx-val. It will be saving a contentstore hit for a hardcoded
# dummy-non-existent-transcript name.
if NON_EXISTENT_TRANSCRIPT in [subs_id, filename]:
raise NotFoundError
asset_filename = subs_filename(subs_id, lang) if not filename else filename
return Transcript.get_asset(location, asset_filename)
@staticmethod
def get_asset(location, filename):
"""
Return asset by location and filename.
"""
return contentstore().find(Transcript.asset_location(location, filename))
@staticmethod
def asset_location(location, filename):
"""
Return asset location. `location` is module location.
"""
# If user transcript filename is empty, raise `TranscriptException` to avoid `InvalidKeyError`.
if not filename:
raise TranscriptException("Transcript not uploaded yet")
return StaticContent.compute_location(location.course_key, filename)
@staticmethod
def delete_asset(location, filename):
"""
Delete asset by location and filename.
"""
try:
contentstore().delete(Transcript.asset_location(location, filename))
log.info("Transcript asset %s was removed from store.", filename)
except NotFoundError:
pass
return StaticContent.compute_location(location.course_key, filename)
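# --- Editor's illustrative sketch (not called anywhere in this module) ---
# The three formats Transcript.convert moves between, on a made-up one-cue transcript.
def _example_transcript_convert():
    sjson = json.dumps({'start': [0], 'end': [1500], 'text': [u'Hello']})
    as_srt = Transcript.convert(sjson, input_format=Transcript.SJSON, output_format=Transcript.SRT)
    as_txt = Transcript.convert(sjson, input_format=Transcript.SJSON, output_format=Transcript.TXT)
    return as_srt, as_txt  # an SRT block with timestamps, and the bare text u'Hello'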
class VideoTranscriptsMixin(object):
"""Mixin class for transcript functionality.
This is necessary for VideoBlock.
"""
def available_translations(self, transcripts, verify_assets=None, is_bumper=False):
"""
Return a list of language codes for which we have transcripts.
Arguments:
verify_assets (boolean): If True, checks to ensure that the transcripts
really exist in the contentstore. If False, we just look at the
VideoBlock fields and do not query the contentstore. One reason
we might do this is to avoid slamming contentstore() with queries
when trying to make a listing of videos and their languages.
Defaults to `not FALLBACK_TO_ENGLISH_TRANSCRIPTS`.
transcripts (dict): A dict with all transcripts and a sub.
            is_bumper (bool): If True, look up the transcripts for the video bumper.
"""
translations = []
if verify_assets is None:
verify_assets = not settings.FEATURES.get('FALLBACK_TO_ENGLISH_TRANSCRIPTS')
sub, other_langs = transcripts["sub"], transcripts["transcripts"]
if verify_assets:
all_langs = dict(**other_langs)
if sub:
all_langs.update({'en': sub})
for language, filename in six.iteritems(all_langs):
try:
# for bumper videos, transcripts are stored in content store only
if is_bumper:
get_transcript_for_video(self.location, filename, filename, language)
else:
get_transcript(self, language)
except NotFoundError:
continue
translations.append(language)
else:
# If we're not verifying the assets, we just trust our field values
translations = list(other_langs)
if not translations or sub:
translations += ['en']
# to clean redundant language codes.
return list(set(translations))
def get_transcript(self, transcripts, transcript_format='srt', lang=None):
"""
Returns transcript, filename and MIME type.
transcripts (dict): A dict with all transcripts and a sub.
Raises:
- NotFoundError if cannot find transcript file in storage.
- ValueError if transcript file is empty or incorrect JSON.
- KeyError if transcript file has incorrect format.
If language is 'en', self.sub should be correct subtitles name.
If language is 'en', but if self.sub is not defined, this means that we
should search for video name in order to get proper transcript (old style courses).
If language is not 'en', give back transcript in proper language and format.
"""
if not lang:
lang = self.get_default_transcript_language(transcripts)
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if lang == 'en':
if sub: # HTML5 case and (Youtube case for new style videos)
transcript_name = sub
elif self.youtube_id_1_0: # old courses
transcript_name = self.youtube_id_1_0
else:
log.debug("No subtitles for 'en' language")
raise ValueError
data = Transcript.asset(self.location, transcript_name, lang).data.decode('utf-8')
filename = u'{}.{}'.format(transcript_name, transcript_format)
content = Transcript.convert(data, 'sjson', transcript_format)
else:
data = Transcript.asset(self.location, None, None, other_lang[lang]).data.decode('utf-8')
filename = u'{}.{}'.format(os.path.splitext(other_lang[lang])[0], transcript_format)
content = Transcript.convert(data, 'srt', transcript_format)
if not content:
log.debug('no subtitles produced in get_transcript')
raise ValueError
return content, filename, Transcript.mime_types[transcript_format]
def get_default_transcript_language(self, transcripts):
"""
Returns the default transcript language for this video module.
Args:
transcripts (dict): A dict with all transcripts and a sub.
"""
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.transcript_language in other_lang:
transcript_language = self.transcript_language
elif sub:
transcript_language = u'en'
elif len(other_lang) > 0:
transcript_language = sorted(other_lang)[0]
else:
transcript_language = u'en'
return transcript_language
def get_transcripts_info(self, is_bumper=False):
"""
Returns a transcript dictionary for the video.
Arguments:
is_bumper(bool): If True, the request is for the bumper transcripts
"""
if is_bumper:
transcripts = copy.deepcopy(get_bumper_settings(self).get('transcripts', {}))
sub = transcripts.pop("en", "")
else:
transcripts = self.transcripts if self.transcripts else {}
sub = self.sub
# Only attach transcripts that are not empty.
transcripts = {
language_code: transcript_file
for language_code, transcript_file in transcripts.items() if transcript_file != ''
}
# bumper transcripts are stored in content store so we don't need to include val transcripts
if not is_bumper:
transcript_languages = get_available_transcript_languages(edx_video_id=self.edx_video_id)
# HACK Warning! this is temporary and will be removed once edx-val take over the
# transcript module and contentstore will only function as fallback until all the
# data is migrated to edx-val.
for language_code in transcript_languages:
if language_code == 'en' and not sub:
sub = NON_EXISTENT_TRANSCRIPT
elif not transcripts.get(language_code):
transcripts[language_code] = NON_EXISTENT_TRANSCRIPT
return {
"sub": sub,
"transcripts": transcripts,
}
@exception_decorator
def get_transcript_from_val(edx_video_id, lang=None, output_format=Transcript.SRT):
"""
Get video transcript from edx-val.
Arguments:
edx_video_id (unicode): video identifier
lang (unicode): transcript language
output_format (unicode): transcript output format
Returns:
tuple containing content, filename, mimetype
"""
transcript = get_video_transcript_content(edx_video_id, lang)
if not transcript:
raise NotFoundError(u'Transcript not found for {}, lang: {}'.format(edx_video_id, lang))
transcript_conversion_props = dict(transcript, output_format=output_format)
transcript = convert_video_transcript(**transcript_conversion_props)
filename = transcript['filename']
content = transcript['content']
mimetype = Transcript.mime_types[output_format]
return content, filename, mimetype
def get_transcript_for_video(video_location, subs_id, file_name, language):
"""
Get video transcript from content store.
NOTE: Transcripts can be searched from content store by two ways:
1. by an id(a.k.a subs_id) which will be used to construct transcript filename
2. by providing transcript filename
Arguments:
video_location (Locator): Video location
subs_id (unicode): id for a transcript in content store
file_name (unicode): file_name for a transcript in content store
language (unicode): transcript language
Returns:
tuple containing transcript input_format, basename, content
"""
try:
if subs_id is None:
raise NotFoundError
content = Transcript.asset(video_location, subs_id, language).data.decode('utf-8')
base_name = subs_id
input_format = Transcript.SJSON
except NotFoundError:
content = Transcript.asset(video_location, None, language, file_name).data.decode('utf-8')
base_name = os.path.splitext(file_name)[0]
input_format = Transcript.SRT
return input_format, base_name, content
@exception_decorator
def get_transcript_from_contentstore(video, language, output_format, transcripts_info, youtube_id=None):
"""
Get video transcript from content store.
Arguments:
video (Video Descriptor): Video descriptor
language (unicode): transcript language
output_format (unicode): transcript output format
transcripts_info (dict): transcript info for a video
youtube_id (unicode): youtube video id
Returns:
tuple containing content, filename, mimetype
"""
input_format, base_name, transcript_content = None, None, None
if output_format not in (Transcript.SRT, Transcript.SJSON, Transcript.TXT):
raise NotFoundError('Invalid transcript format `{output_format}`'.format(output_format=output_format))
sub, other_languages = transcripts_info['sub'], transcripts_info['transcripts']
transcripts = dict(other_languages)
# this is sent in case of a translation dispatch and we need to use it as our subs_id.
possible_sub_ids = [youtube_id, sub, video.youtube_id_1_0] + get_html5_ids(video.html5_sources)
for sub_id in possible_sub_ids:
try:
transcripts[u'en'] = sub_id
input_format, base_name, transcript_content = get_transcript_for_video(
video.location,
subs_id=sub_id,
file_name=transcripts[language],
language=language
)
break
except (KeyError, NotFoundError):
continue
if transcript_content is None:
raise NotFoundError('No transcript for `{lang}` language'.format(
lang=language
))
# add language prefix to transcript file only if language is not None
language_prefix = '{}_'.format(language) if language else ''
transcript_name = u'{}{}.{}'.format(language_prefix, base_name, output_format)
transcript_content = Transcript.convert(transcript_content, input_format=input_format, output_format=output_format)
if not transcript_content.strip():
raise NotFoundError('No transcript content')
if youtube_id:
youtube_ids = youtube_speed_dict(video)
transcript_content = json.dumps(
generate_subs(youtube_ids.get(youtube_id, 1), 1, json.loads(transcript_content))
)
return transcript_content, transcript_name, Transcript.mime_types[output_format]
def get_transcript_from_blockstore(video_block, language, output_format, transcripts_info):
"""
Get video transcript from Blockstore.
Blockstore expects video transcripts to be placed into the 'static/'
subfolder of the XBlock's folder in a Blockstore bundle. For example, if the
video XBlock's definition is in the standard location of
video/video1/definition.xml
Then the .srt files should be placed at e.g.
video/video1/static/video1-en.srt
This is the same place where other public static files are placed for other
XBlocks, such as image files used by HTML blocks.
Video XBlocks in Blockstore must set the 'transcripts' XBlock field to a
JSON dictionary listing the filename of the transcript for each language:
<video
youtube_id_1_0="3_yD_cEKoCk"
transcripts='{"en": "3_yD_cEKoCk-en.srt"}'
display_name="Welcome Video with Transcript"
download_track="true"
/>
This method is tested in openedx/core/djangoapps/content_libraries/tests/test_static_assets.py
Arguments:
video_block (Video XBlock): The video XBlock
language (str): transcript language
output_format (str): transcript output format
transcripts_info (dict): transcript info for a video, from video_block.get_transcripts_info()
Returns:
tuple containing content, filename, mimetype
"""
if output_format not in (Transcript.SRT, Transcript.SJSON, Transcript.TXT):
raise NotFoundError('Invalid transcript format `{output_format}`'.format(output_format=output_format))
transcripts = transcripts_info['transcripts']
if language not in transcripts:
raise NotFoundError("Video {} does not have a transcript file defined for the '{}' language in its OLX.".format(
video_block.scope_ids.usage_id,
language,
))
filename = transcripts[language]
if not filename.endswith('.srt'):
# We want to standardize on .srt
raise NotFoundError("Video XBlocks in Blockstore only support .srt transcript files.")
# Try to load the transcript file out of Blockstore
# In lieu of an XBlock API for this (like block.runtime.resources_fs), we use the blockstore API directly.
bundle_uuid = video_block.scope_ids.def_id.bundle_uuid
path = video_block.scope_ids.def_id.olx_path.rpartition('/')[0] + '/static/' + filename
bundle_version = video_block.scope_ids.def_id.bundle_version # Either bundle_version or draft_name will be set.
draft_name = video_block.scope_ids.def_id.draft_name
try:
content_binary = blockstore_cache.get_bundle_file_data_with_cache(bundle_uuid, path, bundle_version, draft_name)
except blockstore_api.BundleFileNotFound:
raise NotFoundError("Transcript file '{}' missing for video XBlock {}".format(
path,
video_block.scope_ids.usage_id,
))
# Now convert the transcript data to the requested format:
filename_no_extension = os.path.splitext(filename)[0]
output_filename = '{}.{}'.format(filename_no_extension, output_format)
output_transcript = Transcript.convert(
content_binary.decode('utf-8'),
input_format=Transcript.SRT,
output_format=output_format,
)
if not output_transcript.strip():
raise NotFoundError('No transcript content')
return output_transcript, output_filename, Transcript.mime_types[output_format]
def get_transcript(video, lang=None, output_format=Transcript.SRT, youtube_id=None):
"""
Get video transcript from edx-val or content store.
Arguments:
video (Video Descriptor): Video Descriptor
lang (unicode): transcript language
output_format (unicode): transcript output format
youtube_id (unicode): youtube video id
Returns:
tuple containing content, filename, mimetype
"""
transcripts_info = video.get_transcripts_info()
if not lang:
lang = video.get_default_transcript_language(transcripts_info)
if isinstance(video.scope_ids.def_id, BundleDefinitionLocator):
# This block is in Blockstore.
# For Blockstore, VAL is considered deprecated and we can load the transcript file
# directly using the Blockstore API:
return get_transcript_from_blockstore(video, lang, output_format, transcripts_info)
try:
edx_video_id = clean_video_id(video.edx_video_id)
if not edx_video_id:
raise NotFoundError
return get_transcript_from_val(edx_video_id, lang, output_format)
except NotFoundError:
return get_transcript_from_contentstore(
video,
lang,
youtube_id=youtube_id,
output_format=output_format,
transcripts_info=transcripts_info
)
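# Illustrative usage sketch (not part of the original module): given a video
# descriptor obtained elsewhere (a hypothetical `video_block` here), a caller
# would typically fetch a transcript as
#
#     content, filename, mimetype = get_transcript(
#         video_block, lang='en', output_format=Transcript.SRT
#     )
#
# which tries edx-val first and falls back to the contentstore, as described
# in the docstring above.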
|
appsembler/edx-platform
|
common/lib/xmodule/xmodule/video_module/transcripts_utils.py
|
Python
|
agpl-3.0
| 40,929
|
[
"FEFF"
] |
0d41698fc7beaf13c96417ea925f8ac9e14c46b7101560ab111b7017eb6da82c
|
if __name__ == "__main__":
import sys,os
selfname = sys.argv[0]
full_path = os.path.abspath(selfname)[:]
last_slash = full_path.rfind('/')
dirpath = full_path[:last_slash] + '/..'
print "Append to PYTHONPATH: %s" % (dirpath)
sys.path.append(dirpath)
import time,re,logging,os
import math
import sys
import subprocess
from Parsers.Cube import Cube
from ElStr import ElectronicStructure
from Geometry import Geom,ListGeoms,IRC
from Containers import AtomicProps
from Tools import web
log = logging.getLogger(__name__)
class FchkGaussian(ElectronicStructure):
"""
Shows 3D-properties from the .fchk file
"""
def __init__(self):
self.densities = []
self.openshell = False
self.cubes = []
self.isotype=''
self.isovalue='0.03'
ElectronicStructure.__init__(self)
self.OK = True
def makeCube(self,prop,name='',colors=''):
fcube = self.settings.realPath(prop+'.cube')
wpcube = self.settings.webPath(prop+'.cube')
command = (self.settings.cubegen, '0', prop, self.file, fcube, self.settings.npoints_cube, 'h')
t1 = time.time()
log.debug('Trying to run command: "%s"' % (str(command)) )
subprocess.call(command)
t2 = time.time()
log.debug('Running cubegen: %.1f s' % (t2-t1))
if os.path.exists(fcube):
log.debug('%s successfully generated' % (fcube))
else:
log.warning('%s has not been created' % (fcube))
c = Cube(name,colors)
c.file = fcube
c.wpcube = wpcube
c.isotype = prop.split('=')[0]
c.isovalue = self.isovalue
c.parse()
return c
def parse(self):
"""
        Parse the .fchk file as plain text.
        We start here because the .fchk contains valuable information
        that may be used later on.
"""
try:
FI = open(self.file)
log.debug('%s was opened for reading' %(self.file))
except:
log.error('Cannot open %s for reading' %(self.file))
"""
http://www.gaussian.com/g_tech/g_ur/f_formchk.htm
All other data contained in the file is located in a labeled line/section set up in one of the following forms:
Scalar values appear on the same line as their data label. This line consists of a string describing the data item, a flag indicating the data type, and finally the value:
Integer scalars: Name,I,IValue, using format A40,3X,A1,5X,I12.
Real scalars: Name,R,Value, using format A40,3X,A1,5X,E22.15.
Character string scalars: Name,C,Value, using format A40,3X,A1,5X,A12.
Logical scalars: Name,L,Value, using format A40,3X,A1,5X,L1.
Vector and array data sections begin with a line naming the data and giving the type and number of values, followed by the data on one or more succeeding lines (as needed):
Integer arrays: Name,I,Num, using format A40,3X,A1,3X,'N=',I12. The N= indicates that this is an array, and the string is followed by the number of values. The array elements then follow starting on the next line in format 6I12.
Real arrays: Name,R,Num, using format A40,3X,A1,3X,'N=',I12, where the N= string again indicates an array and is followed by the number of elements. The elements themselves follow on succeeding lines in format 5E16.8. Note that the Real format has been chosen to ensure that at least one space is present between elements, to facilitate reading the data in C.
Character string arrays (first type): Name,C,Num, using format A40,3X,A1,3X,'N=',I12, where the N= string indicates an array and is followed by the number of elements. The elements themselves follow on succeeding lines in format 5A12.
Character string arrays (second type): Name,H,Num, using format A40,3X,A1,3X,'N=',I12, where the N= string indicates an array and is followed by the number of elements. The elements themselves follow on succeeding lines in format 9A8.
Logical arrays: Name,H,Num, using format A40,3X,A1,3X,'N=',I12, where the N= string indicates an array and is followed by the number of elements. The elements themselves follow on succeeding lines in format 72L1.
All quantities are in atomic units and in the standard orientation, if that was determined by the Gaussian run. Standard orientation is seldom an interesting visual perspective, but it is the natural orientation for the vector fields.
"""
def split_array(s,reclength):
v = []
nrec = int(math.ceil((len(s)-1.0)/reclength))
for i in range(nrec):
rec = s[reclength*i:reclength*(i+1)].strip()
v.append(rec)
return v
self.parsedProps = {}
format_arrays = {
'I' : [6.,12],
'R' : [5.,16],
'C' : [5.,12],
'H' : [9.,8],
}
try:
self.comments = FI.next().rstrip()
s = FI.next().rstrip()
self.JobType, self.lot, self.basis = s[0:10],s[10:20],s[70:80]
for s in FI:
s = s.rstrip()
array_mark = (s[47:49] == 'N=')
if array_mark:
value = []
prop, vtype, nrec = s[:40].strip(), s[43], int(s[49:])
fa = format_arrays[vtype]
nlines = int(math.ceil(nrec/fa[0]))
for _ in range(nlines):
s = FI.next()
v5 = split_array(s,fa[1])
value.extend(v5)
else:
prop, vtype, value = s[:40].strip(), s[43], s[49:].strip()
self.parsedProps[prop] = value
except StopIteration:
log.warning('Unexpected EOF')
FI.close()
log.debug('%s parsed successfully' % (self.file))
return
def postprocess(self):
#
def any_nonzero(ar):
for s in ar:
                if float(s) != 0:
return True
return False
#
def getGeom(ar,atnum,atnames,start=0):
Bohr = 0.52917721
g = Geom()
atbase = start
for i in range(atnum):
atn = atnames[i]
xyz = ar[atbase:atbase+3]
x, y, z = map(lambda k: float(k)*Bohr, xyz)
g.coord.append('%s %f %f %f' % (atn,x,y,z))
atbase += 3
pc = AtomicProps(attr='atnames',data=atnames)
            g.addAtProp(pc,visible=False) # hidden: there is no point in showing atomic names for every geometry via checkboxes
return g
#
pp = self.parsedProps
self.charge = pp['Charge']
self.mult = pp['Multiplicity']
self.sym = 'NA'
self.solvent = 'NA'
if 'S**2' in pp:
s2_before = float(pp['S**2'])
s2_after = float(pp['S**2 after annihilation'])
if s2_before > 0.0:
self.openshell = True
self.s2 = '%.4f / %.4f' % (s2_before,s2_after)
if any_nonzero(pp['External E-field']):
self.extra += 'External Electric Field applied'
self.scf_e = float(pp['SCF Energy'])
self.total_e = pp['Total Energy']
atnames = map(lambda k: int(float(k)), pp['Nuclear charges'])
atnum = int(pp['Number of atoms'])
self.geoms = ListGeoms()
is_irc = ('IRC point 1 Geometries' in pp)
        is_opt = ('Opt point 1 Geometries' in pp) & False # more confusing than useful, so it is disabled for now
if is_irc:
self.JobType += ' (irc)'
ngeom = int(pp['IRC Number of geometries'][0])
shift = int(pp['IRC Num geometry variables'])
irc_ex = pp['IRC point 1 Results for each geome']
base,exi = 0,0
for i in range(ngeom):
g = getGeom(pp['IRC point 1 Geometries'],atnum,atnames,base)
e,x = irc_ex[exi:exi+2]
g.addProp('x',float(x))
g.addProp('e',float(e))
g.to_kcalmol = 627.509
self.geoms.append(g)
base += shift
exi += 2
self.series = IRC(other=self.geoms)
elif is_opt:
ngeom = int(pp['Optimization Number of geometries'][0])
shift = int(pp['Optimization Num geometry variables'])
opt_ez = pp['Opt point 1 Results for each geome']
base,ezi = 0,0
for i in range(ngeom):
g = getGeom(pp['Opt point 1 Geometries'],atnum,atnames,base)
e,z = opt_ez[ezi:ezi+2]
g.addProp('e',float(e))
g.to_kcalmol = 627.509
self.geoms.append(g)
base += shift
ezi += 2
else:
g = getGeom(pp['Current cartesian coordinates'],atnum,atnames)
# Parse charges
for k in pp:
if ' Charges' in k:
ch = k[:k.find(' ')]
charges = pp[k]
if any_nonzero(charges):
pc = AtomicProps(attr=ch,data = charges)
g.addAtProp(pc)
# Record geometry
self.geoms.append(g)
d_types = ['SCF','MP2','CI','QCI']
for k in pp:
# Energies
if ' Energy' in k:
et = k[:k.find(' ')]
e = pp[k]
if et == 'SCF':
continue
self.extra += '%s: %.8f' %(k,float(e)) + web.brn
# Densities
for dt in d_types:
if ('Total %s Density' % dt) in k:
self.densities.append(dt)
def generateAllCubes(self):
# {A,B}MO=HOMO LUMO ALL OccA OccB Valence Virtuals
# Laplacian
dprops = ['Density', 'Potential']
if self.openshell:
dprops.append('Spin')
props = ['AMO=HOMO','BMO=HOMO','AMO=LUMO','BMO=LUMO']
else:
props = ['MO=HOMO','MO=LUMO']
for d in self.densities:
for p in dprops:
prop = '%s=%s' % (p,d)
c = self.makeCube(prop)
self.cubes.append((c,prop))
for p in props:
c = self.makeCube(p)
self.cubes.append((c,p))
def webData(self):
we = self.settings.Engine3D()
b1,b2 = ElectronicStructure.webData(self)
if self.settings.full:
# Show all cubes
self.generateAllCubes()
s = ''
for c,p in self.cubes:
first_cube = c.wpcube
ctype = p[:p.find('=')]
if ctype == 'Density':
continue
elif ctype == 'Potential':
first_cube = c.wpcube.replace('Potential','Density')
second_cube = c.wpcube
script = we.JMolIsosurface(webpath = first_cube, webpath_other = second_cube, surftype=ctype)
else:
script = c.s_script
s += we.JMolButton(action=script, label=p)
b2 += s
elif self.isotype:
# Show only requested cube
p = self.isotype.lower()
p_splitted = p.split('=')
ctype = p_splitted[0]
if len(p_splitted)>1:
cvalue = p_splitted[1]
if ctype == 'potential':
p_pot = p
p_dens = p.replace('potential','Density')
c_pot = self.makeCube(p_pot)
c_dens = self.makeCube(p_dens)
first_cube = c_dens.wpcube
second_cube = c_pot.wpcube
script = we.JMolIsosurface(webpath = first_cube, webpath_other = second_cube, surftype=ctype)
else:
c = self.makeCube(p)
script = c.s_script
if ctype=='mo':
if cvalue=='homo':
cvalue = self.parsedProps['Number of alpha electrons']
e_orb = float(self.parsedProps['Alpha Orbital Energies'][int(cvalue)-1])*27.211
b2 += 'E(AMO) = %.3f eV' % (e_orb)
if ctype=='amo':
e_orb = float(self.parsedProps['Alpha Orbital Energies'][int(cvalue)-1])*27.211
b2 += 'E(AMO) = %.3f eV' % (e_orb)
if ctype=='bmo':
e_orb = float(self.parsedProps['Beta Orbital Energies'][int(cvalue)-1])*27.211
b2 += 'E(BMO) = %.3f eV' % (e_orb)
b2 += we.JMolButton(action=script, label=p)
return b1,b2
#
#
#
#
#
if __name__ == "__main__":
DebugLevel = logging.DEBUG
logging.basicConfig(level=DebugLevel)
from Settings import Settings
from Top import Top
Top.settings = Settings(FromConfigFile = True)
f = FchkGaussian()
f.file = sys.argv[1]
f.parse()
for k in sorted(f.parsedProps):
v = f.parsedProps[k]
if isinstance(v,list):
print '"%s": array of %i elements; first elements is %s' % (k,len(v),v[0])
else:
print '"%s": %s' % (k, str(v))
#f.makeCube('Density=SCF')
|
mtthwflst/terse
|
Parsers/FchkGaussian.py
|
Python
|
mit
| 13,516
|
[
"Gaussian"
] |
a2e6652c3203f8496383a3cbb1ceaab37e3b6a3fe457b70c3e9cda09f798d7e2
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""Main gpaw module."""
import os
import sys
try:
from distutils.util import get_platform
except ImportError:
modulepath = os.environ.get('GPAW_GET_PLATFORM')
if modulepath is None:
errmsg = ('Error: Could not get platform from distutils. '
'Set the GPAW_GET_PLATFORM environment variable to '
'the architecture string printed during build.')
raise ImportError(errmsg)
def get_platform():
return modulepath
from glob import glob
from os.path import join, isfile
import numpy as np
assert not np.version.version.startswith('1.6.0')
__all__ = ['GPAW', 'Calculator',
'Mixer', 'MixerSum', 'MixerDif', 'MixerSum2',
'CG', 'Davidson', 'RMM_DIIS', 'LCAO',
'PoissonSolver',
'FermiDirac', 'MethfesselPaxton',
'restart']
class ConvergenceError(Exception):
pass
class KohnShamConvergenceError(ConvergenceError):
pass
class PoissonConvergenceError(ConvergenceError):
pass
# Check for special command line arguments:
debug = False
trace = False
dry_run = 0
memory_estimate_depth = 2
parsize_domain = None
parsize_bands = None
sl_default = None
sl_diagonalize = None
sl_inverse_cholesky = None
sl_lcao = None
sl_lrtddft = None
buffer_size = None
extra_parameters = {}
profile = False
i = 1
while len(sys.argv) > i:
arg = sys.argv[i]
if arg.startswith('--gpaw-'):
# Found old-style gpaw command line argument:
arg = '--' + arg[7:]
raise RuntimeError('Warning: Use %s instead of %s.' %
(arg, sys.argv[i]))
if arg == '--trace':
trace = True
elif arg == '--debug':
debug = True
elif arg.startswith('--dry-run'):
dry_run = 1
if len(arg.split('=')) == 2:
dry_run = int(arg.split('=')[1])
elif arg.startswith('--memory-estimate-depth'):
memory_estimate_depth = -1
if len(arg.split('=')) == 2:
memory_estimate_depth = int(arg.split('=')[1])
elif arg.startswith('--domain-decomposition='):
parsize_domain = [int(n) for n in arg.split('=')[1].split(',')]
if len(parsize_domain) == 1:
parsize_domain = parsize_domain[0]
else:
assert len(parsize_domain) == 3
elif arg.startswith('--state-parallelization='):
parsize_bands = int(arg.split('=')[1])
elif arg.startswith('--sl_default='):
# --sl_default=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_default=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_default = ['d'] * 3
else:
sl_default = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_default.append(int(sl_args[sl_args_index]))
else:
sl_default.append(sl_args[sl_args_index])
elif arg.startswith('--sl_diagonalize='):
# --sl_diagonalize=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_diagonalize=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_diagonalize = ['d'] * 3
else:
sl_diagonalize = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_diagonalize.append(int(sl_args[sl_args_index]))
else:
sl_diagonalize.append(sl_args[sl_args_index])
elif arg.startswith('--sl_inverse_cholesky='):
# --sl_inverse_cholesky=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_inverse_cholesky=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_inverse_cholesky = ['d'] * 3
else:
sl_inverse_cholesky = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_inverse_cholesky.append(int(sl_args[sl_args_index]))
else:
sl_inverse_cholesky.append(sl_args[sl_args_index])
elif arg.startswith('--sl_lcao='):
# --sl_lcao=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_lcao=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_lcao = ['d'] * 3
else:
sl_lcao = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_lcao.append(int(sl_args[sl_args_index]))
else:
sl_lcao.append(sl_args[sl_args_index])
elif arg.startswith('--sl_lrtddft='):
        # --sl_lrtddft=nprow,npcol,mb,cpus_per_node
        # use 'd' for the default of one or more of the parameters
        # --sl_lrtddft=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_lrtddft = ['d'] * 3
else:
sl_lrtddft = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_lrtddft.append(int(sl_args[sl_args_index]))
else:
sl_lrtddft.append(sl_args[sl_args_index])
elif arg.startswith('--buffer_size='):
# Buffer size for MatrixOperator in MB
buffer_size = int(arg.split('=')[1])
elif arg.startswith('--gpaw='):
extra_parameters = eval('dict(%s)' % arg[7:])
elif arg == '--gpaw':
extra_parameters = eval('dict(%s)' % sys.argv.pop(i + 1))
elif arg.startswith('--profile='):
profile = arg.split('=')[1]
else:
i += 1
continue
# Delete used command line argument:
del sys.argv[i]
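# Example invocation of the flags handled above (hypothetical command line):
#     gpaw-python script.py --debug --domain-decomposition=2,2,2 --sl_default=2,2,64
# would leave debug=True, parsize_domain=[2, 2, 2] and sl_default=[2, 2, 64],
# with the consumed arguments removed from sys.argv.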
if debug:
np.seterr(over='raise', divide='raise', invalid='raise', under='ignore')
oldempty = np.empty
def empty(*args, **kwargs):
a = oldempty(*args, **kwargs)
try:
a.fill(np.nan)
except ValueError:
a.fill(-1000000)
return a
np.empty = empty
build_path = join(__path__[0], '..', 'build')
arch = '%s-%s' % (get_platform(), sys.version[0:3])
# If we are running the code from the source directory, then we will
# want to use the extension from the distutils build directory:
sys.path.insert(0, join(build_path, 'lib.' + arch))
def get_gpaw_python_path():
paths = os.environ['PATH'].split(os.pathsep)
paths.insert(0, join(build_path, 'bin.' + arch))
for path in paths:
if isfile(join(path, 'gpaw-python')):
return path
raise RuntimeError('Could not find gpaw-python!')
try:
setup_paths = os.environ['GPAW_SETUP_PATH'].split(os.pathsep)
except KeyError:
if os.pathsep == ';':
setup_paths = [r'C:\gpaw-setups']
else:
setup_paths = ['/usr/local/share/gpaw-setups',
'/usr/share/gpaw-setups']
from gpaw.aseinterface import GPAW
from gpaw.mixer import Mixer, MixerSum, MixerDif, MixerSum2
from gpaw.eigensolvers import Davidson, RMM_DIIS, CG, LCAO
from gpaw.poisson import PoissonSolver
from gpaw.occupations import FermiDirac, MethfesselPaxton
from gpaw.wavefunctions.pw import PW
class Calculator(GPAW):
def __init__(self, *args, **kwargs):
sys.stderr.write('Please start using GPAW instead of Calculator!\n')
GPAW.__init__(self, *args, **kwargs)
def restart(filename, Class=GPAW, **kwargs):
calc = Class(filename, **kwargs)
atoms = calc.get_atoms()
return atoms, calc
if trace:
indent = ' '
path = __path__[0]
from gpaw.mpi import parallel, rank
if parallel:
indent = 'CPU%d ' % rank
def f(frame, event, arg):
global indent
f = frame.f_code.co_filename
if not f.startswith(path):
return
if event == 'call':
print('%s%s:%d(%s)' % (indent, f[len(path):], frame.f_lineno,
frame.f_code.co_name))
indent += '| '
elif event == 'return':
indent = indent[:-2]
sys.setprofile(f)
if profile:
from cProfile import Profile
import atexit
prof = Profile()
def f(prof, filename):
prof.disable()
from gpaw.mpi import rank
if filename == '-':
prof.print_stats('time')
else:
prof.dump_stats(filename + '.%04d' % rank)
atexit.register(f, prof, profile)
prof.enable()
command = os.environ.get('GPAWSTARTUP')
if command is not None:
exec(command)
def is_parallel_environment():
"""Check if we are running in a parallel environment.
This function can be redefined in ~/.gpaw/rc.py. Example::
def is_parallel_environment():
import os
return 'PBS_NODEFILE' in os.environ
"""
return False
home = os.environ.get('HOME')
if home is not None:
rc = os.path.join(home, '.gpaw', 'rc.py')
if os.path.isfile(rc):
# Read file in ~/.gpaw/rc.py
execfile(rc)
|
robwarm/gpaw-symm
|
gpaw/__init__.py
|
Python
|
gpl-3.0
| 10,531
|
[
"GPAW"
] |
92948830100804ee1bc5992c4f0a4b44a1abb901c08e57b83ae80d4940e7aa86
|
# Python code to reading an image using OpenCV
import numpy as np
import cv2
import os
# Init video capture
source = 0  # default camera index
cap = cv2.VideoCapture(source)
if cap is None or not cap.isOpened():
    print('Error: unable to open video source: ', source)
exit(0)
while True:
    # Capture frame-by-frame
ret, frame = cap.read()
resized = cv2.resize(frame, (300, 200))
# Our operations on the frame come here
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
# applying different thresholding
# techniques on the input image
# all pixels value above 120 will
# be set to 255
ret, thresh1 = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)
ret, thresh2 = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY_INV)
ret, thresh3 = cv2.threshold(gray, 120, 255, cv2.THRESH_TRUNC)
ret, thresh4 = cv2.threshold(gray, 120, 255, cv2.THRESH_TOZERO)
ret, thresh5 = cv2.threshold(gray, 120, 255, cv2.THRESH_TOZERO_INV)
# applying different thresholding techniques on the input image
thresh6 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 41, 5)
thresh7 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 41, 5)
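    # blockSize=41, C=5 above: each pixel is compared against the (plain or
    # Gaussian-weighted) mean of its 41x41 neighbourhood minus 5; blockSize
    # must be odd, and these particular values are only one reasonable choice
    # for a webcam feed.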
# applying Otsu thresholding as an extra flag in binary thresholding
ret, thresh8 = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY +
cv2.THRESH_OTSU)
# the window showing output images
# with the corresponding thresholding
# techniques applied to the input images
cv2.imshow('Binary Threshold', thresh1)
cv2.imshow('Binary Threshold Inverted', thresh2)
cv2.imshow('Truncated Threshold', thresh3)
cv2.imshow('Set to 0', thresh4)
cv2.imshow('Set to 0 Inverted', thresh5)
cv2.imshow('Adaptive Mean', thresh6)
cv2.imshow('Adaptive Gaussian', thresh7)
cv2.imshow('Otsu Threshold', thresh8)
# Display the resulting frame
cv2.imshow('original', resized)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
iproduct/course-social-robotics
|
09-image-recognition-opencv-dnn/cv2_camera_tresholding.py
|
Python
|
gpl-2.0
| 2,292
|
[
"Gaussian"
] |
7a9baeaa8ddfbf0dc97a5335b3326fae49fd9678f46569848e342eedcd5c194d
|
import numpy as np
from ase import io, Atoms
def convertNames(inFile, outFile):
xyz = open(outFile,'w')
with open(inFile) as f:
for line in f:
split = line.strip().split()
if len(split) < 4:
xyz.write(line)
continue
name = split[0]
split[1:] = [float(x) for x in split[1:]]
if name[0].lower() == 'c':
split[0] = 'C'
elif name[0].lower() == 'o':
split[0] = 'O'
elif name[0].lower() == 'h':
split[0] = 'H'
elif name[0:2].lower() == 'ni':
split[0] = 'Ni'
elif name[0].lower() == 'n':
split[0] = 'N'
xyz.write(("{:10} "+"{:20.6f} "*3+"\n").format(*split))
xyz.close()
def getBonds(A,B,inMol,bondList):
#get all bonds A-B
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
allBonds = []
for molecule in mols:
nAtoms = len(molecule)
bonds = []
allIdx = []
for i in range(0,nAtoms):
if molecule[i].symbol == A:
allIdx.append(i)
for iIdx in allIdx:
try:
ibonds = bondList[str(iIdx)]
except:
continue
for bonded in ibonds:
if not molecule[bonded].symbol == B:
continue
bonds.append([iIdx, bonded])
#delete duplicates if A=B
if A == B:
for bond in bonds:
del bonds[bonds.index(list(reversed(bond)))]
allBonds.extend(bonds)
return allBonds
def getAngles(A,B,C,inMol,bondList):
#get all angles B-A-C
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
allAngles = []
for molecule in mols:
nAtoms = len(molecule)
angles = []
allIdx = []
for i in range(0,nAtoms):
if molecule[i].symbol == A:
allIdx.append(i)
for iIdx in allIdx:
try:
bonds = bondList[str(iIdx)]
except:
continue
for bonded in bonds:
if not molecule[bonded].symbol == B:
continue
for j in bonds:
if j == bonded:
continue
if molecule[j].symbol == C:
angles.append([bonded, iIdx, j])
#delete duplicates if B=C
if B == C:
for angle in angles:
del angles[angles.index(list(reversed(angle)))]
allAngles.extend(angles)
return allAngles
def getDihedrals(A,B,C,D,molecule,bondList):
"""Make a list of all Dihedrals"""
dihedralList = []
allIdx = []
nAtoms = len(molecule)
for i in range(0,nAtoms):
if molecule[i].symbol == A:
allIdx.append(i)
for idx in allIdx:#A
try:
ibonds = bondList[str(idx)]
except:
continue
for j in bondList[str(idx)]:
if not molecule[j].symbol == B:
continue
for k in bondList[str(j)]:
if not molecule[k].symbol == C:
continue
if idx == k:
continue
for l in bondList[str(k)]:
if not molecule[l].symbol == D:
continue
if (not l == k) and (not l == idx) and (not l == j):
dihedralList.append([idx,j,k,l])
return dihedralList
def bond(v1,v2):
""" Returns the length of the vector. """
return np.linalg.norm(np.subtract(v1,v2))
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle(v1, v2):
""" Angle in Degree"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))/np.pi*180
def dihedral(vec1,vec2,vec3):
""" Dihedral in Degree"""
v1_u = unit_vector(vec1)
v2_u = unit_vector(vec2)
v3_u = unit_vector(vec3)
#get the two normal vectors standing on the planes
v1v2 = np.cross(v1_u,v2_u)
v2v3 = np.cross(v2_u,v3_u)
#angle between them is the dihedral
return angle(v1v2,v2v3)
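# Illustrative check of the helpers above (example values only):
#
#     >>> bond([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
#     1.4142135623730951
#     >>> angle([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
#     90.0
#     >>> dihedral([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0])
#     90.0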
def getPBCVector(staticVec, vec, box, cut=5.0):
    #find new pbcVec using PBC so that pbcVec-staticVec is less than 5A away
    #test the 6 most probable directions first
pbcVec = np.subtract(vec,staticVec)
for i in range(0,3):
for j in [-1,1]:
newVec = np.add(vec,box[i]*j)
newVec = np.subtract(newVec,staticVec)
if np.linalg.norm(newVec) < cut:
return newVec
#if not yet exited, perhaps it is one of the boxes on the edges
#there are eight of them
for dim in range(0,3):
dims = list(range(0,3))
dims.remove(dim)
for i in [-1,1]:
for j in [-1,1]:
translate = np.add(box[dims[0]]*i,box[dims[1]]*j)
newVec = np.add(vec,translate)
newVec = np.subtract(newVec,staticVec)
if np.linalg.norm(newVec) < cut:
return newVec
#check the corner-connected boxes
for i in [-1,1]:
for j in [-1,1]:
for k in [-1,1]:
translate = np.add(box[0]*i,box[1]*j)
translate = np.add(translate,box[2]*k)
newVec = np.add(vec,translate)
newVec = np.subtract(newVec,staticVec)
if np.linalg.norm(newVec) < cut:
return newVec
#if there is no result yet something is wrong
raise ValueError("No matching PBC point found!")
def getBondValues(inMol,bondLists):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
bonds = {}
for name in bondLists:
bonds[name] = []
for molecule in mols:
for name, bondList in bondLists.items():
for item in bondList:
bonds[name].append(molecule.get_distance(item[0],item[1],mic=True))
return bonds
def getAngleValues(inMol,angleLists):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
angles = {}
for name in angleLists:
angles[name] = []
for molecule in mols:
for name, angleList in angleLists.items():
for item in angleList:
angles[name].append(molecule.get_angle(item[0],item[1],item[2],mic=True))
return angles
def getDihedralValues(inMol, dihedralLists):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
dihedrals = {}
for name in dihedralLists:
dihedrals[name] = []
for molecule in mols:
for name, dihedralList in dihedralLists.items():
for item in dihedralList:
dihedrals[name].append(molecule.get_dihedral(item[0],item[1],item[2],item[3],mic=True))
return dihedrals
def get_distance2plane(inMol,idxP1,idxP2,idxP3,idxDist):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
dists = []
for mol in mols:
molecule = mol.copy()
toCenter = -1.0 * molecule[idxP1].position
molecule.translate(toCenter)
print(molecule[idxP1])
print(molecule[idxP2])
print(molecule[idxP3])
toXAxis = molecule[idxP2].position
molecule.rotate(toXAxis,'x',rotate_cell=True)
print(molecule[idxP1])
print(molecule[idxP2])
print(molecule[idxP3])
toXYPlane = molecule[idxP3].position[:]
toXYPlane[0] = 0
molecule.rotate(toXYPlane,'y')
print(molecule[idxP1])
print(molecule[idxP2])
print(molecule[idxP3])
print(molecule[idxDist])
dists.append(abs(molecule[idxDist].position[-1]))
return dists
|
patrickmelix/Python4ChemistryTools
|
geometryAnalyzer.py
|
Python
|
mit
| 7,994
|
[
"ASE"
] |
cc2f4366f25d8253e9e3face139e9cba7a61b489a99c1b5a2201276ad2c41801
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Input file writer for SPECFEM3D_CARTESIAN.
:copyright:
Emanuele Casarotti (emanuele.casarotti@ingv.it), 2013
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
import inspect
import math
import os
# Define the required configuration items. The key is always the name of the
# configuration item and the value is a tuple. The first item in the tuple is
# the function or type that it will be converted to and the second is the
# documentation.
REQUIRED_CONFIGURATION = {
"NPROC": (int, "number of MPI processors"),
"NSTEP": (int, "The number of time steps"),
"DT": (float, "The time increment in seconds"),
"SIMULATION_TYPE": (
int, "forward or adjoint simulation, 1 = forward, "
"2 = adjoint, 3 = both simultaneously")
}
# The default configuration item. Contains everything that can sensibly be set
# to some default value. The syntax is very similar to the
# REQUIRED_CONFIGURATION except that the tuple now has three items, the first
# one being the actual default value.
DEFAULT_CONFIGURATION = {
"NOISE_TOMOGRAPHY": (
0, int, "noise tomography simulation, "
"0 = earthquake simulation, 1/2/3 = three steps in noise simulation"),
"SAVE_FORWARD": (False, bool, "save forward wavefield"),
"UTM_PROJECTION_ZONE": (
11, int,
"set up the utm zone, if SUPPRESS_UTM_PROJECTION is false"),
"SUPPRESS_UTM_PROJECTION": (True, bool, "suppress the utm projection"),
"NGNOD": (
8, int, "number of nodes for 2D and 3D shape functions for "
"hexahedral,we use either 8-node mesh elements (bricks) or 27-node "
"elements.If you use our internal mesher, the only option is 8-node "
"bricks (27-node elements are not supported)"),
"MODEL": (
"default", str, "setup the geological models, options are: "
"default (model parameters described by mesh properties), 1d_prem,"
"1d_socal,1d_cascadia,aniso,external,gll,salton_trough,tomo"),
"SEP_MODEL_DIRECTORY": (
"./DATA/my_SEP_model/", str, "SEP model folder if you are using one"),
"APPROXIMATE_OCEAN_LOAD": (
False, bool, "see SPECFEM3D_CARTESIAN manual"),
"TOPOGRAPHY": (False, bool, "see SPECFEM3D_CARTESIAN manual"),
"ATTENUATION": (False, bool, "see SPECFEM3D_CARTESIAN manual"),
"FULL_ATTENUATION_SOLID": (
False, bool, "see SPECFEM3D_CARTESIAN manual"),
"ANISOTROPY": (False, bool, "see SPECFEM3D_CARTESIAN manual"),
"GRAVITY": (False, bool, "see SPECFEM3D_CARTESIAN manual"),
"TOMOGRAPHY_PATH": ("../DATA/tomo_files/", str,
"path for external tomographic models files"),
"USE_OLSEN_ATTENUATION": (
False, bool,
"use the Olsen attenuation, Q_mu = constant * v_s attenuation rule"),
"OLSEN_ATTENUATION_RATIO": (
0.05, float,
"Olsen's constant for Q_mu = constant * v_s attenuation rule"),
"PML_CONDITIONS": (
False, bool,
"C-PML boundary conditions for a regional simulation"),
"PML_INSTEAD_OF_FREE_SURFACE": (
False, bool,
"C-PML boundary conditions instead of free surface on the top"),
"f0_FOR_PML": (12.7, float, "C-PML dominant frequency,see manual"),
"STACEY_ABSORBING_CONDITIONS": (
False, bool,
"Stacey absorbing boundary conditions for a regional simulation"),
"STACEY_INSTEAD_OF_FREE_SURFACE": (
False, bool, "Stacey absorbing top "
"surface (defined in mesh as 'free_surface_file')"),
"CREATE_SHAKEMAP": (False, bool, "save shakemap files"),
"MOVIE_SURFACE": (
False, bool,
"save velocity snapshot files only for surfaces"),
"MOVIE_TYPE": (1, int, ""),
"MOVIE_VOLUME": (
False, bool,
"save the entire volumetric velocity snapshot files "),
"SAVE_DISPLACEMENT": (
False, bool,
"save displacement instead velocity in the snapshot files"),
"USE_HIGHRES_FOR_MOVIES": (
False, bool,
"save high resolution snapshot files (all GLL points)"),
"NTSTEP_BETWEEN_FRAMES": (
200, int,
"number of timesteps between 2 consecutive snapshots"),
"HDUR_MOVIE": (0.0, float,
"half duration for snapshot files"),
"SAVE_MESH_FILES": (
False, bool,
"save VTK mesh files to check the mesh"),
"LOCAL_PATH": (
"../OUTPUT_FILES/DATABASES_MPI", str,
"path to store the local database file on each node"),
"NTSTEP_BETWEEN_OUTPUT_INFO": (
500, int, "interval at which we output "
"time step info and max of norm of displacement"),
"NTSTEP_BETWEEN_OUTPUT_SEISMOS": (
10000, int,
"interval in time steps for writing of seismograms"),
"NTSTEP_BETWEEN_READ_ADJSRC": (
0, int, "interval in time steps for "
"reading adjoint traces,0 = read the whole adjoint sources at the "
"same time"),
"USE_FORCE_POINT_SOURCE": (
False, bool, "# use a (tilted) "
"FORCESOLUTION force point source (or several) instead of a "
"CMTSOLUTION moment-tensor source. If this flag is turned on, "
"the FORCESOLUTION file must be edited by precising:\n- the "
"corresponding time-shift parameter,\n - the half duration parameter "
"of the source,\n - the coordinates of the source,\n - the magnitude "
"of the force source,\n - the components of a (non-unitary) direction "
"vector for the force source in the E/N/Z_UP basis.\n The direction "
"vector is made unitary internally in the code and thus only its "
"direction matters here;\n its norm is ignored and the norm of the "
"force used is the factor force source times the source time "
"function."),
"USE_RICKER_TIME_FUNCTION": (
False, bool, "set to true to use a Ricker "
"source time function instead of the source time functions set by "
"default to represent a (tilted) FORCESOLUTION force point source or "
"a CMTSOLUTION moment-tensor source."),
"GPU_MODE": (False, bool, "set .true. for GPU support"),
"ROTATE_PML_ACTIVATE": (False, bool, ""),
"ROTATE_PML_ANGLE": (0.0, float, ""),
"PRINT_SOURCE_TIME_FUNCTION": (False, bool, ""),
"COUPLE_WITH_EXTERNAL_CODE": (False, bool, ""),
"EXTERNAL_CODE_TYPE": (1, int, "1 = DSM, 2 = AxiSEM, 3 = FK"),
"TRACTION_PATH": ("./DATA/DSM_tractions_for_specfem3D/", str, ""),
"MESH_A_CHUNK_OF_THE_EARTH": (True, bool, ""),
"ADIOS_ENABLED": (False, bool, ""),
"ADIOS_FOR_DATABASES": (False, bool, ""),
"ADIOS_FOR_MESH": (False, bool, ""),
"ADIOS_FOR_FORWARD_ARRAYS": (False, bool, ""),
"ADIOS_FOR_KERNELS": (False, bool, ""),
}
def write(config):
"""
Writes a Par_file for SPECFEM3D_CARTESIAN.
"""
def fbool(value):
"""
Convert a value to a FORTRAN boolean representation.
"""
if value:
return ".true."
else:
return ".false."
for key, value in config.iteritems():
if isinstance(value, bool):
config[key] = fbool(value)
template_file = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))),
"specfem_cartesian_202_dev_par_file.template")
with open(template_file, "rt") as fh:
par_file_template = fh.read()
par_file = par_file_template.format(**config).strip()
return par_file
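# Illustrative usage sketch (hypothetical values, not from this module): the
# Par_file template expects all of the keys above, so a caller would typically
# merge the defaults with the required settings before rendering:
#
#     config = {key: spec[0] for key, spec in DEFAULT_CONFIGURATION.items()}
#     config.update({"NPROC": 4, "NSTEP": 1000, "DT": 0.01,
#                    "SIMULATION_TYPE": 1})
#     par_file = write(config)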
|
KNMI/VERCE
|
scigateway-api/src/scigateway_services/wfs_input_generator/backends/write_SPECFEM3D_CARTESIAN_202_DEV.py
|
Python
|
mit
| 7,881
|
[
"VTK"
] |
9ab76eac8d57cf31c2f64c51a58f9b5966dcd6676af64f0db5f6cea6a944383e
|
from distutils.core import setup
setup(
name = 'SymGP',
packages = ['SymGP'], # this must be the same as the name above
version = '0.0.1',
description = 'A symbolic algebra library for multivariate Gaussian and Gaussian process probabilistic models',
author = 'Joshua Aduol',
author_email = 'jaduol@yahoo.co.uk',
url = 'https://github.com/jna29/SymGP/',
download_url = 'https://github.com/jna29/SymGP/archive/0.0.1.tar.gz',
keywords = ['testing', 'logging', 'example'], # arbitrary keywords
classifiers = []
)
|
jna29/SymGP
|
symgp/setup.py
|
Python
|
mit
| 529
|
[
"Gaussian"
] |
c134267a643d84855e39a5789f645f8cb2b42717cec35f36f436da7b9bc21741
|
# The MIT License
#
# Copyright (c) 2008
# Shibzoukhov Zaur Moukhadinovich
# szport@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
NoneType = type(None)
from types import BuiltinFunctionType, BuiltinMethodType, FunctionType, MethodType
from sys import version_info
py_version = version_info[0]*10 + version_info[1]
if py_version >= 30:
simpleTypes = (NoneType, int, str, bool, float, bytes)
else:
simpleTypes = (NoneType, int, long, str, bool, float, bytes)
constants = ((), frozenset())
class Cacher(object):
#
def __init__(self):
self.objects_cache = {}
self.objects_info = {}
method_cache = {}
def cache_method(name):
def func(m, name=name):
method_cache[name] = m
return m
return func
def visit(self, o):
if type(o) in simpleTypes:
return
if o in constants:
return
oId = id(o)
if oId in self.objects_cache:
info = self.objects_info[oId]
if info == 0:
self.objects_info[oId] = 1
else:
self.objects_cache[oId] = o
self.objects_info[oId] = 0
method = method_cache.get(o.__class__.__name__, visit_object)
method(self, o)
#
@cache_method('list')
def visit_list(self, o):
for item in o:
visit(self, item)
#
@cache_method('set')
def visit_set(self, o):
for item in o:
visit(self, item)
#
    @cache_method('frozenset')
def visit_frozenset(self, o):
for item in o:
visit(self, item)
#
@cache_method('tuple')
def visit_tuple(self, o):
for item in o:
visit(self, item)
#
@cache_method('object')
def visit_object(self, o):
return
#
@cache_method('type')
def visit_type(self, o):
metatype = o.__class__
if metatype == type:
return
else:
return
#
@cache_method('dict')
def visit_dict(self, o):
for key,item in o.items():
visit(self, key)
visit(self, item)
@cache_method('property')
def visit_property(self, o):
for f in (o.fget, o.fset, o.fdel, o.__doc__):
if f is not None:
visit(self, f)
@cache_method('function')
def visit_function(self, o):
return
#
@cache_method('method')
def visit_method(self, o):
return visit(self, o.__self__)
#
@cache_method('builtin_function_or_method')
def visit_builtin_function_or_method(self, o):
return
@cache_method('object')
def visit_object(self, o):
if isinstance(o, type):
return visit_type(self, o)
reduce = getattr(o, '__reduce__', None)
if reduce:
state = reduce()
return with_reduce(self, state)
else:
newname = o.__class__.__name__
newargs = None
getnewargs = getattr(o, '__getnewargs__', None)
if getnewargs:
newargs = getnewargs()
state = None
getstate = getattr(o, '__getstate__', None)
if getstate:
state = getstate()
else:
state = getattr(o, '__dict__', None)
if state is None:
state = {}
                _missing = object()  # local sentinel ('null' was never defined)
                for name in o.__slots__:
                    value = getattr(o, name, _missing)
                    if value is not _missing:
                        state[name] = value
return without_reduce(self, newargs, state)
#
def with_reduce(self, state):
visit(self, state[0])
n = len(state)
if n > 1:
if state[1]:
for item in state[1]:
visit(self, item)
if n > 2:
if state[2]:
for k, v in state[2].items():
visit(self, v)
if n > 3:
if state[3]:
for v in state[3]:
visit(self, v)
if n > 4:
if state[4]:
for k, v in state[4].items():
visit(self, k)
visit(self, v)
#
def without_reduce(self, args, state):
if args:
for item in args:
visit(self, item)
if state:
visit(self, state)
|
intellimath/pyon
|
dumpcache.py
|
Python
|
mit
| 5,027
|
[
"VisIt"
] |
25004a2eb35f9c2607c70b580ed4791734f0a134ff843dcb4692390793eb1c69
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Classes for discrete measure data objects.
Includes point_mass, measure, product_measure, and scenario classes.
"""
# Adapted from seesaw2d.py in branches/UQ/math/examples2/
# For usage example, see seesaw2d_inf_example.py .
from mystic.math.measures import impose_mean, impose_expectation
from mystic.math.measures import impose_spread, impose_variance
from mystic.math.measures import impose_weight_norm
class point_mass(object):
""" a point_mass object with weight and position
queries:
p.weight -- returns weight
p.position -- returns position
p.rms -- returns the square root of sum of squared position
settings:
p.weight = w1 -- set the weight
p.position = x1 -- set the position
"""
def __init__(self, position, weight=1.0):
self.weight = weight
self.position = position
return
def __repr__(self):
return "(%s @%s)" % (self.weight, self.position)
def __rms(self): # square root of sum of squared positions
from math import sqrt
return sqrt(sum([i**2 for i in self.position]))
# interface
rms = property(__rms)
pass
class measure(list): #FIXME: meant to only accept point_masses...
""" a 1-d collection of point_masses forming a 'discrete_measure'
s = measure([point_mass1, point_mass2, ..., point_massN])
where a point_mass has weight and position
queries:
s.weights -- returns list of weights
s.positions -- returns list of positions
s.npts -- returns the number of point_masses
s.mass -- calculates sum of weights
s.center_mass -- calculates sum of weights*positions
s.range -- calculates |max - min| for positions
s.var -- calculates mean( |positions - mean(positions)|**2 )
settings:
s.weights = [w1, w2, ..., wn] -- set the weights
s.positions = [x1, x2, ..., xn] -- set the positions
s.normalize() -- normalize the weights to 1.0
s.center_mass(R) -- set the center of mass
s.range(R) -- set the range
s.var(R) -- set the variance
methods:
s.support() -- get the positions that have corresponding non-zero weights
    s.support_index() -- get the indices of positions that have support
s.maximum(f) -- calculate the maximum for a given function
s.minimum(f) -- calculate the minimum for a given function
s.ess_maximum(f) -- calculate the maximum for support of a given function
s.ess_minimum(f) -- calculate the minimum for support of a given function
s.expect(f) -- calculate the expectation
s.set_expect((center,delta), f) -- impose expectation by adjusting positions
notes:
- constraints should impose that sum(weights) should be 1.0
- assumes that s.n = len(s.positions) == len(s.weights)
"""
def support_index(self, tol=0):
"""get the indicies of the positions which have non-zero weight
Inputs:
tol -- weight tolerance, where any weight <= tol is considered zero
"""
from measures import support_index
return support_index(self.weights, tol)
def support(self, tol=0):
"""get the positions which have non-zero weight
Inputs:
tol -- weight tolerance, where any weight <= tol is considered zero
"""
from measures import support
return support(self.positions, self.weights, tol)
def __weights(self):
return [i.weight for i in self]
def __positions(self):
return [i.position for i in self]
def __n(self):
return len(self)
def __mass(self):
return sum(self.weights)
#from mystic.math.measures import norm
#return norm(self.weights) # normalized by self.npts
def __mean(self):
from mystic.math.measures import mean
return mean(self.positions, self.weights)
def __range(self):
from mystic.math.measures import spread
return spread(self.positions)
def __variance(self):
from mystic.math.measures import variance
return variance(self.positions, self.weights)
def __set_weights(self, weights):
for i in range(len(weights)):
self[i].weight = weights[i]
return
def __set_positions(self, positions):
for i in range(len(positions)):
self[i].position = positions[i]
return
def normalize(self):
"""normalize the weights"""
self.positions, self.weights = impose_weight_norm(self.positions, self.weights)
return
def __set_mean(self, m):
self.positions = impose_mean(m, self.positions, self.weights)
return
def __set_range(self, r):
self.positions = impose_spread(r, self.positions, self.weights)
return
def __set_variance(self, v):
self.positions = impose_variance(v, self.positions, self.weights)
return
def maximum(self, f):
"""calculate the maximum for a given function
Inputs:
f -- a function that takes a list and returns a number
"""
from measures import maximum
return maximum(f, self.positions)
def ess_maximum(self, f, tol=0.):
"""calculate the maximum for the support of a given function
Inputs:
f -- a function that takes a list and returns a number
tol -- weight tolerance, where any weight <= tol is considered zero
"""
from measures import ess_maximum
return ess_maximum(f, self.positions, self.weights, tol)
def minimum(self, f):
"""calculate the minimum for a given function
Inputs:
f -- a function that takes a list and returns a number
"""
from measures import minimum
return minimum(f, self.positions)
def ess_minimum(self, f, tol=0.):
"""calculate the minimum for the support of a given function
Inputs:
f -- a function that takes a list and returns a number
tol -- weight tolerance, where any weight <= tol is considered zero
"""
from measures import ess_minimum
return ess_minimum(f, self.positions, self.weights, tol)
def expect(self, f):
"""calculate the expectation for a given function
Inputs:
f -- a function that takes a list and returns a number
""" #XXX: maybe more natural if f takes a positional value x, not a list x ?
from mystic.math.measures import expectation
positions = [(i,) for i in self.positions]
return expectation(f, positions, self.weights)
def set_expect(self, (m,D), f, bounds=None, constraints=None):
"""impose a expectation on a dirac measure
Inputs:
(m,D) -- tuple of expectation m and acceptable deviation D
f -- a function that takes a list and returns a number
bounds -- tuple of lists of bounds (lower_bounds, upper_bounds)
constraints -- a function that takes a product_measure c' = constraints(c)
""" #XXX: maybe more natural if f takes a positional value x, not a list x ?
#XXX: maybe also natural c' = constraints(c) where c is a measure ?
if constraints: # then need to adjust interface for 'impose_expectation'
def cnstr(x, w):
c = compose(x,w)
c = constraints(c)
return decompose(c)[0]
else: cnstr = constraints # 'should' be None
positions = impose_expectation((m,D), f, [self.npts], bounds, \
self.weights, constraints=cnstr)
from numpy import array
self.positions = list(array(positions)[:,0])
#from numpy import squeeze
#self.positions = list(squeeze(positions))
return
# interface
weights = property(__weights, __set_weights)
positions = property(__positions, __set_positions)
###XXX: why not use 'points' also/instead?
npts = property(__n )
mass = property(__mass )
range = property(__range, __set_range)
center_mass = property(__mean, __set_mean)
var = property(__variance, __set_variance)
# backward compatibility
coords = positions
get_expect = expect
mean = center_mass
pass
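# Illustrative sketch of the measure interface documented above (arbitrary numbers):
#
#     >>> s = measure([point_mass(0.0, weight=0.5), point_mass(2.0, weight=0.5)])
#     >>> s.weights, s.positions
#     ([0.5, 0.5], [0.0, 2.0])
#     >>> s.mass
#     1.0
#     >>> s.center_mass   # weighted mean of the positions
#     1.0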
class product_measure(list): #FIXME: meant to only accept sets...
""" a N-d measure-theoretic product of discrete measures
c = product_measure([measure1, measure2, ..., measureN])
where all measures are orthogonal
queries:
c.npts -- returns total number of point_masses
c.weights -- returns list of weights
c.positions -- returns list of position tuples
c.mass -- returns list of weight norms
c.pts -- returns number of point_masses for each discrete measure
c.wts -- returns list of weights for each discrete measure
c.pos -- returns list of positions for each discrete measure
settings:
c.positions = [(x1,y1,z1),...] -- set positions (tuples in product measure)
methods:
c.pof(f) -- calculate the probability of failure
c.sampled_pof(f, npts) -- calculate the pof using sampled point_masses
c.expect(f) -- calculate the expectation
c.set_expect((center,delta), f) -- impose expectation by adjusting positions
c.flatten() -- convert measure to a flat list of parameters
c.load(params, pts) -- 'fill' the measure from a flat list of parameters
c.update(params) -- 'update' the measure from a flat list of parameters
notes:
- constraints impose expect (center - delta) <= E <= (center + delta)
- constraints impose sum(weights) == 1.0 for each set
- assumes that c.npts = len(c.positions) == len(c.weights)
- weight wxi should be same for each (yj,zk) at xi; similarly for wyi & wzi
"""
def __val(self):
raise NotImplementedError, "'value' is undefined in a measure"
def __pts(self):
return [i.npts for i in self]
def __wts(self):
return [i.weights for i in self]
def __pos(self):
return [i.positions for i in self]
def __mean(self):
return [i.center_mass for i in self]
def __set_mean(self, center_masses):
[i._measure__set_mean(center_masses[m]) for (m,i) in enumerate(self)]
#for i in range(len(center_masses)):
# self[i].center_mass = center_masses[i]
return
def __n(self):
from numpy import product
return product(self.pts)
def support_index(self, tol=0):
from measures import support_index
return support_index(self.weights, tol)
def support(self, tol=0): #XXX: better if generated positions only when needed
from measures import support
return support(self.positions, self.weights, tol)
def __weights(self):
from mystic.math.measures import _pack
from numpy import product
weights = _pack(self.wts)
_weights = []
for wts in weights:
_weights.append(product(wts))
return _weights
def __positions(self):
from mystic.math.measures import _pack
return _pack(self.pos)
def __set_positions(self, positions):
from mystic.math.measures import _unpack
positions = _unpack(positions, self.pts)
for i in range(len(positions)):
self[i].positions = positions[i]
return
#def __get_center(self):
# return self.__center
#def __get_delta(self):
# return self.__delta
def __mass(self):
return [self[i].mass for i in range(len(self))]
def maximum(self, f): #XXX: return max of all or return all max?
return max([i.maximum(f) for i in self])
def minimum(self, f): #XXX: return min of all or return all min?
return min([i.minimum(f) for i in self])
def ess_maximum(self, f, tol=0.): #XXX: return max of all or return all max?
return max([i.ess_maximum(f, tol) for i in self])
def ess_minimum(self, f, tol=0.): #XXX: return min of all or return all min?
return min([i.ess_minimum(f, tol) for i in self])
def expect(self, f):
"""calculate the expectation for a given function
Inputs:
f -- a function that takes a list and returns a number
"""
from mystic.math.measures import expectation
return expectation(f, self.positions, self.weights)
def set_expect(self, (m,D), f, bounds=None, constraints=None):
"""impose a expectation on a product measure
Inputs:
(m,D) -- tuple of expectation m and acceptable deviation D
f -- a function that takes a list and returns a number
bounds -- tuple of lists of bounds (lower_bounds, upper_bounds)
constraints -- a function that takes a product_measure c' = constraints(c)
"""
#self.__center = m
#self.__delta = D
if constraints: # then need to adjust interface for 'impose_expectation'
def cnstr(x, w):
c = compose(x,w)
c = constraints(c)
return decompose(c)[0]
else: cnstr = constraints # 'should' be None
self.positions = impose_expectation((m,D), f, self.pts, bounds, \
self.weights, constraints=cnstr)
return
def pof(self, f):
"""calculate probability of failure over a given function, f,
where f takes a list of (product_measure) positions and returns a single value
Inputs:
f -- a function that returns True for 'success' and False for 'failure'
"""
u = 0
set = zip(self.positions, self.weights)
for x in set:
if f(x[0]) <= 0.0:
u += x[1]
return u
# for i in range(self.npts):
# #if f(self.positions[i]) > 0.0: #NOTE: f(x) > 0.0 yields prob of success
# if f(self.positions[i]) <= 0.0: #NOTE: f(x) <= 0.0 yields prob of failure
# u += self.weights[i]
# return u #XXX: does this need to be normalized?
def sampled_pof(self, f, npts=10000):
"""calculate probability of failure over a given function, f,
where f takes a list of (product_measure) positions and returns a single value
Inputs:
f -- a function that returns True for 'success' and False for 'failure'
npts -- number of point_masses sampled from the underlying discrete measures
"""
from mystic.math.samples import _pof_given_samples
pts = self.sampled_support(npts)
return _pof_given_samples(f, pts)
def sampled_support(self, npts=10000): ##XXX: was 'def support'
"""randomly select support points from the underlying discrete measures
Inputs:
npts -- number of points sampled from the underlying discrete measures
Returns:
pts -- a nested list of len(prod_measure) lists, each of len(npts)
"""
from mystic.math.measures import weighted_select as _select
pts = []
for i in range(npts):
# for a single trial, select positions from all sets
pts.append( [_select(set.positions, set.weights) for set in self] )
# convert pts to len(prod_meas) lists, each of len(npts)
from numpy import transpose
return transpose(pts) #XXX: assumes 'positions' is a list of floats
def update(self, params):
"""update the product measure from a list of parameters
The dimensions of the product measure will not change"""
pts = self.pts
_len = 2 * sum(pts)
if len(params) > _len: # if Y-values are appended to params
params, values = params[:_len], params[_len:]
pm = unflatten(params, pts)
zo = pm.count([])
self[:] = pm[:len(self) - zo] + self[len(pm) - zo:]
return
def load(self, params, pts):
"""load a list of parameters corresponding to N x 1D discrete measures
Inputs:
params -- a list of parameters (see 'notes')
pts -- number of point_masses in each of the underlying discrete measures
Notes:
To append len(pts) new discrete measures to product measure c, where
pts = (M, N, ...)
params = [wt_x1, ..., wt_xM, \
x1, ..., xM, \
wt_y1, ..., wt_yN, \
y1, ..., yN, \
...]
Thus, the provided list is M weights and the corresponding M positions,
followed by N weights and the corresponding N positions, with this
pattern followed for each new dimension desired for the product measure.
"""
_len = 2 * sum(pts)
if len(params) > _len: # if Y-values are appended to params
params, values = params[:_len], params[_len:]
self.extend( unflatten(params, pts) )
return
def flatten(self):
"""flatten the product_measure into a list of parameters
Returns:
params -- a list of parameters (see 'notes')
Notes:
For a product measure c where c.pts = (M, N, ...), then
params = [wt_x1, ..., wt_xM, \
x1, ..., xM, \
wt_y1, ..., wt_yN, \
y1, ..., yN, \
...]
Thus, the returned list is M weights and the corresponding M positions,
followed by N weights and the corresponding N positions, with this
pattern followed for each dimension of the product measure.
"""
params = flatten(self)
return params
#XXX: name stinks... better as "non_redundant"? ...is really a helper
def differs_by_one(self, ith, all=True, index=True):
"""get the product measure coordinates where the associated binary
string differs by exactly one index
Inputs:
ith = the target index
    all = if False, return only the results for indices < ith
    index = if True, return the index of the results (not the results themselves)
"""
from mystic.math.compressed import index2binary, differs_by_one
b = index2binary(range(self.npts), self.npts)
return differs_by_one(ith, b, all, index)
  def select(self, *index, **kwds):
    """generator for product measure positions due to selected position indices
(NOTE: only works for product measures of dimension 2^K)
>>> r
[[9, 8], [1, 3], [4, 2]]
>>> r.select(*range(r.npts))
[(9, 1, 4), (8, 1, 4), (9, 3, 4), (8, 3, 4), (9, 1, 2), (8, 1, 2), (9, 3, 2), (8, 3, 2)]
>>>
>>> _pack(r)
[(9, 1, 4), (8, 1, 4), (9, 3, 4), (8, 3, 4), (9, 1, 2), (8, 1, 2), (9, 3, 2), (8, 3, 2)]
"""
from mystic.math.compressed import index2binary, binary2coords
v = index2binary(list(index), self.npts)
return binary2coords(v, self.pos, **kwds)
#XXX: '_pack' requires resorting ([::-1]) so that indexing is wrong.
# Better if modify mystic's pack to match sorting of binary strings ?
#__center = None
#__delta = None
# interface
npts = property(__n )
weights = property(__weights )
positions = property(__positions, __set_positions )
center_mass = property(__mean, __set_mean)
#center = property(__get_center ) #FIXME: remove c.center and c.delta... or
#delta = property(__get_delta ) # replace with c._params (e.g. (m,D))
#expect = property(__expect, __set_expect )
mass = property(__mass )
pts = property(__pts )
wts = property(__wts )
pos = property(__pos )
# backward compatibility
coords = positions
get_expect = expect
pass
class scenario(product_measure):  #FIXME: meant to only accept sets...
  """ an N-d product measure (collection of dirac measures) with values
s = scenario(product_measure, [value1, value2, ..., valueN])
where each point_mass in the product measure is paried with a value
(essentially, a dataset in product_measure representation)
queries:
    s.npts -- returns total number of point_masses
s.weights -- returns list of weights
s.positions -- returns list of position tuples
s.values -- returns list of values
s.mass -- returns list of weight norms
s.pts -- returns number of point_masses for each discrete measure
s.wts -- returns list of weights for each discrete measure
s.pos -- returns list of positions for each discrete measure
settings:
s.positions = [(x1,y1,z1),...] -- set positions (tuples in product measure)
s.values = [v1,v2,v3,...] -- set the values (correspond to position tuples)
methods:
s.pof(f) -- calculate the probability of failure
s.pof_value(f) -- calculate the probability of failure using the values
s.sampled_pof(f, npts) -- calculate the pof using sampled points
s.expect(f) -- calculate the expectation
s.set_expect((center,delta), f) -- impose expectation by adjusting positions
s.mean_value() -- calculate the mean values for a scenario
s.set_mean_value(m) -- impose mean value by adjusting values
s.set_feasible(data) -- impose shortness by adjusting positions and values
s.short_wrt_data(data) -- check for shortness with respect to data
s.short_wrt_self(L) -- check for shortness with respect to self
s.set_valid(model) -- impose validity by adjusting positions and values
s.valid_wrt_model(model) -- check for validity with respect to the model
s.flatten() -- convert measure to a flat list of parameters
s.load(params, pts) -- 'fill' the measure from a flat list of parameters
s.update(params) -- 'update' the measure from a flat list of parameters
notes:
- constraints impose expect (center - delta) <= E <= (center + delta)
- constraints impose sum(weights) == 1.0 for each set
- assumes that s.npts = len(s.positions) == len(s.weights)
- weight wxi should be same for each (yj,zk) at xi; similarly for wyi & wzi
"""
def __init__(self, pm=None, values=None):
super(product_measure,self).__init__()
if pm:
pm = product_measure(pm)
self.load(pm.flatten(), pm.pts)
if not values: values = []
self.__Y = values # storage for values of s.positions
return
def __values(self):
return self.__Y
def __set_values(self, values):
self.__Y = values[:]
return
def mean_value(self): # get mean of y's
"""calculate the mean of the associated values for a scenario"""
from mystic.math.measures import mean
return mean(self.values, self.weights)
def set_mean_value(self, m): # set mean of y's
"""set the mean for the associated values of a scenario"""
from mystic.math.measures import impose_mean
self.values = impose_mean(m, self.values, self.weights)
return
def valid_wrt_model(self, model, blamelist=False, pairs=True, \
all=False, raw=False, **kwds):
"""check for scenario validity with respect to the model
Inputs:
model -- the model function, y' = F(x')
blamelist -- if True, report which points are infeasible
    pairs -- if True, report indices of infeasible points
    all -- if True, report results for each point (as opposed to all points)
raw -- if True, report numerical results (opposed to boolean results)
Additional Inputs:
ytol -- maximum acceptable difference |y - F(x')|; a single value
xtol -- maximum acceptable difference |x - x'|; an iterable or single value
cutoff -- zero out distances less than cutoff; typically: ytol, 0.0, or None
hausdorff -- norm; where if given, ytol = |y - F(x')| + |x - x'|/norm
Notes:
    xtol defines the n-dimensional base of a pillar of height ytol, centered at
    each point. The region inside the pillar defines the space where a "valid"
    model must intersect. If xtol is not specified, then the base of the pillar
    will be a dirac at x' = x. This function performs an optimization for each
    x to find an appropriate x'. While cutoff and ytol are very tightly related,
    they play distinct roles; ytol is used to set the optimization termination
for an acceptable |y - F(x')|, while cutoff is applied post-optimization.
If we are using the hausdorff norm, then ytol will set the optimization
termination for an acceptable |y - F(x')| + |x - x'|/norm, where the x
values are normalized by norm = hausdorff.
"""
from mystic.math.legacydata import dataset
data = dataset()
data.load(self.positions, self.values)
#data.lipschitz = L
for i in range(len(data)):
data[i].id = i
return data.valid(model, blamelist=blamelist, pairs=pairs, \
all=all, raw=raw, **kwds)
def short_wrt_self(self, L, blamelist=False, pairs=True, \
all=False, raw=False, **kwds):
"""check for shortness with respect to the scenario itself
Inputs:
L -- the lipschitz constant
blamelist -- if True, report which points are infeasible
    pairs -- if True, report indices of infeasible points
    all -- if True, report results for each point (as opposed to all points)
raw -- if True, report numerical results (opposed to boolean results)
Additional Inputs:
tol -- maximum acceptable deviation from shortness
cutoff -- zero out distances less than cutoff; typically: tol, 0.0, or None
Notes:
Each point x,y can be thought to have an associated double-cone with slope
equal to the lipschitz constant. Shortness with respect to another point is
defined by the first point not being inside the cone of the second. We can
allow for some error in shortness, a short tolerance 'tol', for which the
point x,y is some acceptable y-distance inside the cone. While very tightly
related, cutoff and tol play distinct roles; tol is subtracted from
calculation of the lipschitz_distance, while cutoff zeros out the value
of any element less than the cutoff.
"""
from mystic.math.legacydata import dataset
data = dataset()
data.load(self.positions, self.values)
data.lipschitz = L
for i in range(len(data)):
data[i].id = i
return data.short(blamelist=blamelist, pairs=pairs, \
all=all, raw=raw, **kwds)
def short_wrt_data(self, data, L=None, blamelist=False, pairs=True, \
all=False, raw=False, **kwds):
"""check for shortness with respect to the given data
Inputs:
data -- a collection of data points
L -- the lipschitz constant, if different from that provided with data
blamelist -- if True, report which points are infeasible
    pairs -- if True, report indices of infeasible points
    all -- if True, report results for each point (as opposed to all points)
raw -- if True, report numerical results (opposed to boolean results)
Additional Inputs:
tol -- maximum acceptable deviation from shortness
cutoff -- zero out distances less than cutoff; typically cutoff = tol or 0.0
Notes:
Each point x,y can be thought to have an associated double-cone with slope
equal to the lipschitz constant. Shortness with respect to another point is
defined by the first point not being inside the cone of the second. We can
allow for some error in shortness, a short tolerance 'tol', for which the
point x,y is some acceptable y-distance inside the cone. While very tightly
related, cutoff and tol play distinct roles; tol is subtracted from
calculation of the lipschitz_distance, while cutoff zeros out the value
of any element less than the cutoff.
"""
from mystic.math.legacydata import dataset
_self = dataset()
_self.load(self.positions, self.values)
_self.lipschitz = data.lipschitz
for i in range(len(_self)):
_self[i].id = i
return _self.short(data, L=L, blamelist=blamelist, pairs=pairs, \
all=all, raw=raw, **kwds)
def set_feasible(self, data, cutoff=0.0, bounds=None, constraints=None, \
with_self=True, **kwds):
"""impose shortness on a scenario with respect to given data points
Inputs:
data -- a collection of data points
cutoff -- acceptable deviation from shortness
Additional Inputs:
with_self -- if True, shortness will also be imposed with respect to self
tol -- acceptable optimizer termination before sum(infeasibility) = 0.
bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds)
constraints -- a function that takes a flat list parameters
x' = constraints(x)
"""
# imposes: is_short(x, x'), is_short(x, z )
# use additional 'constraints' kwds to impose: y >= m, norm(wi) = 1.0
pm = impose_feasible(cutoff, data, guess=self.pts, bounds=bounds, \
constraints=constraints, with_self=with_self, **kwds)
self.update( pm.flatten(all=True) )
return
def set_valid(self, model, cutoff=0.0, bounds=None, constraints=None, **kwds):
"""impose validity on a scenario with respect to given data points
Inputs:
model -- the model function, y' = F(x'), that approximates reality, y = G(x)
cutoff -- acceptable model invalidity |y - F(x')|
Additional Inputs:
hausdorff -- norm; where if given, ytol = |y - F(x')| + |x - x'|/norm
xtol -- acceptable pointwise graphical distance of model from reality
tol -- acceptable optimizer termination before sum(infeasibility) = 0.
bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds)
constraints -- a function that takes a flat list parameters
x' = constraints(x)
Notes:
    xtol defines the n-dimensional base of a pillar of height cutoff, centered at
    each point. The region inside the pillar defines the space where a "valid"
    model must intersect. If xtol is not specified, then the base of the pillar
will be a dirac at x' = x. This function performs an optimization to find
a set of points where the model is valid. Here, tol is used to set the
optimization termination for the sum(graphical_distances), while cutoff is
used in defining the graphical_distance between x,y and x',F(x').
"""
# imposes is_feasible(R, Cv), where R = graphical_distance(model, pts)
# use additional 'constraints' kwds to impose: y >= m, norm(wi) = 1.0
pm = impose_valid(cutoff, model, guess=self, \
bounds=bounds, constraints=constraints, **kwds)
self.update( pm.flatten(all=True) )
return
def pof_value(self, f):
"""calculate probability of failure over a given function, f,
where f takes a list of (scenario) values and returns a single value
Inputs:
f -- a function that returns True for 'success' and False for 'failure'
"""
u = 0
set = zip(self.values, self.weights)
for x in set:
if f(x[0]) <= 0.0:
u += x[1]
return u
def update(self, params): #XXX: overwritten. create standalone instead ?
"""update the scenario from a list of parameters
The dimensions of the scenario will not change"""
pts = self.pts
_len = 2 * sum(pts)
if len(params) > _len: # if Y-values are appended to params
params, values = params[:_len], params[_len:]
self.values = values[:len(self.values)] + self.values[len(values):]
pm = unflatten(params, pts)
zo = pm.count([])
self[:] = pm[:len(self) - zo] + self[len(pm) - zo:]
return
def load(self, params, pts): #XXX: overwritten. create standalone instead ?
"""load a list of parameters corresponding to N x 1D discrete measures
Inputs:
params -- a list of parameters (see 'notes')
pts -- number of points in each of the underlying discrete measures
Notes:
To append len(pts) new discrete measures to scenario c, where
pts = (M, N, ...)
params = [wt_x1, ..., wt_xM, \
x1, ..., xM, \
wt_y1, ..., wt_yN, \
y1, ..., yN, \
...]
Thus, the provided list is M weights and the corresponding M positions,
followed by N weights and the corresponding N positions, with this
pattern followed for each new dimension desired for the scenario.
"""
_len = 2 * sum(pts)
if len(params) > _len: # if Y-values are appended to params
params, self.values = params[:_len], params[_len:]
self.extend( unflatten(params, pts) )
return
def flatten(self, all=True): #XXX: overwritten. create standalone instead ?
"""flatten the scenario into a list of parameters
Returns:
params -- a list of parameters (see 'notes')
Notes:
For a scenario c where c.pts = (M, N, ...), then
params = [wt_x1, ..., wt_xM, \
x1, ..., xM, \
wt_y1, ..., wt_yN, \
y1, ..., yN, \
...]
Thus, the returned list is M weights and the corresponding M positions,
followed by N weights and the corresponding N positions, with this
pattern followed for each dimension of the scenario.
"""
params = flatten(self)
if all: params.extend(self.values) # if Y-values, return those as well
return params
# interface
values = property(__values, __set_values )
get_mean_value = mean_value
pass
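# A minimal usage sketch for the scenario class above (illustrative numbers
# only, not taken from the original test suite): load one 2-point discrete
# measure plus paired values from the flattened-parameter format, then query it.
def _example_scenario_usage():
  "small sketch: params = [w_x1, w_x2, x1, x2] with the two Y-values appended"
  s = scenario()
  s.load([.5, .5, 1., 3., 10., 20.], (2,))
  assert s.npts == 2 and s.values == [10., 20.]
  assert abs(s.mean_value() - 15.0) < 1e-8   # weighted mean of the paired values
  s.set_mean_value(30.0)                     # shift values so the mean becomes 30.0
  assert abs(s.mean_value() - 30.0) < 1e-8
  return s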
#---------------------------------------------
# creators and destructors from parameter list
def _mimic(samples, weights):
"""Generate a product_measure object from a list of N product measure
positions and a list of N weights. The resulting product measure will
mimic the original product measure's statistics, but be larger in size.
For example:
>>> smp = [[-6,3,6],[-2,4],[1]]
>>> wts = [[.4,.2,.4],[.5,.5],[1.]]
    >>> c = compose(smp, wts)
>>> d = _mimic(c.positions, c.weights)
>>> c[0].center_mass == d[0].center_mass
True
>>> c[1].range == d[1].range
True
>>> c.npts == d.npts
False
>>> c.npts == d[0].npts
True
"""
x = zip(*samples) # 'mimic' to a nested list
w = [weights for i in range(len(x))] # 'mimic' to a nested list
return compose(x,w)
def _uniform_weights(samples):
"""generate a nested list of N x 1D weights from a nested list of N x 1D
discrete measure positions, where the weights have norm 1.0 and are uniform.
>>> c.pos
[[1, 2, 3], [4, 5], [6]]
>>> _uniform_weights(c.pos)
[[0.333333333333333, 0.333333333333333, 0.333333333333333], [0.5, 0.5], [1.0]]
"""
from mystic.math.measures import normalize
return [normalize([1.]*len(xi)) for xi in samples]
def _list_of_measures(samples, weights=None):
"""generate a list of N x 1D discrete measures from a nested list of N x 1D
discrete measure positions and a nested list of N x 1D weights.
Note this function does not return a product measure, it returns a list."""
total = []
if not weights: weights = _uniform_weights(samples)
for i in range(len(samples)):
next = measure()
for j in range(len(samples[i])):
next.append(point_mass( samples[i][j], weights[i][j] ))
total.append(next)
return total
def compose(samples, weights=None):
"""Generate a product_measure object from a nested list of N x 1D
discrete measure positions and a nested list of N x 1D weights. If weights
are not provided, a uniform distribution with norm = 1.0 will be used."""
if not weights: weights = _uniform_weights(samples)
total = _list_of_measures(samples, weights)
c = product_measure(total)
return c
def decompose(c):
"""Decomposes a product_measure object into a nested list of
N x 1D discrete measure positions and a nested list of N x 1D weights."""
from mystic.math.measures import _nested_split
w, x = _nested_split(flatten(c), c.pts)
return x, w
#def expand(data, npts):
# """Generate a scenario object from a dataset. The scenario will have
#uniformly distributed weights and have dimensions given by pts."""
# positions,values = data.fetch()
# from mystic.math.measures import _unpack
# pm = compose( _unpack(positions, npts) )
# return scenario(pm, values[:pm.npts])
def unflatten(params, npts):
"""Map a list of random variables to N x 1D discrete measures
in a product_measure object."""
from mystic.math.measures import _nested_split
w, x = _nested_split(params, npts)
return compose(x, w)
from itertools import chain #XXX: faster, but sloppy to have as importable
def flatten(c):
"""Flattens a product_measure object into a list."""
rv = [(i.weights,i.positions) for i in c]
# now flatten list of lists into just a list
return list(chain(*chain(*rv))) # faster than mystic.tools.flatten
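# A minimal sketch of the creators above (illustrative numbers only): build a
# 2-D product measure from nested positions and weights, query it, then check
# that flatten/unflatten round-trips the parameters.
def _example_compose_roundtrip():
  "small sketch exercising compose, expect, flatten, and unflatten"
  c = compose([[0., 1.], [2., 3.]], [[.5, .5], [.25, .75]])
  assert c.npts == 4                                     # 2 * 2 point_masses
  assert abs(c.expect(lambda x: sum(x)) - 3.25) < 1e-8   # E[x0] + E[x1] = 0.5 + 2.75
  params = flatten(c)          # [w_x1, w_x2, x1, x2, w_y1, w_y2, y1, y2]
  d = unflatten(params, c.pts)
  assert d.npts == c.npts and d.positions == c.positions
  return c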
##### bounds-conserving-mean: borrowed from seismic/seismic.py #####
def bounded_mean(mean_x, samples, xmin, xmax, wts=None):
from mystic.math.measures import impose_mean, impose_spread
from mystic.math.measures import spread, mean
from numpy import asarray
a = impose_mean(mean_x, samples, wts)
if min(a) < xmin: # maintain the bound
#print "violate lo(a)"
s = spread(a) - 2*(xmin - min(a)) #XXX: needs compensation (as below) ?
a = impose_mean(mean_x, impose_spread(s, samples, wts), wts)
if max(a) > xmax: # maintain the bound
#print "violate hi(a)"
s = spread(a) + 2*(xmax - max(a)) #XXX: needs compensation (as below) ?
a = impose_mean(mean_x, impose_spread(s, samples, wts), wts)
return asarray(a)
#####################################################################
#--------------------------------------------------
# constraints solvers and factories for feasibility
# used in self-consistent constraints function c(x) for
# is_short(x, x') and is_short(x, z)
def norm_wts_constraintsFactory(pts):
"""factory for a constraints function that:
- normalizes weights
"""
#from measure import scenario
def constrain(rv):
"constrain: sum(wi)_{k} = 1 for each k in K"
pm = scenario()
pm.load(rv, pts) # here rv is param: w,x,y
#impose: sum(wi)_{k} = 1 for each k in K
norm = 1.0
for i in range(len(pm)):
w = pm[i].weights
w[-1] = norm - sum(w[:-1])
pm[i].weights = w
rv = pm.flatten(all=True)
return rv
return constrain
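# A small sketch of the factory above (illustrative numbers only): the last
# weight of each set is reset so that the weights of that set sum to 1.0.
def _example_norm_wts_constraint():
  "small sketch: one 2-point set, params = [w1, w2, x1, x2] plus two Y-values"
  constrain = norm_wts_constraintsFactory((2,))
  rv = constrain([0.25, 0.25, 1.0, 2.0, 5.0, 6.0])
  assert rv == [0.25, 0.75, 1.0, 2.0, 5.0, 6.0]   # w2 reset so w1 + w2 == 1.0
  return rv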
# used in self-consistent constraints function c(x) for
# is_short(x, x'), is_short(x, z), and y >= m
def mean_y_norm_wts_constraintsFactory(target, pts):
"""factory for a constraints function that:
- imposes a mean on scenario values
- normalizes weights
"""
#from measure import scenario
from mystic.math.measures import mean, impose_mean
#target[0] is target mean
#target[1] is acceptable deviation
def constrain(rv):
"constrain: y >= m and sum(wi)_{k} = 1 for each k in K"
pm = scenario()
pm.load(rv, pts) # here rv is param: w,x,y
#impose: sum(wi)_{k} = 1 for each k in K
norm = 1.0
for i in range(len(pm)):
w = pm[i].weights
w[-1] = norm - sum(w[:-1])
pm[i].weights = w
#impose: y >= m
values, weights = pm.values, pm.weights
y = float(mean(values, weights))
if not (y >= float(target[0])):
pm.values = impose_mean(target[0]+target[1], values, weights)
rv = pm.flatten(all=True)
return rv
return constrain
def impose_feasible(cutoff, data, guess=None, **kwds):
"""impose shortness on a given list of parameters w,x,y.
Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0.
(this function is not ???-preserving)
Inputs:
cutoff -- maximum acceptable deviation from shortness
data -- a dataset of observed points (these points are 'static')
guess -- the scenario providing an initial guess at feasibility,
or a tuple of dimensions of the target scenario
Additional Inputs:
tol -- acceptable optimizer termination before sum(infeasibility) = 0.
bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds)
constraints -- a function that takes a flat list parameters
x' = constraints(x)
Outputs:
pm -- a scenario with desired shortness
"""
from numpy import sum, asarray
from mystic.math.legacydata import dataset
from mystic.math.distance import lipschitz_distance, infeasibility, _npts
if guess is None:
message = "Requires a guess scenario, or a tuple of scenario dimensions."
raise TypeError, message
# get initial guess
if hasattr(guess, 'pts'): # guess is a scenario
pts = guess.pts # number of x
guess = guess.flatten(all=True)
else:
pts = guess # guess is given as a tuple of 'pts'
guess = None
npts = _npts(pts) # number of Y
long_form = len(pts) - list(pts).count(2) # can use '2^K compressed format'
# prepare bounds for solver
bounds = kwds.pop('bounds', None)
# if bounds are not set, use the default optimizer bounds
if bounds is None:
lower_bounds = []; upper_bounds = []
for n in pts: # bounds for n*x in each dimension (x2 due to weights)
lower_bounds += [None]*n * 2
upper_bounds += [None]*n * 2
# also need bounds for npts*y values
lower_bounds += [None]*npts
upper_bounds += [None]*npts
bounds = lower_bounds, upper_bounds
bounds = asarray(bounds).T
# plug in the 'constraints' function: param' = constraints(param)
# constraints should impose_mean(y,w), and possibly sum(weights)
constraints = kwds.pop('constraints', None) # default is no constraints
if not constraints: # if None (default), there are no constraints
constraints = lambda x: x
_self = kwds.pop('with_self', True) # default includes self in shortness
if _self is not False: _self = True
# tolerance for optimization on sum(y)
tol = kwds.pop('tol', 0.0) # default
npop = kwds.pop('npop', 20) #XXX: tune npop?
maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter?
# if no guess was made, then use bounds constraints
if guess is None:
if npop:
guess = bounds
else: # fmin_powell needs a list params (not bounds)
guess = [(a + b)/2. for (a,b) in bounds]
# construct cost function to reduce sum(lipschitz_distance)
def cost(rv):
"""compute cost from a 1-d array of model parameters,
where: cost = | sum(lipschitz_distance) | """
_data = dataset()
_pm = scenario()
_pm.load(rv, pts) # here rv is param: w,x,y
if not long_form:
positions = _pm.select(*range(npts))
else: positions = _pm.positions
_data.load( data.coords, data.values ) # LOAD static
if _self:
_data.load( positions, _pm.values ) # LOAD dynamic
_data.lipschitz = data.lipschitz # LOAD L
Rv = lipschitz_distance(_data.lipschitz, _pm, _data, tol=cutoff, **kwds)
v = infeasibility(Rv, cutoff)
return abs(sum(v))
# construct and configure optimizer
debug = False #!!!
maxfun = 1e+6
crossover = 0.9; percent_change = 0.9
ftol = abs(tol); gtol = None
if debug:
print "lower bounds: %s" % bounds.T[0]
print "upper bounds: %s" % bounds.T[1]
# print "initial value: %s" % guess
# use optimization to get feasible points
from mystic.solvers import diffev2, fmin_powell
from mystic.monitors import Monitor, VerboseMonitor
from mystic.strategy import Best1Bin, Best1Exp
evalmon = Monitor(); stepmon = Monitor(); strategy = Best1Exp
if debug: stepmon = VerboseMonitor(10) #!!!
if npop: # use VTR
results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\
maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
cross=crossover, scale=percent_change, strategy=strategy,\
evalmon=evalmon, itermon=stepmon,\
full_output=1, disp=0, handler=False)
else: # use VTR
results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\
maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
evalmon=evalmon, itermon=stepmon,\
full_output=1, disp=0, handler=False)
# repack the results
pm = scenario()
pm.load(results[0], pts) # params: w,x,y
#if debug: print "final cost: %s" % results[1]
if debug and results[2] >= maxiter: # iterations
print "Warning: constraints solver terminated at maximum iterations"
#func_evals = results[3] # evaluation
return pm
def impose_valid(cutoff, model, guess=None, **kwds):
"""impose model validity on a given list of parameters w,x,y
Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0.
(this function is not ???-preserving)
Inputs:
cutoff -- maximum acceptable model invalidity |y - F(x')|; a single value
model -- the model function, y' = F(x'), that approximates reality, y = G(x)
guess -- the scenario providing an initial guess at validity,
or a tuple of dimensions of the target scenario
Additional Inputs:
hausdorff -- norm; where if given, ytol = |y - F(x')| + |x - x'|/norm
xtol -- acceptable pointwise graphical distance of model from reality
tol -- acceptable optimizer termination before sum(infeasibility) = 0.
bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds)
constraints -- a function that takes a flat list parameters
x' = constraints(x)
Outputs:
pm -- a scenario with desired model validity
Notes:
    xtol defines the n-dimensional base of a pillar of height cutoff, centered at
    each point. The region inside the pillar defines the space where a "valid"
    model must intersect. If xtol is not specified, then the base of the pillar
will be a dirac at x' = x. This function performs an optimization to find
a set of points where the model is valid. Here, tol is used to set the
optimization termination for the sum(graphical_distances), while cutoff is
used in defining the graphical_distance between x,y and x',F(x').
"""
from numpy import sum as _sum, asarray
from mystic.math.distance import graphical_distance, infeasibility, _npts
if guess is None:
message = "Requires a guess scenario, or a tuple of scenario dimensions."
raise TypeError, message
# get initial guess
if hasattr(guess, 'pts'): # guess is a scenario
pts = guess.pts # number of x
guess = guess.flatten(all=True)
else:
pts = guess # guess is given as a tuple of 'pts'
guess = None
npts = _npts(pts) # number of Y
# prepare bounds for solver
bounds = kwds.pop('bounds', None)
# if bounds are not set, use the default optimizer bounds
if bounds is None:
lower_bounds = []; upper_bounds = []
for n in pts: # bounds for n*x in each dimension (x2 due to weights)
lower_bounds += [None]*n * 2
upper_bounds += [None]*n * 2
# also need bounds for npts*y values
lower_bounds += [None]*npts
upper_bounds += [None]*npts
bounds = lower_bounds, upper_bounds
bounds = asarray(bounds).T
# plug in the 'constraints' function: param' = constraints(param)
constraints = kwds.pop('constraints', None) # default is no constraints
if not constraints: # if None (default), there are no constraints
constraints = lambda x: x
# 'wiggle room' tolerances
ipop = kwds.pop('ipop', 10) #XXX: tune ipop (inner optimization)?
imax = kwds.pop('imax', 10) #XXX: tune imax (inner optimization)?
# tolerance for optimization on sum(y)
tol = kwds.pop('tol', 0.0) # default
npop = kwds.pop('npop', 20) #XXX: tune npop (outer optimization)?
maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter (outer optimization)?
# if no guess was made, then use bounds constraints
if guess is None:
if npop:
guess = bounds
else: # fmin_powell needs a list params (not bounds)
guess = [(a + b)/2. for (a,b) in bounds]
# construct cost function to reduce sum(infeasibility)
def cost(rv):
"""compute cost from a 1-d array of model parameters,
where: cost = | sum( infeasibility ) | """
# converting rv to scenario
points = scenario()
points.load(rv, pts)
# calculate infeasibility
Rv = graphical_distance(model, points, ytol=cutoff, ipop=ipop, \
imax=imax, **kwds)
v = infeasibility(Rv, cutoff)
# converting v to E
return _sum(v) #XXX: abs ?
# construct and configure optimizer
debug = False #!!!
maxfun = 1e+6
crossover = 0.9; percent_change = 0.8
ftol = abs(tol); gtol = None #XXX: optimally, should be VTRCOG...
if debug:
print "lower bounds: %s" % bounds.T[0]
print "upper bounds: %s" % bounds.T[1]
# print "initial value: %s" % guess
# use optimization to get model-valid points
from mystic.solvers import diffev2, fmin_powell
from mystic.monitors import Monitor, VerboseMonitor
from mystic.strategy import Best1Bin, Best1Exp
evalmon = Monitor(); stepmon = Monitor(); strategy = Best1Exp
if debug: stepmon = VerboseMonitor(2) #!!!
if npop: # use VTR
results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\
maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
cross=crossover, scale=percent_change, strategy=strategy,\
evalmon=evalmon, itermon=stepmon,\
full_output=1, disp=0, handler=False)
else: # use VTR
results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\
maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
evalmon=evalmon, itermon=stepmon,\
full_output=1, disp=0, handler=False)
# repack the results
pm = scenario()
pm.load(results[0], pts) # params: w,x,y
#if debug: print "final cost: %s" % results[1]
if debug and results[2] >= maxiter: # iterations
print "Warning: constraints solver terminated at maximum iterations"
#func_evals = results[3] # evaluation
return pm
# backward compatibility
point = point_mass
dirac_measure = measure
if __name__ == '__main__':
from mystic.math.distance import *
model = lambda x:sum(x)
a = [0,1,9,8, 1,0,4,6, 1,0,1,2, 0,1,2,3,4,5,6,7]
  feasibility = 0.0; deviation = 0.01
validity = 5.0; wiggle = 1.0
y_mean = 5.0; y_buffer = 0.0
L = [.75,.5,.25]
bc = [(0,7,2),(3,0,2),(2,0,3),(1,0,3),(2,4,2)]
bv = [5,3,1,4,8]
pts = (2,2,2)
from mystic.math.legacydata import dataset
data = dataset()
data.load(bc, bv)
data.lipschitz = L
pm = scenario()
pm.load(a, pts)
pc = pm.positions
pv = pm.values
#---
_data = dataset()
_data.load(bc, bv)
_data.load(pc, pv)
_data.lipschitz = data.lipschitz
from numpy import sum
ans = sum(lipschitz_distance(L, pm, _data))
print "original: %s @ %s\n" % (ans, a)
#print "pm: %s" % pm
#print "data: %s" % data
#---
lb = [0,.5,-100,-100, 0,.5,-100,-100, 0,.5,-100,-100, 0,0,0,0,0,0,0,0]
ub = [.5,1, 100, 100, .5,1, 100, 100, .5,1, 100, 100, 9,9,9,9,9,9,9,9]
bounds = (lb,ub)
_constrain = mean_y_norm_wts_constraintsFactory((y_mean,y_buffer), pts)
  results = impose_feasible(feasibility, data, guess=pts, tol=deviation, \
bounds=bounds, constraints=_constrain)
from mystic.math.measures import mean
print "solved: %s" % results.flatten(all=True)
print "mean(y): %s >= %s" % (mean(results.values, results.weights), y_mean)
print "sum(wi): %s == 1.0" % [sum(w) for w in results.wts]
print "\n---------------------------------------------------\n"
bc = bc[:-2]
ids = ['1','2','3']
t = dataset()
t.load(bc, map(model, bc), ids)
t.update(t.coords, map(model, t.coords))
# r = dataset()
# r.load(t.coords, t.values)
# L = [0.1, 0.0, 0.0]
print "%s" % t
print "L: %s" % L
print "shortness:"
print lipschitz_distance(L, t, t, tol=0.0)
print "\n---------------------------------------------------\n"
print "Y: %s" % str(results.values)
print "sum(wi): %s == 1.0" % [sum(w) for w in results.wts]
# EOF
|
jcfr/mystic
|
_math/discrete.py
|
Python
|
bsd-3-clause
| 50,522
|
[
"DIRAC"
] |
5a674cf50e348364cc9c01523b0e3a2e1e31164d21c196bd67ae7cb2d039c8f9
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.388828
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/mobile/channelinfo.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class channelinfo(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(channelinfo, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
_v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 18, col 49
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 18, col 49.
write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
''')
link = quote('/mobile/channelzap?sref=' + VFFSL(SL,"channelinfo.sref",True), safe=' ~@#$&()*!+=:;,.?/\'')
write(u'''\t\t\t<div class="button" style="right:5px;left:auto;" onClick="window.open(\'''')
_v = VFFSL(SL,"link",True) # u'$link' on line 21, col 75
if _v is not None: write(_filter(_v, rawExpr=u'$link')) # from line 21, col 75.
write(u'''\');return false;">''')
_v = VFFSL(SL,"tstrings",True)['zap'] # u"$tstrings['zap']" on line 21, col 98
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zap']")) # from line 21, col 98.
write(u'''</div>\r
\t\t</div>\r
\t\t<div id="mainContent" class="ui-corner-all">\r
\t\t\t<table width="100%" border="0" cellspacing="1" cellpadding="5">\r
\t\t\t\t\t\t<tr>\r
\t\t\t\t\t\t\t<th colspan="3" class="ui-btn-up-b" style="text-align: left;">''')
_v = VFFSL(SL,"tstrings",True)['service'] # u"$tstrings['service']" on line 26, col 70
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['service']")) # from line 26, col 70.
write(u'''</th>\r
\t\t\t\t\t\t</tr>\r
\t\t\t\t\t\t<tr style="background-color: #f0f7fc;">\r
\t\t\t\t\t\t\t<td width="200"><img src="''')
_v = VFFSL(SL,"channelinfo.picon",True) # u'$channelinfo.picon' on line 29, col 34
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.picon')) # from line 29, col 34.
write(u'''" border="0" alt=""></td>\r
\t\t\t\t\t\t\t<td width="100%" valign="top">\r
\t\t\t\t\t\t\t<strong>''')
_v = VFFSL(SL,"channelinfo.sname",True) # u'$channelinfo.sname' on line 31, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.sname')) # from line 31, col 16.
write(u'''</strong><br />\r
''')
if VFFSL(SL,"channelinfo.title",True): # generated from line 32, col 8
write(u'''\t\t\t\t\t\t\t\t''')
_v = VFFSL(SL,"channelinfo.title",True) # u'$channelinfo.title' on line 33, col 9
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.title')) # from line 33, col 9.
write(u'''<br />\r
\t\t\t\t\t\t\t\t''')
_v = VFFSL(SL,"channelinfo.begin",True) # u'$channelinfo.begin' on line 34, col 9
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.begin')) # from line 34, col 9.
write(u'''-''')
_v = VFFSL(SL,"channelinfo.end",True) # u'$channelinfo.end' on line 34, col 28
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.end')) # from line 34, col 28.
write(u''' (''')
_v = VFFSL(SL,"channelinfo.duration",True) # u'$channelinfo.duration' on line 34, col 46
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.duration')) # from line 34, col 46.
write(u''' min)<br />\r
''')
write(u'''\t\t\t\t\t\t\t</td>\r
\t\t\t\t\t\t</tr>\r
\t\t\t\t\t\t<tr>\r
\t\t\t\t\t\t\t<th colspan="2" class="ui-btn-up-b" style="text-align: left;">''')
_v = VFFSL(SL,"tstrings",True)['current_event'] # u"$tstrings['current_event']" on line 39, col 70
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['current_event']")) # from line 39, col 70.
write(u'''</th>\r
\t\t\t\t\t\t</tr>\r
''')
if VFN(VFFSL(SL,"channelinfo",True),"has_key",False)('shortdesc'): # generated from line 41, col 7
write(u'''\t\t\t\t\t\t<tr style="background-color: #f0f7fc;">\r
\t\t\t\t\t\t\t<td colspan="2">''')
_v = VFFSL(SL,"channelinfo.shortdesc",True) # u'$channelinfo.shortdesc' on line 43, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.shortdesc')) # from line 43, col 24.
write(u'''</td>\r
\t\t\t\t\t\t</tr>\r
''')
if VFN(VFFSL(SL,"channelinfo",True),"has_key",False)('longdesc'): # generated from line 46, col 7
write(u'''\t\t\t\t\t\t<tr style="background-color: #f0f7fc;">\r
\t\t\t\t\t\t\t<td colspan="2">''')
_v = VFFSL(SL,"channelinfo.longdesc",True) # u'$channelinfo.longdesc' on line 48, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$channelinfo.longdesc')) # from line 48, col 24.
write(u'''</td>\r
\t\t\t\t\t\t</tr>\r
''')
write(u'''\t\t\t</table>\r
\t\t</div>\r
\t\t\r
''')
if VFFSL(SL,"channelepg",True): # generated from line 54, col 3
write(u'''\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
_v = VFFSL(SL,"tstrings",True)['upcoming_events'] # u"$tstrings['upcoming_events']" on line 57, col 64
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['upcoming_events']")) # from line 57, col 64.
write(u'''</li>\r
''')
for event in VFFSL(SL,"channelepg",True): # generated from line 58, col 5
write(u'''\t\t\t\t<li style="padding: 3px;">\r
\t\t\t\t\t<a href="/mobile/eventview?eventid=''')
_v = VFFSL(SL,"event.id",True) # u'$event.id' on line 60, col 41
if _v is not None: write(_filter(_v, rawExpr=u'$event.id')) # from line 60, col 41.
write(u'''&eventref=''')
_v = VFFSL(SL,"event.sref",True) # u'$event.sref' on line 60, col 60
if _v is not None: write(_filter(_v, rawExpr=u'$event.sref')) # from line 60, col 60.
write(u'''" style="padding: 3px;">\r
\t\t\t\t\t\t<span class="ui-li-heading" style="margin-top: 0px; margin-bottom: 3px;">''')
_v = VFFSL(SL,"event.title",True) # u'$event.title' on line 61, col 80
if _v is not None: write(_filter(_v, rawExpr=u'$event.title')) # from line 61, col 80.
write(u'''</span>\r
\t\t\t\t\t\t<span class="ui-li-desc" style="margin-bottom: 0px;">''')
_v = VFFSL(SL,"event.begin",True) # u'$event.begin' on line 62, col 60
if _v is not None: write(_filter(_v, rawExpr=u'$event.begin')) # from line 62, col 60.
write(u''' - ''')
_v = VFFSL(SL,"event.end",True) # u'$event.end' on line 62, col 75
if _v is not None: write(_filter(_v, rawExpr=u'$event.end')) # from line 62, col 75.
write(u'''</span>\r
\t\t\t\t\t</a>\t\r
\t\t\t\t</li>\r
''')
write(u'''\t\t\t</ul>\r
\t\t</div>\r
''')
write(u'''\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
_v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 72, col 86
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 72, col 86.
write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_channelinfo= 'respond'
## END CLASS DEFINITION
if not hasattr(channelinfo, '_initCheetahAttributes'):
templateAPIClass = getattr(channelinfo, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(channelinfo)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=channelinfo()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/mobile/channelinfo.py
|
Python
|
gpl-2.0
| 12,244
|
[
"VisIt"
] |
e0b4a73f016d47c4934e84dd7f9e32311891af979aa45e1e5e61f39c3cb069b9
|
# -*- coding: utf-8 -*-
"""Implements indexer for building Tophat references."""
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
import tempfile
import shutil
import pathlib2 as pathlib
from imfusion.external.bowtie import bowtie_index
from imfusion.external.tophat import tophat2_index_transcriptome
from .base import Indexer, register_indexer, Reference
class TophatIndexer(Indexer):
"""Indexer that builds references for the Tophat-Fusion aligner.
Performs the same steps as the base ``Indexer`` class, but additionally
generates an index for alignment with Tophat-Fusion using Bowtie and
Tophat2.
"""
def __init__(self, logger=None, skip_index=False):
super().__init__(logger=logger, skip_index=False)
@property
def _reference_class(self):
"""Reference class to use for this indexer."""
return TophatReference
@property
def dependencies(self):
"""External dependencies required by this indexer."""
return ['bowtie-build', 'tophat2']
def _build_indices(self, reference):
# type: (TophatReference) -> None
# Build bowtie index.
self._logger.info('Building bowtie index')
bowtie_idx_path = reference.index_path
bowtie_log_path = reference.base_path / 'bowtie.log'
bowtie_index(
reference_path=reference.fasta_path,
output_base_path=bowtie_idx_path,
log_path=bowtie_log_path)
# Build transcriptome index.
self._logger.info('Building transcriptome index')
transcriptome_path = reference.transcriptome_path
transcriptome_log_path = reference.base_path / 'transcriptome.log'
tophat2_index_transcriptome(
bowtie_index_path=bowtie_idx_path,
gtf_path=reference.gtf_path,
output_base_path=transcriptome_path,
log_path=transcriptome_log_path)
register_indexer('tophat', TophatIndexer)
class TophatReference(Reference):
"""Tophat Reference class.
Defines paths to files within the Tophat-Fusion reference. Compared to the
base reference, this class adds an additional path to the transcriptome
index that is used by Tophat during alignment.
"""
@property
def transcriptome_path(self):
# type: (...) -> pathlib.Path
"""Path to transcriptome index."""
return self._reference / 'transcriptome'
@property
def index_path(self):
# type: (...) -> pathlib.Path
"""Path to index."""
return self._reference / 'reference'
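# A rough sketch of the resulting reference layout (inferred from the two steps
# in ``_build_indices`` above; exact index file extensions depend on the bowtie
# and tophat2 versions used):
#   <reference dir>/reference.*      -- bowtie index (``index_path``)
#   <reference dir>/transcriptome.*  -- transcriptome index (``transcriptome_path``)
# with bowtie.log and transcriptome.log capturing the output of the two builds.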
|
NKI-CCB/imfusion
|
src/imfusion/build/indexers/tophat.py
|
Python
|
mit
| 2,753
|
[
"Bowtie"
] |
8ddce046ea3c058828051856b44f435aca221072d093b049238dea188d414519
|
#!/usr/bin/env python
'''
User defined XC functional
See also
* The parser parse_xc function implemented in pyscf.dft.libxc
* Example 24-define_xc_functional.py to input a functional which is not
provided in Libxc or XcFun library.
'''
from pyscf import gto
from pyscf import dft
mol = gto.M(
atom = '''
O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587 ''',
basis = 'ccpvdz')
#
# DFT can parse the custom XC functional, following the rules:
# * The given functional description must be a one-line string.
# * The functional description is case-insensitive.
# * The functional description string has two parts, separated by ",". The
# first part describes the exchange functional, the second is the correlation
# functional.
# - If "," is not present in the string, the entire string is treated as a
# compound XC functional (including both X and C functionals, such as b3lyp).
# - To input only X functional (without C functional), leave the second part
# blank. E.g. description='slater,' for pure LDA functional.
# - To neglect the X functional (only input C functional), leave the first
# part blank. E.g. description=',vwn' means pure VWN functional
# * If a compound XC functional is specified, no matter whether it is in the X
# part (the string in front of comma) or the C part (the string behind
# comma), both X and C functionals of the compound XC functional will be
# used.
# * The functional names can be placed in arbitrary order. Two names need to
# be separated by operators + or -. Blank spaces are ignored.
# NOTE the parser only reads operators + - *. / is not supported.
# * A functional name can have at most one factor. If the factor is not
# given, it is set to 1. Compound functional can be scaled as a unit. For
# example '0.5*b3lyp' is equivalent to
# 'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
# * String "HF" stands for exact exchange (HF K matrix). It is allowed to
# put "HF" in C (correlation) functional part.
# * String "RSH" means range-separated operator. Its format is
# RSH(omega, alpha, beta). Another way to input RSH is to use keywords
# SR_HF and LR_HF: "SR_HF(0.1) * alpha_plus_beta" and "LR_HF(0.1) *
# alpha" where the number in parenthesis is the value of omega.
# * Be careful with the libxc convention on GGA functional, in which the LDA
# contribution has been included.
mf = dft.RKS(mol)
mf.xc = 'HF*0.2 + .08*LDA + .72*B88, .81*LYP + .19*VWN'
e1 = mf.kernel()
print('E = %.15g ref = -76.3832244350081' % e1)
#
# No correlation functional
#
mf.xc = '.2*HF + .08*LDA + .72*B88'
e1 = mf.kernel()
print('E = %.15g ref = -75.9807850596666' % e1)
#
# If not given, the factor for each functional equals 1 by default.
#
mf = dft.RKS(mol)
mf.xc = 'b88,lyp'
e1 = mf.kernel()
mf = dft.RKS(mol)
mf.xc = 'b88*1,lyp*1'
eref = mf.kernel()
print('%.15g == %.15g' % (e1, eref))
#
# Compound functionals can be used as part of the definition
#
mf = dft.RKS(mol)
mf.xc = '0.5*b3lyp, 0.5*lyp'
e1 = mf.kernel()
print('E = %.15g ref = -71.9508340443282' % e1)
# Compound XC functional can be presented in the C part (the string behind
# comma). Both X and C functionals of the compound XC functional will be used.
# Compound XC functional can be scaled as a unit.
#
mf = dft.RKS(mol)
mf.xc = '0.5*b3lyp, 0.5*b3p86'
e1 = mf.kernel()
mf = dft.RKS(mol)
mf.xc = '0.5*b3lyp + 0.5*b3p86'
e2 = mf.kernel()
print('E1 = %.15g E2 = %.15g ref = -76.3923625924023' % (e1, e2))
#
# More examples of customized functionals. NOTE These customized functionals
# are presented for the purpose of demonstrating the feature of the XC input.
# They are not reported in any literature. DO NOT use them in the actual
# calculations.
#
# Half HF exchange plus half B3LYP plus half VWN functional
mf.xc = '.5*HF+.5*B3LYP,VWN*.5'
# "-" to subtract one functional from another
mf.xc = 'B88 - SLATER*.5'
# The functional below gives omega = 0.33, alpha = 0.6 * 0.65 = 0.39
# beta = -0.46 * 0.6 + 0.4 * 0.2(from HF of B3P86) = -0.196
mf.xc = '0.6*CAM_B3LYP+0.4*B3P86'
# The input XC description does not depend on the order of functionals. The
# functional below is the same to the functional above
mf.xc = '0.4*B3P86+0.6*CAM_B3LYP'
# Use SR_HF/LR_HF keywords to input range-separated functionals.
# When exact HF exchange is present, it is split into SR and LR parts
# alpha = 0.8(from HF) + 0.22
# beta = 0.5 + 0.8(from HF)
mf.xc = '0.5*SR-HF(0.3) + .8*HF + .22*LR_HF'
# RSH is another keyword to input range-separated functionals
mf.xc = '0.5*RSH(0.3,2.04,0.56) + 0.5*BP86'
# A shorthand to input 'PBE,PBE', which is a compound functional. Note the
# shorthand input is different to the two examples 'PBE,' and ',PBE' below.
mf.xc = 'PBE'
# Exchange part only
mf.xc = 'PBE,'
# Correlation part only
mf.xc = ',PBE'
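#
# A short consistency check of the shorthand above (a sketch: 'PBE' alone is
# expected to select the same compound functional as the explicit 'PBE,PBE',
# so the two energies should agree)
#
mf = dft.RKS(mol)
mf.xc = 'PBE'
e1 = mf.kernel()
mf = dft.RKS(mol)
mf.xc = 'PBE,PBE'
eref = mf.kernel()
print('PBE shorthand: %.15g == %.15g' % (e1, eref))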
# When defining custom functionals, compound functional will affect both the
# exchange and correlation parts
mf.xc = 'PBE + SLATER*.5'
# The above input is equivalent to
mf.xc = 'PBE + SLATER*.5, PBE'
|
gkc1000/pyscf
|
examples/dft/24-custom_xc_functional.py
|
Python
|
apache-2.0
| 5,057
|
[
"PySCF"
] |
a4421dd194d26fa50ee82b80503f12a58c6d26a7cf348b56d0d4ae593c13b5c1
|
#!/usr/bin/python
# -*- coding: utf-8
import argparse
from textwrap import dedent
from uuid import uuid4 as uuid
from collections import deque
from ruffus import *
import subprocess
from threading import Thread
from optparse import OptionParser
import multiprocessing
from math import frexp
import os, sys
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
sys.path.insert(0,os.path.abspath(os.path.join(exe_path,"..", "..")))
import logging
logger = logging.getLogger("run_parallel_blast")
import time
test_time = time.time()
MATCH_PART = 50
parser = OptionParser(version="%prog 1.0", usage = "\n\n %prog --input_file QUERY_FASTA --database_file FASTA_DATABASE --omcl_file OrthoMCL_Groups [more_options]")
""" formatter_class=argparse.RawDescriptionHelpFormatter,
description=dedent('''\
Parallel blastp + OrthoMCL
Author: Makarova Valentina, makarovavs07@gmail.com, 2015
Use: http://www.ruffus.org.uk/examples/bioinformatics/
In: set of proteins in fasta-file
Out: TSV-table, orthologGroups'''))"""
parser.add_option("-i", "--input_file", dest="input_file",
metavar="FILE",
type="string",
help="Name and path of query sequence file in FASTA format. ")
parser.add_option("-d", "--database_file", dest="database_file",
metavar="FILE",
type="string",
help="Name and path of FASTA database to search. ")
parser.add_option("-o", "--out_file", dest="out_file",
metavar="FILE",
type="string",
default="orthologGroups",
help="Name and path to out tsv-table of ortholog groups")
parser.add_option("-t", "--temp_directory", dest="temp_directory",
metavar="PATH",
type="string",
default="tmp",
help="Name and path of temporary directory where calculations "
"should take place. ")
parser.add_option('-g', "--groups_omcl_file", dest="omcl_file",
metavar='PATH',
default="groups_OrthoMCL-5.txt",
type=str, nargs=1,
help='Name and path to OrthoMCL groups file .fasta')
parser.add_option('-b', "--blastp_exe", dest="blastp",
metavar='PATH',
default="blastp",
type=str,
help='Name and path to blastp.exe file')
#
# general options: verbosity / logging
#
parser.add_option("-v", "--verbose", dest = "verbose",
action="count", default=0,
help="Print more detailed messages for each additional verbose level."
" E.g. run_parallel_blast --verbose --verbose --verbose ... (or -vvv)")
#
# pipeline
#
parser.add_option("-j", "--jobs", dest="jobs",
default=1,
metavar="jobs",
type="int",
help="Specifies the number of jobs (operations) to run in parallel.")
parser.add_option("--flowchart", dest="flowchart",
metavar="FILE",
type="string",
help="Print flowchart of the pipeline to FILE. Flowchart format "
"depends on extension. Alternatives include ('.dot', '.jpg', "
"'*.svg', '*.png' etc). Formats other than '.dot' require "
"the dot program to be installed (http://www.graphviz.org/).")
parser.add_option("-n", "--just_print", dest="just_print",
action="store_true", default=False,
help="Only print a trace (description) of the pipeline. "
" The level of detail is set by --verbose.")
parser.add_option("-p", '--pack_size', dest="pack_size", metavar='INT', default=1, type=int, nargs=1,
help='Count of protein for one run blast')
(options, remaining_args) = parser.parse_args()
if not options.flowchart:
if not options.database_file:
parser.error("\n\n\tMissing parameter --database_file FILE\n\n")
if not options.input_file:
parser.error("\n\n\tMissing parameter --input_file FILE\n\n")
if not options.omcl_file:
parser.error("\n\n\tMissing parameter --omcl_file FILE\n\n")
if options.verbose:
logger.setLevel(logging.DEBUG)
stderrhandler = logging.StreamHandler(sys.stderr)
stderrhandler.setFormatter(logging.Formatter(" %(message)s"))
stderrhandler.setLevel(logging.DEBUG)
logger.addHandler(stderrhandler)
original_fasta = options.input_file
database_file = options.database_file
temp_directory = options.temp_directory
out_file = options.out_file
pack_size = options.pack_size
output_file = open(options.out_file, 'w', 0)
blastp = options.blastp
file_queue = []
if __name__ == '__main__':
m = multiprocessing.Manager()
file_queue = m.Queue()
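    # Manager-backed queue so blast jobs running in separate worker processes can
    # hand finished result files to the orthomcl_daemon thread (currently disabled below).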
#groups_OrthoMCL_file= open(options.omcl_file, 'r')
groups_OrthoMCL = {};
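# Build a nested dict from the OrthoMCL groups file:
# groups_OrthoMCL[taxon][protein_id] = group_name, so that a BLAST subject id of
# the form "taxon|protein_id" can be mapped back to its ortholog group.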
for line in open(options.omcl_file, 'r'):
group_proteins = line.split()
group_name = group_proteins.pop(0)[:-1]
for protein in group_proteins:
temp = protein.split('|')
if temp[0] not in groups_OrthoMCL:
groups_OrthoMCL[temp[0]] = {}
groups_OrthoMCL[ temp[0] ][temp[1]] = group_name
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def run_cmd(cmd_str):
    # Run a shell command, retrying up to 10 times if it raises or exits non-zero.
    iteration = 0
    while (iteration < 10):
        iteration += 1
        try:
            process = subprocess.Popen(cmd_str, stdout = subprocess.PIPE,
                                       stderr = subprocess.PIPE, shell = True)
            stdout_str, stderr_str = process.communicate()
            if process.returncode != 0:
                print "Failed to run '%s'\nNon-zero exit status %s" %(cmd_str, process.returncode)
                continue
            break
        except:
            continue
def orthomcl_daemon():
print "daemon start"
while not os.path.exists(os.path.join(temp_directory, "end")):
time.sleep(2)
f = open(os.path.join(temp_directory, "end"), 'r')
pr_q = int(f.read())
f.close()
os.remove(os.path.join(temp_directory, "end"))
completed_file_quantity = 0
    while (completed_file_quantity < pr_q or len(os.listdir(temp_directory)) > 0):
time.sleep(2)
while not file_queue.empty():
file_ind = file_queue.get()
#learn blast file
completed_file_quantity +=1
print completed_file_quantity, "/", pr_q
protein_hits = {}
hit_priority = deque()
hit_q = ''
hit_q_len = 0
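            # protein_hits maps each subject id to its e-value, percent identity
            # and, when the subject is shorter than the query, the (sstart, send)
            # intervals needed to compute how much of the subject is covered;
            # hit_priority preserves the order in which BLAST reported the hits.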
f = open(file_ind, 'r')
for line in f:
if not len(line):
continue;
hit = line.split()
#h[0] qseqid
#h[1] sseqid
#h[2] qstart
#h[3] qend
#h[4] sstart
#h[5] send
#h[6] qlen
#h[7] slen
#h[8] evalue
#h[9] pident
#h[10] qcovs
if not hit_q_len:
hit_q_len = float(hit[6])
hit_q = hit[0]
hit_priority.append(hit[1])
if hit[1] not in protein_hits:
protein_hits[hit[1]] = {"evalue": hit[8], "pident": hit[9]}
if (hit_q_len < float(hit[7])):
protein_hits[hit[1]]["is_shorter"] = False
protein_hits[hit[1]]["match"] = hit[10]
else:
protein_hits[hit[1]]["is_shorter"] = True
protein_hits[hit[1]]["len"] = hit[7]
protein_hits[hit[1]]["sstart_send"] = [(hit[4], hit[5])]
elif protein_hits[hit[1]]["is_shorter"]:
protein_hits[hit[1]]["sstart_send"].append((hit[4], hit[5]))
#choose best hit
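            # Hits are examined in the order BLAST reported them (hit_priority);
            # the first subject whose coverage exceeds MATCH_PART percent of the
            # shorter sequence is written to the output table, the rest are skipped.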
while (len(hit_priority) != 0):
hit_s = hit_priority.popleft()
match = 0
if not (protein_hits[hit_s]["is_shorter"]):
if (protein_hits[hit_s]["match"] < MATCH_PART):
continue;
match = protein_hits[hit_s]["match"]
else:
protein_hits[hit_s]["sstart_send"].sort(key=(lambda x: x[0]))
match_union = protein_hits[hit_s]["sstart_send"][1:]
match_len = 0;
st = int(protein_hits[hit_s]["sstart_send"][0][0])
en = int(protein_hits[hit_s]["sstart_send"][0][1])
for st_end_pair in match_union:
if (int(st_end_pair[0]) <= en):
en = int(st_end_pair[1])
else:
match_len += (en - st + 1)
st = int(st_end_pair[0])
en = int(st_end_pair[1])
match_len += (en - st + 1)
match = int(round(match_len * 100 / float(protein_hits[hit_s]["len"])))
print "hit_q", hit_q
#write best hit into file
if match > MATCH_PART:
tmp = hit_s.split("|")
gr = "NO_GROUP"
if (tmp[0] in groups_OrthoMCL) and (tmp[1] in groups_OrthoMCL[tmp[0]]):
gr = groups_OrthoMCL[tmp[0]][tmp[1]]
#evalue parsing
ev = (protein_hits[hit_s]["evalue"]).split("e")
if (ev[0] == "0.0") or (ev[0] == "0") or (int(ev[1]) < -180):
ev = [0, "-181"]
output_file.write(str(hit_q + " "+ gr + " " + hit_s + " " + str(ev[0]) + " " + str(int(ev[1]))+ \
" " + str(int(round(float(protein_hits[hit_s]["pident"]))))+ " " + str(match) +"\n"))
break
#close all one protein file and remove them
f.close()
#os.remove(file_ind)
tmp = file_ind.split(".")
tmp[-1] = "segment"
os.remove(".".join(tmp))
tmp[-1] = "blastSuccess"
if os.path.exists(".".join(tmp)):
for i in range(1,10):
try:
os.remove(".".join(tmp))
break
except:
continue
os.rmdir(temp_directory)
print (time.time() - test_time)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Pipeline tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
@follows(mkdir(temp_directory))
@split(original_fasta, os.path.join(temp_directory, "*.segment"))
def splitFasta (seqFile, segments):
#Split sequence file into
# as many fragments as appropriate
# depending on the size of original_fasta
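    # (in the current implementation every FASTA record starts a new *.segment
    # file, so the number of segments equals the number of query proteins; the
    # --pack_size option is parsed but not used here yet)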
#
# Clean up any segment files from previous runs before creating new one
#
for i in segments:
os.unlink(i)
current_file_index = 0
#current_size = 0
for line in open(original_fasta):
#
# start a new file for each accession line
#
if line[0] == '>':
#current_size +=1
current_file_index += 1
#if (current_size >= pack_size)
file_name = "%d.segment" % current_file_index
file_path = os.path.join(temp_directory, file_name)
current_file = open(file_path, "w")
#current_size = 0
if current_file_index:
current_file.write(line)
end_file = open(os.path.join(temp_directory, "end_tmp"), 'w')
end_file.write(str(current_file_index))
end_file.close()
os.rename(os.path.join(temp_directory, "end_tmp"), os.path.join(temp_directory, "end"))
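# The "end" sentinel written above tells orthomcl_daemon how many segment files
# to expect; it is written under a temporary name and renamed so the daemon
# never reads a half-written count.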
@transform(splitFasta, suffix(".segment"), [".blastResult", ".blastSuccess"], file_queue)
def runBlast(seqFile, output_files, file_queue):
#
blastResultFile, flag_file = output_files
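    # Run blastp with a custom tabular output (outfmt 6); the column order matches
    # the fields parsed back in orthomcl_daemon: qseqid sseqid qstart qend sstart
    # send qlen slen evalue pident qcovs.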
cmd_str = blastp + " -db %s -query %s -out %s -evalue 1e-5 -outfmt \"6 qseqid sseqid qstart qend sstart send qlen slen evalue pident qcovs\""
run_cmd(cmd_str % (database_file, seqFile, blastResultFile))
file_queue.put(blastResultFile)
open(flag_file, "w")
time.sleep(5)
f = open(blastResultFile, 'r')
file_len = 0
for line in f:
if not len(line):
continue;
file_len += 1
print blastResultFile, " ", file_len
if __name__ == '__main__':
if options.just_print:
pipeline_printout(sys.stdout, [runBlast], verbose=options.verbose)
elif options.flowchart:
# use file extension for output format
output_format = os.path.splitext(options.flowchart)[1][1:]
pipeline_printout_graph (open(options.flowchart, "w"),
output_format,
                                    [runBlast],  # final task; combineBlastResults from the original ruffus example no longer exists
no_key_legend = True)
else:
"""result_daemon = Thread(target=orthomcl_daemon)
result_daemon.setDaemon(True)
result_daemon.start() """
pipeline_run([runBlast], multiprocess = options.jobs,
logger = logger, verbose=options.verbose)
"""if (result_daemon.isAlive()):
result_daemon.join()
"""
print "without daemon"
|
Vmakarova/parallelblast
|
parallelblast.py
|
Python
|
apache-2.0
| 13,561
|
[
"BLAST"
] |
ffe6cc64c2b4d648aadd641ee560fc1c7aefb95c0255a610bf50eba90db3f9dd
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
# tty is used in makefile_name() below; import it explicitly in case it is not
# re-exported by the wildcard import above.
import llnl.util.tty as tty
class Fasta(MakefilePackage):
"""The FASTA programs find regions of local or global similarity
between Protein or DNA sequences, either by searching Protein or
DNA databases, or by identifying local duplications within a
sequence. Other programs provide information on the statistical
significance of an alignment. Like BLAST, FASTA can be used to
infer functional and evolutionary relationships between sequences
as well as help identify members of gene families.
"""
homepage = "https://fasta.bioch.virginia.edu/fasta_www2/fasta_list2.shtml"
url = "https://github.com/wrpearson/fasta36/archive/fasta-v36.3.8g.tar.gz"
version('36.3.8g', sha256='fa5318b6f8d6a3cfdef0d29de530eb005bfd3ca05835faa6ad63663f8dce7b2e')
depends_on('zlib')
# The src tree includes a plethora of variant Makefiles and the
# builder is expected to choose one that's appropriate. This'll
# do for a first cut. I can't test anything else....
@property
def makefile_name(self):
if self.spec.satisfies('platform=darwin'):
name = 'Makefile.os_x86_64'
elif self.spec.satisfies('platform=linux target=x86_64:'):
name = 'Makefile.linux64_sse2'
else:
tty.die('''Unsupported platform/target, must be
Darwin (assumes 64-bit)
Linux x86_64
''')
return name
@property
def makefile_path(self):
return join_path(self.stage.source_path, 'make', self.makefile_name)
def edit(self, spec, prefix):
makefile = FileFilter(self.makefile_path)
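        # Point the makefile's install destination (XDIR) at this package's
        # prefix.bin so `make install` copies the binaries into the Spack prefix.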
makefile.filter('XDIR = .*', 'XDIR = {0}'.format(prefix.bin))
def build(self, spec, prefix):
with working_dir('src'):
make('-f', self.makefile_path)
def install(self, spec, prefix):
with working_dir('src'):
mkdir(prefix.bin)
make('-f', self.makefile_path, 'install')
|
rspavel/spack
|
var/spack/repos/builtin/packages/fasta/package.py
|
Python
|
lgpl-2.1
| 2,127
|
[
"BLAST"
] |
46b4df18156eb7f9457f5337d73f09985ff3efff05af44be806f133bcd95fa13
|