text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import tempfile
import numpy.testing.utils as nptu
from six.moves import zip
from io import open
import os
import json
from monty.json import MontyDecoder
from monty.serialization import loadfn
from monty.json import MSONable
from monty.dev import requires
from pymatgen import SETTINGS, MPRester
"""
Common test support for pymatgen test scripts.
This single module should provide all the common functionality for pymatgen
tests in a single location, so that test scripts can just import it and work
right away.
"""
class PymatgenTest(unittest.TestCase):
    """
    Extends unittest.TestCase with functions (taken from numpy.testing.utils)
    that support the comparison of arrays.
    """
    MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
    STRUCTURES_DIR = os.path.join(MODULE_DIR, "structures")

    # Canned test structures to aid testing, keyed by file basename (without
    # extension). Loaded once at class-definition time from STRUCTURES_DIR.
    # (Was a bare string literal before, which is a statement, not a docstring.)
    TEST_STRUCTURES = {}
    for fn in os.listdir(STRUCTURES_DIR):
        TEST_STRUCTURES[fn.rsplit(".", 1)[0]] = loadfn(os.path.join(
            STRUCTURES_DIR, fn), cls=MontyDecoder)

    @classmethod
    def get_structure(cls, name):
        """
        Return a canned test structure by name.

        Args:
            name: Basename (without extension) of a file in STRUCTURES_DIR.

        Returns:
            A copy of the structure, so tests may mutate it freely.
        """
        return cls.TEST_STRUCTURES[name].copy()

    @classmethod
    @requires(SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY needs to be set.")
    def get_mp_structure(cls, mpid):
        """
        Fetch a structure from the Materials Project (requires PMG_MAPI_KEY).

        Args:
            mpid: Materials Project id, e.g. "mp-149".
        """
        m = MPRester()
        return m.get_structure_by_material_id(mpid)

    @staticmethod
    def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg='',
                               verbose=True):
        """
        Tests if two arrays are almost equal to a tolerance. The CamelCase
        naming is so that it is consistent with standard unittest methods.
        """
        return nptu.assert_almost_equal(actual, desired, decimal, err_msg,
                                        verbose)

    @staticmethod
    def assertArrayEqual(actual, desired, err_msg='', verbose=True):
        """
        Tests if two arrays are equal. The CamelCase naming is so that it is
        consistent with standard unittest methods.
        """
        return nptu.assert_equal(actual, desired, err_msg=err_msg,
                                 verbose=verbose)

    def serialize_with_pickle(self, objects, protocols=None, test_eq=True):
        """
        Test whether the object(s) can be serialized and deserialized with
        pickle. This method tries to serialize the objects with pickle and the
        protocols specified in input. Then it deserializes the pickle format
        and compares the two objects with the __eq__ operator if
        test_eq == True.

        Args:
            objects: Object or list of objects.
            protocols: List of pickle protocols to test. If protocols is None,
                HIGHEST_PROTOCOL is tested.

        Returns:
            Nested list with the objects deserialized with the specified
            protocols.
        """
        # Use the python version so that we get the traceback in case of errors
        import pickle as pickle
        from pymatgen.util.serialization import pmg_pickle_load, pmg_pickle_dump

        # Build a list even when we receive a single object.
        got_single_object = False
        if not isinstance(objects, (list, tuple)):
            got_single_object = True
            objects = [objects]
        if protocols is None:
            # protocols = set([0, 1, 2] + [pickle.HIGHEST_PROTOCOL])
            protocols = [pickle.HIGHEST_PROTOCOL]

        # This list will contain the objects deserialized with the different
        # protocols.
        objects_by_protocol, errors = [], []
        for protocol in protocols:
            # Serialize and deserialize the object.
            mode = "wb"
            fd, tmpfile = tempfile.mkstemp(text="b" not in mode)
            # mkstemp returns an open OS-level descriptor that the caller is
            # responsible for closing; we reopen the path by name below, so
            # release the descriptor now to avoid leaking one per protocol.
            os.close(fd)
            try:
                with open(tmpfile, mode) as fh:
                    pmg_pickle_dump(objects, fh, protocol=protocol)
            except Exception as exc:
                errors.append("pickle.dump with protocol %s raised:\n%s" %
                              (protocol, str(exc)))
                continue
            try:
                with open(tmpfile, "rb") as fh:
                    new_objects = pmg_pickle_load(fh)
            except Exception as exc:
                errors.append("pickle.load with protocol %s raised:\n%s" %
                              (protocol, str(exc)))
                continue
            # Test for equality
            if test_eq:
                for old_obj, new_obj in zip(objects, new_objects):
                    self.assertEqual(old_obj, new_obj)
            # Save the deserialized objects and test for equality.
            objects_by_protocol.append(new_objects)
        if errors:
            raise ValueError("\n".join(errors))
        # Return nested list so that client code can perform additional tests.
        if got_single_object:
            return [o[0] for o in objects_by_protocol]
        else:
            return objects_by_protocol

    def tmpfile_write(self, string):
        """
        Write string to a temporary file. Returns the name of the temporary
        file.
        """
        fd, tmpfile = tempfile.mkstemp(text=True)
        # Close the descriptor returned by mkstemp; the file is reopened by
        # name below (leaving it open leaks a descriptor per call).
        os.close(fd)
        with open(tmpfile, "w") as fh:
            fh.write(string)
        return tmpfile

    def assertMSONable(self, obj, test_if_subclass=True):
        """
        Tests if obj is MSONable and tries to verify whether the contract is
        fulfilled.

        By default, the method tests whether obj is an instance of MSONable.
        This check can be deactivated by setting test_if_subclass to False.
        """
        if test_if_subclass:
            self.assertIsInstance(obj, MSONable)
        # Round-trip through the as_dict/from_dict contract and the JSON form.
        self.assertDictEqual(obj.as_dict(), obj.__class__.from_dict(
            obj.as_dict()).as_dict())
        json.loads(obj.to_json(), cls=MontyDecoder)
| setten/pymatgen | pymatgen/util/testing.py | Python | mit | 6,017 | [
"pymatgen"
] | aac48cf2df4ae0e9f98f3147cf0f1aea9d01ebc1d947e625adb514fd2eccd9b3 |
import numpy
import os, inspect
from orbkit import read
from orbkit.core import rho_compute
from orbkit.test.tools import equal
from orbkit import grid
from orbkit import options
# Silence orbkit's console output during the test run.
options.quiet = True
# Locate the Molpro test output relative to this test file.
tests_home = os.path.dirname(inspect.getfile(inspect.currentframe()))
folder = os.path.join(tests_home, '../outputs_for_testing/molpro')
filepath = os.path.join(folder, 'h2o_rhf_sph.molden')
# Read the QC data (all molecular orbitals) and build a regular grid
# around the geometry.
qc = read.main_read(filepath, all_mo=True)
grid.adjust_to_geo(qc, extend=2.0, step=1)
grid.grid_init(is_vector=False,force=True)
# Derivative keys: density itself (None), first and second derivatives.
drv = [None,'x','y','z','xx','xy','xz','yy','yz','zz']
data = []
# Pass 0 uses the regular grid; pass 1 repeats everything on the
# vectorized form of the same grid (grid2vector).
for i in range(2):
  if i: grid.grid2vector()
  # Each entry pairs a slice-based computation with its multiprocessing
  # counterpart so they can be compared below.
  data.append([
    rho_compute(qc,slice_length=0),
    rho_compute(qc,numproc=options.numproc),
    rho_compute(qc,laplacian=True,slice_length=0)[-1],
    rho_compute(qc,laplacian=True,numproc=options.numproc)[-1],
    rho_compute(qc,calc_mo=True,drv=drv,slice_length=0),
    rho_compute(qc,calc_mo=True,drv=drv,numproc=options.numproc)
    ])
# Map the vector-grid results back onto the regular grid so both passes
# have the same shape, then check they agree.
data[1] = [grid.mv2g(d=i) for i in data[1]]
for i in range(len(data[0])):
  equal(data[0][i],data[1][i])
# Compare against stored reference data (regenerate via the commented
# savez line when the reference needs updating).
filepath = os.path.join(tests_home, 'refdata_rho_compute.npz')
#numpy.savez(filepath, data=data[0])
refdata = numpy.load(filepath)
in_dic = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four'}
for i in range(5):
  equal(data[0][i], refdata[in_dic[i]])
| orbkit/orbkit | orbkit/test/grid_based/rho_compute.py | Python | lgpl-3.0 | 1,400 | [
"Molpro"
] | 42e7f57573a8c6cadea695bdfb4015146cb17951911c566246f1d714d2fc7728 |
# Copyright 2017 Priscilla Boyd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The Plotter module plots a limited number of examples (to avoid graph cluttering), helping the user understand the nature
and the components available in the data.
"""
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
def plot_phase_vs_dt(df, seconds, analysis_folder):
    """
    Plot phase versus date/time, taking data frame and number of seconds to plot.

    :param object df: dataframe with a 'Date_Time' column and phase columns
    :param int seconds: number of seconds (rows) to plot
    :param string analysis_folder: analysis folder location
    """
    # y positions of the four phase states and their labels
    phase_ticks = [0, 1, 2, 3]
    labels = ['Red', 'Red + Amber', 'Amber', 'Green']
    # Slice and re-index on a new frame: the previous inplace set_index
    # mutated a slice of the caller's dataframe (SettingWithCopy hazard).
    df = df[:seconds].set_index(['Date_Time'])
    # plot the data
    df.plot()
    # configure plot
    plt.locator_params(axis='x', nbins=10)
    # Single yticks call: a second bare plt.yticks(...) would clobber the
    # phase labels set here with plain numbers.
    plt.yticks(phase_ticks, labels, rotation='vertical')
    plt.xlabel('Time')
    plt.ylabel('Phase')
    # save plot to file
    plt.savefig(analysis_folder + 'phase_vs_dt.png')
    # display plot
    plt.show()
def plot_phases_info(df, seconds, analysis_folder):
    """
    Plot sub-plots of each phase within a data frame for the given number of seconds.

    :param object df: dataframe
    :param int seconds: number of seconds to plot
    :param string analysis_folder: analysis folder location
    """
    # define x values and data source (sliced from data frame)
    df = df[:seconds]
    # One two-column frame per signal phase (A..D), each keeping Date_Time.
    df_a = df.loc[:, ['Date_Time', 'A']]
    df_b = df.loc[:, ['Date_Time', 'B']]
    df_c = df.loc[:, ['Date_Time', 'C']]
    df_d = df.loc[:, ['Date_Time', 'D']]
    # configure plot: shared y tick labels for the four phase states
    labels = ['Red', 'Red/Amber', 'Amber', 'Green']
    fig, axes = plt.subplots(nrows=2, ncols=2)
    plt.setp(axes, yticks=[0, 1, 2, 3], yticklabels=labels, xlabel='Time (seconds)')
    # plot the data, one phase per quadrant with a distinct colour
    df_a.plot(ax=axes[0, 0], color='b')
    axes[0, 0].set_title('A')
    df_b.plot(ax=axes[0, 1], color='g')
    axes[0, 1].set_title('B')
    df_c.plot(ax=axes[1, 0], color='r')
    axes[1, 0].set_title('C')
    df_d.plot(ax=axes[1, 1], color='k')
    axes[1, 1].set_title('D')
    plt.tight_layout()
    # save plot to file
    plt.savefig(analysis_folder + 'phases_info.png')
    # display plot
    plt.show()
def plot_correlation(df, analysis_folder):
    """
    Plot correlation of stages as a heat map.

    :param object df: dataframe
    :param string analysis_folder: analysis folder location
    """
    # Pairwise scatter matrix gives an overview of the stage relationships.
    pd.plotting.scatter_matrix(df, alpha=0.3, figsize=(14, 8), diagonal='kde')
    f, ax = plt.subplots(figsize=(10, 8))
    corr = df.corr()
    # dtype=bool: the np.bool alias was deprecated in NumPy 1.20 and removed
    # in NumPy 1.24; the builtin bool is the supported spelling.
    sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
                square=True, ax=ax)
    # save plot to file
    plt.savefig(analysis_folder + 'phase_correlation.png')
    # display plot
    plt.show()
| priscillaboyd/SPaT_Prediction | src/analysis/Plotter.py | Python | apache-2.0 | 3,691 | [
"Amber"
] | b57a92b2cfa044743722b4f49269ae15a72192b09c957e820cf1955d688b3947 |
"""
spaceshooter.py
Author: Hagin
Credit: myself, Morgan, Mr. Dennison
Assignment:
Write and submit a program that implements the spacewar game:
https://github.com/HHS-IntroProgramming/Spacewar
"""
from ggame import App, RectangleAsset, ImageAsset, SoundAsset, Sprite, Sound, LineStyle, Color, Frame
import math
import time
# Fixed playfield size in pixels; the screen-wrap logic and background
# tiling below assume these dimensions.
SCREEN_WIDTH = 1536
SCREEN_HEIGHT = 1024
class Sun(Sprite):
    """Static sun sprite placed in the middle of the playfield."""
    asset = ImageAsset("images/sun.png")
    width = 80
    height = 76
    def __init__(self, position):
        super().__init__(Sun.asset, position)
        # Center the sprite on its position and use a circular hit box.
        self.fxcenter = 0.5
        self.fycenter = 0.5
        self.circularCollisionModel()
class Ship1(Sprite):
    """Player 1 ship: arrow keys rotate/thrust, Enter fires a bullet."""
    # Sprite sheet slice (x 227..292) with 4 vertically stacked frames:
    # frame 0 is idle, frames 1-3 animate the thrust flame.
    asset = ImageAsset("images/four_spaceship_by_albertov_with_thrust.png",
        Frame(227,0,292-227,125), 4, 'vertical')
    def __init__(self, position):
        super().__init__(Ship1.asset, position)
        self.vr = 0.00          # angular velocity; rotation advances by 1.5*vr per step
        self.thrust = 0         # 1 while the thrust key is held
        self.thrustframe = 1    # current thrust animation frame (cycles 1..3)
        self.VX = 0             # accumulated velocity components
        self.VY = 0
        self.vx = 0             # unit heading vector derived from rotation
        self.vy = 0
        self.turn = 0
        # Register key bindings (class-level listeners on the App).
        SpaceGame.listenKeyEvent("keydown", "up arrow", self.thrustOn)
        SpaceGame.listenKeyEvent("keyup", "up arrow", self.thrustOff)
        SpaceGame.listenKeyEvent("keydown", "left arrow", self.rotateLeft)
        SpaceGame.listenKeyEvent("keyup", "left arrow", self.lrOff)
        SpaceGame.listenKeyEvent("keydown", "right arrow", self.rotateRight)
        SpaceGame.listenKeyEvent("keyup", "right arrow", self.rrOff)
        SpaceGame.listenKeyEvent("keypress", "enter", self.fire)
        self.fxcenter = self.fycenter = 0.5
        self.bullet = None
    def step(self):
        """Per-frame update: rotate, integrate velocity, wrap, animate, collide."""
        self.rotation += 1.5*self.vr
        self.move()
        # Thrust accelerates along the current heading.
        if self.thrust == 1:
            self.VX += self.vx
            self.VY += self.vy
        # Apply velocity with screen wrap-around on the x axis.
        if 0 <= self.x <= SCREEN_WIDTH:
            self.x -= 0.1*self.VX
        elif self.x < 0:
            self.x += SCREEN_WIDTH
            self.x -= 0.1*self.VX
        else:
            self.x -= (0.1*self.VX + SCREEN_WIDTH)
        # Same wrap-around on the y axis.
        if 0 <= self.y <= SCREEN_HEIGHT:
            self.y -= 0.1*self.VY
        elif self.y < 0:
            self.y += SCREEN_HEIGHT
            self.y -= 0.1*self.VY
        else:
            self.y -= (0.1*self.VY + SCREEN_HEIGHT)
        # Animate the thrust flame while thrusting, else show the idle frame.
        if self.thrust == 1:
            self.setImage(self.thrustframe)
            self.thrustframe += 1
            if self.thrustframe == 4:
                self.thrustframe = 1
        else:
            self.setImage(0)
        # Colliding with the other ship or any bullet destroys both parties.
        collides = self.collidingWithSprites(Ship2)
        collides.extend(self.collidingWithSprites(Bullet))
        if len(collides):
            if collides[0].visible:
                collides[0].explode()
                self.explode()
    def move(self):
        """Recompute the unit heading vector (vx, vy) from the rotation angle."""
        self.X = math.sin(self.rotation)
        self.Y = math.cos(self.rotation)
        # sin^2 + cos^2 == 1, so the division only normalizes defensively.
        self.vx = self.X/math.sqrt(self.X*self.X + self.Y*self.Y)
        self.vy = self.Y/math.sqrt(self.X*self.X + self.Y*self.Y)
    def explode(self):
        """Hide the ship and spawn a large explosion at its position."""
        self.visible = False
        ExplosionBig(self.position)
        # NOTE(review): waitspawn is set but never read anywhere visible here —
        # presumably intended for a respawn timer; confirm before relying on it.
        self.waitspawn = 5
    def thrustOn(self, event):
        self.thrust = 1
    def thrustOff(self, event):
        self.thrust = 0
    def rotateLeft(self, event):
        self.vr = 0.05
    def lrOff(self, event):
        self.vr = 0
    def rotateRight(self, event):
        self.vr = -0.05
    def rrOff(self, event):
        self.vr = 0
    def fire(self, event):
        # Bullet inherits the ship's current heading.
        self.bullet= Bullet(self.position, self.vx, self.vy)
class Ship2(Sprite):
    """Player 2 ship: W/A/D rotate/thrust, E fires. Mirrors Ship1's logic."""
    # Sprite sheet slice (x 0..86) with 4 vertically stacked frames:
    # frame 0 is idle, frames 1-3 animate the thrust flame.
    asset = ImageAsset("images/four_spaceship_by_albertov_with_thrust.png",
        Frame(0,0,86,125), 4, 'vertical')
    def __init__(self, position):
        super().__init__(Ship2.asset, position)
        self.vr = 0.00          # angular velocity; rotation advances by 1.5*vr per step
        self.thrust = 0         # 1 while the thrust key is held
        self.thrustframe = 1    # current thrust animation frame (cycles 1..3)
        self.VX = 0             # accumulated velocity components
        self.VY = 0
        self.vx = 0             # unit heading vector derived from rotation
        self.vy = 0
        self.turn = 0
        # Register key bindings (WASD-style, distinct from Ship1's arrows).
        SpaceGame.listenKeyEvent("keydown", "w", self.thrustOn)
        SpaceGame.listenKeyEvent("keyup", "w", self.thrustOff)
        SpaceGame.listenKeyEvent("keydown", "a", self.rotateLeft)
        SpaceGame.listenKeyEvent("keyup", "a", self.lrOff)
        SpaceGame.listenKeyEvent("keydown", "d", self.rotateRight)
        SpaceGame.listenKeyEvent("keyup", "d", self.rrOff)
        SpaceGame.listenKeyEvent("keypress", "e", self.fire)
        self.fxcenter = self.fycenter = 0.5
        self.bullet = None
    def step(self):
        """Per-frame update: rotate, integrate velocity, wrap, animate, collide."""
        self.rotation += 1.5*self.vr
        self.move()
        if self.thrust == 1:
            self.VX += self.vx
            self.VY += self.vy
        # Apply velocity with screen wrap-around on the x axis.
        if 0 <= self.x <= SCREEN_WIDTH:
            self.x -= 0.1*self.VX
        elif self.x < 0:
            self.x += SCREEN_WIDTH
            self.x -= 0.1*self.VX
        else:
            self.x -= (0.1*self.VX + SCREEN_WIDTH)
        # Same wrap-around on the y axis.
        if 0 <= self.y <= SCREEN_HEIGHT:
            self.y -= 0.1*self.VY
        elif self.y < 0:
            self.y += SCREEN_HEIGHT
            self.y -= 0.1*self.VY
        else:
            self.y -= (0.1*self.VY + SCREEN_HEIGHT)
        if self.thrust == 1:
            self.setImage(self.thrustframe)
            self.thrustframe += 1
            if self.thrustframe == 4:
                self.thrustframe = 1
        else:
            self.setImage(0)
        # NOTE(review): unlike Ship1, this only checks Ship1 collisions —
        # bullets are not checked here; confirm whether that is intended.
        collides = self.collidingWithSprites(Ship1)
        if len(collides):
            if collides[0].visible:
                collides[0].explode()
                self.explode()
    def move(self):
        """Recompute the unit heading vector (vx, vy) from the rotation angle."""
        self.X = math.sin(self.rotation)
        self.Y = math.cos(self.rotation)
        self.vx = self.X/math.sqrt(self.X*self.X + self.Y*self.Y)
        self.vy = self.Y/math.sqrt(self.X*self.X + self.Y*self.Y)
    def explode(self):
        """Hide the ship and spawn a large explosion at its position."""
        self.visible = False
        ExplosionBig(self.position)
        # NOTE(review): waitspawn is set but never read anywhere visible here.
        self.waitspawn = 5
    def thrustOn(self, event):
        self.thrust = 1
    def thrustOff(self, event):
        self.thrust = 0
    def rotateLeft(self, event):
        self.vr = 0.05
    def lrOff(self, event):
        self.vr = 0
    def rotateRight(self, event):
        self.vr = -0.05
    def rrOff(self, event):
        self.vr = 0
    def fire(self, event):
        # Bullet inherits the ship's current heading.
        self.bullet= Bullet(self.position, self.vx, self.vy)
class Bullet(Sprite):
    """Projectile fired by a ship; travels along the firing heading."""
    # 8-frame blast animation plus a firing sound.
    asset = ImageAsset("images/blast.png", Frame(0,0,8,8), 8)
    pewasset = SoundAsset("sounds/pew1.mp3")
    def __init__(self, position, vx, vy):
        """position: spawn point; vx, vy: unit heading of the firing ship."""
        super().__init__(Bullet.asset, position)
        self.exist = True       # animation enable flag (never cleared here)
        self.circularCollisionModel()
        self.pew = Sound(Bullet.pewasset)
        self.pew.play()
        self.appear = 1         # current animation frame (cycles 1..7)
        self.fxcenter = 0.5
        self.fycenter = 0
        # Heading captured at fire time; the bullet flies straight.
        self.X = vx
        self.Y = vy
    def step(self):
        """Per-frame update: animate, advance, and vanish on ship hit or off-screen."""
        self.visible = True
        if self.exist:
            self.setImage(self.appear)
            self.appear += 1
            if self.appear == 8:
                self.appear = 1
        else:
            self.setImage(0)
        # Move opposite the stored heading, matching the ships' sign convention.
        self.x -= 15*self.X
        self.y -= 15*self.Y
        # Hide when overlapping a still-visible ship (the ships run their own
        # collision handling; here we only suppress drawing).
        collides = self.collidingWithSprites(Ship2)
        collides.extend(self.collidingWithSprites(Ship1))
        if len(collides):
            if collides[0].visible:
                self.visible = False
            else:
                self.visible = True
        # Bullets do not wrap; they are destroyed at the screen edge.
        if self.x < 0 or self.x > SCREEN_WIDTH or self.y < 0 or self.y >SCREEN_HEIGHT:
            self.destroy()
class HealthBar:
    """A row of indicator icons showing remaining lives.

    Icons are laid out near the bottom of the screen, growing rightwards
    from the left edge or leftwards from the right edge depending on
    ``position``. ``killone`` hides one icon at a time; ``restart`` shows
    them all again.
    """
    def __init__(self, indicatorasset, initvalue, position, app):
        # One sprite per life, all parked at the same y near the bottom.
        self.sprites = [Sprite(indicatorasset, (0, app.height - 75))
                        for _ in range(initvalue)]
        for icon in self.sprites:
            icon.scale = 0.4
        icon_width = self.sprites[0].width
        # Anchor and direction depend on which side the bar sits on.
        if position == 'left':
            xpos, step = 50, icon_width + 5
        else:
            xpos, step = app.width - 50 - icon_width, -icon_width - 5
        for icon in self.sprites:
            icon.x = xpos
            xpos += step
        self.restart()
    def restart(self):
        """Show every icon and reset the live count to full."""
        for icon in self.sprites:
            icon.visible = True
        self.count = len(self.sprites)
    def dead(self):
        """Return True once every life has been removed."""
        return self.count == 0
    def killone(self):
        """Remove one life, hiding the corresponding icon (no-op at zero)."""
        if self.count > 0:
            self.count -= 1
            self.sprites[self.count].visible = False
class ExplosionSmall(Sprite):
    """One-shot small explosion: 10-frame animation plus sound, then self-destructs."""
    asset = ImageAsset("images/explosion1.png", Frame(0,0,128,128), 10)
    boomasset = SoundAsset("sounds/explosion1.mp3")
    def __init__(self, position):
        super().__init__(ExplosionSmall.asset, position)
        self.image = 0          # step counter; frame shown is image // 2
        self.center = (0.5, 0.5)
        self.boom = Sound(ExplosionSmall.boomasset)
        self.boom.play()
    def step(self):
        # Each frame is held for 2 steps; destroy after all 10 frames.
        self.setImage(self.image//2)
        self.image += 1
        if self.image == 20:
            self.destroy()
class ExplosionBig(Sprite):
    """One-shot large explosion: 25-frame animation plus sound, then self-destructs."""
    asset = ImageAsset("images/explosion2.png", Frame(0,0,4800/25,195), 25)
    boomasset = SoundAsset("sounds/explosion2.mp3")
    def __init__(self, position):
        super().__init__(ExplosionBig.asset, position)
        self.image = 0          # step counter; frame shown is image // 2
        self.center = (0.5, 0.5)
        self.boom = Sound(ExplosionBig.boomasset)
        self.boom.play()
    def step(self):
        # Each frame is held for 2 steps; destroy after all 25 frames.
        self.setImage(self.image//2)
        self.image += 1
        if self.image == 50:
            self.destroy()
class SpaceGame(App):
    """Main application: builds the scene and drives all sprites each frame."""
    def __init__(self, width, height):
        super().__init__(width, height)
        black = Color(0, 1)
        noline = LineStyle(0, black)
        # Tile the 512px starfield over the 1536x1024 playfield.
        bg_asset = ImageAsset("images/starfield.jpg")
        bg = Sprite(bg_asset, (0,0))
        bg1 = Sprite(bg_asset, (512,512))
        bg2 = Sprite(bg_asset, (0,512))
        bg3 = Sprite(bg_asset, (512,0))
        bg4 = Sprite(bg_asset, (1024,512))
        bg5 = Sprite(bg_asset, (1024,0))
        # Two ships on opposite sides with the sun between them.
        Ship1((250,250))
        Ship2((1000,250))
        Sun((650,350))
    def step(self):
        """Frame callback: advance every active sprite's state."""
        for ship in self.getSpritesbyClass(Ship1):
            ship.step()
        for ship in self.getSpritesbyClass(Ship2):
            ship.step()
        explosions = self.getSpritesbyClass(ExplosionSmall)
        for explosion in explosions:
            explosion.step()
        explosions = self.getSpritesbyClass(ExplosionBig)
        for explosion in explosions:
            explosion.step()
        for bullets in self.getSpritesbyClass(Bullet):
            bullets.step()
# Instantiate the game at the configured playfield size and start its loop.
myapp = SpaceGame(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run()
| HaginCodes/Space-Shooter | spaceshooter.py | Python | mit | 10,585 | [
"BLAST"
] | 252c2203041f16d13a0ad46cbc8bc5faba7b8bc9b86a7e4a523d673848af72ce |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import logging
from MooseDocs.test import MooseDocsTestCase
from MooseDocs.extensions import core, command, pysyntax
from MooseDocs import base
logging.basicConfig()
class TestObj(object):
    """class doc"""

    # Fixture class exercising every visibility category that the pysyntax
    # extension distinguishes: public, protected (one leading underscore),
    # private (two leading underscores, name-mangled) and internal (dunder).
    # The docstrings below are asserted verbatim by the tests.

    def __init__(self, data):
        pass

    def publicMethod(self, arg):
        """public method doc"""

    def _protectedMethod(self, arg):
        """protected method doc"""

    def __privateMethod(self, arg):
        """private method doc"""

    def __internalMethod__(self, arg):
        """internal method doc"""

    @property
    def public_prop(self):
        """public property doc"""

    @property
    def _protected_prop(self):
        """protected property doc"""

    @property
    def __private_prop(self):
        """private property doc"""

    @property
    def __internal_prop__(self):
        """internal property doc"""
class TestPySyntax(unittest.TestCase):
    """Unit tests for the pysyntax.PySyntax introspection helper."""
    def testInfo(self):
        # Info should classify a member's visibility from its (possibly
        # name-mangled) name, and expose signature/documentation for it.
        # Public property:
        info = pysyntax.PySyntax.Info('public_prop', TestObj.public_prop)
        self.assertEqual(info.internal, False)
        self.assertEqual(info.private, False)
        self.assertEqual(info.protected, False)
        self.assertEqual(info.public, True)
        self.assertEqual(info.function, False)
        self.assertEqual(info.signature, None)
        self.assertEqual(info.documentation, "public property doc")
        # Protected property (single leading underscore):
        info = pysyntax.PySyntax.Info('_protected_prop', TestObj._protected_prop)
        self.assertEqual(info.internal, False)
        self.assertEqual(info.private, False)
        self.assertEqual(info.protected, True)
        self.assertEqual(info.public, False)
        self.assertEqual(info.function, False)
        self.assertEqual(info.signature, None)
        self.assertEqual(info.documentation, "protected property doc")
        # Private property (accessed via its mangled name):
        info = pysyntax.PySyntax.Info('_TestObj__private_prop', TestObj._TestObj__private_prop)
        self.assertEqual(info.internal, False)
        self.assertEqual(info.private, True)
        self.assertEqual(info.protected, False)
        self.assertEqual(info.public, False)
        self.assertEqual(info.function, False)
        self.assertEqual(info.signature, None)
        self.assertEqual(info.documentation, "private property doc")
        # Internal (dunder) property:
        info = pysyntax.PySyntax.Info('__internal_prop__', TestObj.__internal_prop__)
        self.assertEqual(info.internal, True)
        self.assertEqual(info.private, False)
        self.assertEqual(info.protected, False)
        self.assertEqual(info.public, False)
        self.assertEqual(info.function, False)
        self.assertEqual(info.signature, None)
        self.assertEqual(info.documentation, "internal property doc")
        # Methods: same visibility rules, but function=True with a signature.
        info = pysyntax.PySyntax.Info('publicMethod', TestObj.publicMethod)
        self.assertEqual(info.internal, False)
        self.assertEqual(info.private, False)
        self.assertEqual(info.protected, False)
        self.assertEqual(info.public, True)
        self.assertEqual(info.function, True)
        self.assertEqual(info.signature, '(arg)')
        self.assertEqual(info.documentation, "public method doc")
        info = pysyntax.PySyntax.Info('_protectedMethod', TestObj._protectedMethod)
        self.assertEqual(info.internal, False)
        self.assertEqual(info.private, False)
        self.assertEqual(info.protected, True)
        self.assertEqual(info.public, False)
        self.assertEqual(info.function, True)
        self.assertEqual(info.signature, '(arg)')
        self.assertEqual(info.documentation, "protected method doc")
        info = pysyntax.PySyntax.Info('_TestObj__privateMethod', TestObj._TestObj__privateMethod)
        self.assertEqual(info.internal, False)
        self.assertEqual(info.private, True)
        self.assertEqual(info.protected, False)
        self.assertEqual(info.public, False)
        self.assertEqual(info.function, True)
        self.assertEqual(info.signature, '(arg)')
        self.assertEqual(info.documentation, "private method doc")
        info = pysyntax.PySyntax.Info('__internalMethod__', TestObj.__internalMethod__)
        self.assertEqual(info.internal, True)
        self.assertEqual(info.private, False)
        self.assertEqual(info.protected, False)
        self.assertEqual(info.public, False)
        self.assertEqual(info.function, True)
        self.assertEqual(info.signature, '(arg)')
        self.assertEqual(info.documentation, "internal method doc")
    def testClass(self):
        # PySyntax over the whole fixture class: class-level metadata plus
        # member filtering via the items() keyword flags.
        Info = pysyntax.PySyntax.Info
        doc = pysyntax.PySyntax(TestObj)
        self.assertEqual(doc.documentation, 'class doc')
        self.assertEqual(doc.filename, __file__)
        self.assertEqual(doc.signature, '(data)')
        # Expected member lists, one pair (method, property) per visibility.
        public = [('publicMethod', Info('publicMethod', TestObj.publicMethod)),
                  ('public_prop', Info('public_prop', TestObj.public_prop))]
        protected = [('_protectedMethod', Info('_protectedMethod', TestObj._protectedMethod)),
                     ('_protected_prop', Info('_protected_prop', TestObj._protected_prop))]
        private = [('_TestObj__privateMethod', Info('_TestObj__privateMethod', TestObj._TestObj__privateMethod)),
                   ('_TestObj__private_prop', Info('_TestObj__private_prop', TestObj._TestObj__private_prop))]
        internal = [('__internalMethod__', Info('__internalMethod__', TestObj.__internalMethod__)),
                    ('__internal_prop__', Info('__internal_prop__', TestObj.__internal_prop__))]
        # Single-flag filters return exactly that visibility group.
        mem = [(k,v) for k, v in doc.items(public=True)]
        self.assertEqual(mem, public)
        mem = [(k,v) for k, v in doc.items(protected=True)]
        self.assertEqual(mem, protected)
        mem = [(k,v) for k, v in doc.items(private=True)]
        self.assertEqual(mem, private)
        # internal=True also yields inherited dunders, so use membership checks.
        mem = [(k,v) for k, v in doc.items(internal=True)]
        self.assertIn(internal[0], mem)
        self.assertIn(internal[1], mem)
        # function=True keeps methods of every visibility, drops properties.
        mem = [(k,v) for k, v in doc.items(function=True)]
        self.assertIn(public[0], mem)
        self.assertNotIn(public[1], mem)
        self.assertIn(protected[0], mem)
        self.assertNotIn(protected[1], mem)
        self.assertIn(private[0], mem)
        self.assertNotIn(private[1], mem)
        self.assertIn(internal[0], mem)
        self.assertNotIn(internal[1], mem)
        # Combined flags union the groups (protected/private sort first).
        mem = [(k,v) for k, v in doc.items(public=True, protected=True)]
        self.assertEqual(mem, protected + public)
        mem = [(k,v) for k, v in doc.items(public=True, private=True)]
        self.assertEqual(mem, private + public)
        # Flag combination intersects with function=True.
        mem = [(k,v) for k, v in doc.items(public=True, function=True)]
        self.assertEqual(mem, public[:1])
class TestPySyntaxExtension(MooseDocsTestCase):
    """Tokenization tests for the !pysyntax markdown commands."""
    EXTENSIONS = [core, command, pysyntax]
    def testClassCommandAST(self):
        # '!pysyntax class' should emit a PyClass token with heading,
        # signature, docstring paragraph and member headings.
        ast = self.tokenize('!pysyntax class name=MooseDocs.extensions.pysyntax.PySyntaxExtension')
        self.assertToken(ast(0), 'PyClass')
        self.assertToken(ast(0,0), 'Heading', level=2, class_='moose-pysyntax-class-heading', string='MooseDocs.extensions.pysyntax.PySyntaxExtension')
        self.assertToken(ast(0,1), 'Monospace', string='MooseDocs.extensions.pysyntax.PySyntaxExtension(**kwargs)')
        self.assertToken(ast(0,2), 'Paragraph', size=14)
        self.assertToken(ast(0,2,0), 'Word', content='Extension')
        self.assertToken(ast(0,3), 'Heading', level=3, class_='moose-pysyntax-member-heading')
        self.assertToken(ast(0,3,0), 'Strong')
        self.assertToken(ast(0,3,0,0), 'Monospace', string='EXTENSION_COMMANDS')
        self.assertToken(ast(0,4), 'Paragraph')
    def testFunctionCommandAST(self):
        # '!pysyntax function' should emit a PyFunction token whose heading
        # carries the function signature.
        ast = self.tokenize('!pysyntax function name=MooseDocs.extensions.pysyntax.make_extension')
        self.assertToken(ast(0), 'PyFunction')
        self.assertToken(ast(0,0), 'Heading', level=2, class_='moose-pysyntax-member-heading')
        self.assertToken(ast(0,0,0), 'Strong')
        self.assertToken(ast(0,0,0,0), 'Monospace', string='make_extension(**kwargs)')
# Allow running this test module directly, with verbose output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| harterj/moose | python/MooseDocs/test/extensions/test_pysyntax.py | Python | lgpl-2.1 | 8,466 | [
"MOOSE"
] | 7704a2e3947dbb75501bb46e3c04f18af7c199dc7167a7e1992bf553329e73d3 |
'''
Python script to crunch sprite assets in Unity by a constant value (for reducing filesize with big images
This automatically applies to .meta files, so nothing changes in game, and it works on Multiple sprite assets as well.
PIL (Python Image Library) needs to be installed to use this.
@author: Brian Intile
'''
import os
from PIL import Image
import PIL
def get_immediate_subdirectories(directory):
    """Return the names of the immediate subdirectories of *directory*,
    in os.listdir order (files are skipped)."""
    entries = os.listdir(directory)
    return [entry for entry in entries
            if os.path.isdir(os.path.join(directory, entry))]
def browse_for_traits(directory, recurse, mult):
    """Resize every png/jpg directly inside *directory* by *mult*;
    when *recurse* is truthy, descend into every subdirectory too."""
    for entry in os.listdir(directory):
        path = directory + u'\\' + entry
        extension = entry.split('.')[-1].lower()
        if os.path.isfile(path) and extension in (u'png', u'jpg'):
            change_value_names(directory + '\\' + entry, mult)
    if recurse:
        for subdir in get_immediate_subdirectories(directory):
            browse_for_traits(directory + '\\' + subdir, recurse, mult)
def count_leading_whitespace(line):
    """Return the index of the first non-space character in *line*
    (i.e. the number of leading spaces); len(line) if all spaces."""
    return len(line) - len(line.lstrip(' '))
def multiply_bounds(contents, mult):
    """Scale Unity .meta sprite data in *contents* (list of lines) by *mult*.

    Multiplies the x/y/width/height fields of every 'rect:' mapping and the
    spritePixelsToUnits value. The list is modified in place and returned.
    """
    in_rect = False
    rect_indent = 0
    for idx, line in enumerate(contents):
        if in_rect:
            # Still inside a rect mapping while the indent matches.
            if count_leading_whitespace(line) == rect_indent:
                category = line.split(':')[0].split(' ')[-1]
                if category in ('x', 'y', 'width', 'height'):
                    number = line.split(' ')[-1]
                    # Rect bounds are integer pixels: truncate after scaling.
                    contents[idx] = line.replace(number, str(int(float(number) * mult)))
            else:
                in_rect = False
        elif line.split(' ')[-1] == 'rect:':
            # Fields of the mapping are indented two spaces past 'rect:'.
            rect_indent = count_leading_whitespace(line) + 2
            in_rect = True
        elif line[count_leading_whitespace(line):].split(':')[0] == "spritePixelsToUnits":
            number = line.split(' ')[-1]
            result = float(float(number) * mult)
            # Drop a trailing '.0' when the scaled value is whole.
            if result % 1 == 0:
                result = int(result)
            contents[idx] = line.replace(number, str(result))
    return contents
def change_value_names(name, mult):
    """Resize the image at *name* by *mult* and scale its Unity .meta rects.

    Prints a message and returns without changes when the image or its
    .meta sidecar is missing.

    :param name: path to a png/jpg asset
    :param mult: factor applied to each dimension (<1 crunches the asset)
    """
    if not (os.path.isfile(name)):
        print("File " + name + " not found")
        return
    img = Image.open(name)
    # New size truncates toward zero, matching the integer pixel sizes
    # written into the .meta by multiply_bounds.
    width = int(float(img.size[0]) * mult)
    height = int(float(img.size[1]) * mult)
    # LANCZOS is the same filter ANTIALIAS aliased; the ANTIALIAS name was
    # deprecated in Pillow 9.1 and removed in Pillow 10.
    img = img.resize((width, height), PIL.Image.LANCZOS)
    img.save(name)
    if not (os.path.isfile(name + '.meta')):
        print("File " + name + " has no associated .meta file")
        return
    # Rewrite the .meta in place with sprite rects scaled by the same factor;
    # context managers guarantee the handles are closed even on error.
    with open(name + '.meta', 'r') as f:
        contents = multiply_bounds(f.read().split('\n'), mult)
    with open(name + '.meta', 'w') as f:
        f.write('\n'.join(contents))
# Interactive entry point: repeatedly prompt for a target (single file, whole
# folder, or recursive) and a scale factor, until the user quits.
file_path = os.path.dirname(os.path.realpath(__file__))
#file_path = str(file_path, 'utf8')
while True:
    command = input("Unity Sprite Resizer, this will rescale your image(s) and adjust the .meta accordingly for split sprites."
    + "\nEnter an image filename to resize that sprite by a constant."
    + "\nEnter 'a' to resize all sprites in the folder by a constant. (pngs or jpgs only)"
    + "\nEnter 'r' to resize all sprites in the folder AND all subfolders by a constant. (pngs or jpgs only)"
    + "\nEnter 'q' to quit:\n")
    if command == 'q':
        break
    mult = float(input("Enter the number you wish to multiply each dimension by (use values <1 for crunching assets):\n"))
    if command == 'a':
        # Current folder only.
        browse_for_traits(file_path, False, mult)
    elif command == 'r':
        # Current folder plus all subfolders.
        browse_for_traits(file_path, True, mult)
    else:
        # Anything else is treated as a filename relative to this script.
        # NOTE(review): the backslash separator makes this Windows-only.
        change_value_names(file_path + '\\' + command, mult)
"Brian"
] | 89df2968f39aa0592252364ba3d2bacd033f39ede09ab1094edbb942ba2f3e6e |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author:
- '"Jeroen Hoekx (@jhoekx)" <jeroen.hoekx@dsquare.be>'
- '"Alexander Bulimov (@abulimov)" <lazywolf0@gmail.com>'
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
resizing is not supported with percentages.
state:
choices: [ "present", "absent" ]
default: present
description:
- Control if the logical volume exists.
required: false
force:
version_added: "1.5"
choices: [ "yes", "no" ]
default: "no"
description:
      - Shrink or remove operations of volumes require this switch. Ensures
        that filesystems never get corrupted/destroyed by mistake.
required: false
notes:
- Filesystems on top of the volume are not resized.
'''
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol: vg=firefly lv=test size=512
# Create a logical volume of 512g.
- lvol: vg=firefly lv=test size=512g
# Create a logical volume the size of all remaining space in the volume group
- lvol: vg=firefly lv=test size=100%FREE
# Extend the logical volume to 1024m.
- lvol: vg=firefly lv=test size=1024
# Reduce the logical volume to 512m
- lvol: vg=firefly lv=test size=512 force=yes
# Remove the logical volume.
- lvol: vg=firefly lv=test state=absent force=yes
'''
import re
# Matches either decimal separator, so sizes like "512.00" or "512,00"
# (locale-dependent lvs output) can both be truncated to their integer part.
decimal_point = re.compile(r"(\.|,)")


def parse_lvs(data):
    """Parse `lvs --noheadings --separator ;` output.

    Each non-empty line is "name;size"; the size is truncated at the first
    decimal separator. Returns a list of {'name': ..., 'size': int} dicts.
    """
    volumes = []
    for line in data.splitlines():
        fields = line.strip().split(';')
        whole_size = decimal_point.split(fields[1])[0]
        volumes.append({
            'name': fields[0],
            'size': int(whole_size),
        })
    return volumes
def main():
    """Module entry point: create, resize or remove an LVM logical volume."""
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(required=True),
            lv=dict(required=True),
            size=dict(),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
        ),
        supports_check_mode=True,
    )
    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    # Default to lvcreate's -L (absolute size), in megabytes; switched to
    # -l (extents) below when a percentage size is given.
    size_opt = 'L'
    size_unit = 'm'
    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''
        # LVCREATE(8) -L --size option unit
        elif size[-1].isalpha():
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                if size[0:-1].isdigit():
                    size = int(size[0:-1])
                else:
                    module.fail_json(msg="Bad size specification for unit %s" % size_unit)
                size_opt = 'L'
            else:
                module.fail_json(msg="Size unit should be one of [bBsSkKmMgGtTpPeE]")
        # when no unit, megabytes by default
        elif size.isdigit():
            size = int(size)
        else:
            module.fail_json(msg="Bad size specification")
    # Ask lvs to report sizes in the same unit the user gave, so the numeric
    # resize comparison below compares like with like (megabytes when a
    # percentage/extents size was requested).
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
    if rc != 0:
        # lvs failing for this VG is treated as "the VG does not exist".
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
    changed = False
    lvs = parse_lvs(current_lvs)
    # Locate the requested LV; for/else leaves this_lv as None when absent.
    for test_lv in lvs:
        if test_lv['name'] == lv:
            this_lv = test_lv
            break
    else:
        this_lv = None
    if state == 'present' and not size:
        # Without a size we can only report on an already-existing volume.
        if this_lv is None:
            module.fail_json(msg="No size given.")
        else:
            module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
    msg = ''
    if this_lv is None:
        if state == 'present':
            ### create LV
            if module.check_mode:
                changed = True
            else:
                lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
                rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg))
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            ### remove LV
            if module.check_mode:
                module.exit_json(changed=True)
            # Removal is destructive, so it is gated behind force=yes.
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
        elif size_opt == 'l':
            module.exit_json(changed=False, msg="Resizing extents with percentage not supported.")
        else:
            ### resize LV
            tool = None
            if size > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif size < this_lv['size']:
                # Shrinking can corrupt the filesystem on top, so it also
                # requires force=yes.
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                tool = module.get_bin_path("lvreduce", required=True)
                tool = '%s %s' % (tool, '--force')
            if tool:
                if module.check_mode:
                    changed = True
                else:
                    rc, _, err = module.run_command("%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name']))
                    if rc == 0:
                        changed = True
                    elif "matches existing size" in err:
                        # Same size already: report ok without a change.
                        module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                    else:
                        module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
    module.exit_json(changed=changed, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
# Entry point when executed directly as an Ansible module script.
if __name__ == '__main__':
    main()
| atlashealth/ansible-modules-extras | system/lvol.py | Python | gpl-3.0 | 8,348 | [
"Firefly"
] | 0233cb91ac4c2d78ffb4dc1504658b93e51b1ac298383f4af36fdee0ff740489 |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement learning models and parameters."""
import collections
import functools
import operator
import gym
import six
from tensor2tensor.data_generators import gym_env
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import video_utils
from tensor2tensor.envs import tic_tac_toe_env
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import discretization
from tensor2tensor.layers import modalities
from tensor2tensor.models.video import basic_deterministic_params
from tensor2tensor.models.video import basic_stochastic
from tensor2tensor.rl.envs.py_func_batch_env import PyFuncBatchEnv
from tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv
from tensor2tensor.rl.envs.simulated_batch_gym_env import SimulatedBatchGymEnv
from tensor2tensor.utils import hparam
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
from tensor2tensor.utils import trainer_lib
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
@registry.register_hparams
def ppo_base_v1():
  """Set of hyperparameters."""
  hp = common_hparams.basic_params1()
  hp.learning_rate_schedule = "constant"
  hp.learning_rate_constant = 1e-4
  hp.clip_grad_norm = 0.5
  hp.weight_decay = 0
  # New hparams introduced by PPO, registered in declaration order.
  extra = [
      # If set, extends the LR warmup to all epochs except the final one.
      ("lr_decay_in_final_epoch", False),
      ("init_mean_factor", 0.1),
      ("init_logstd", 0.1),
      ("policy_layers", (100, 100)),
      ("value_layers", (100, 100)),
      ("clipping_coef", 0.2),
      ("gae_gamma", 0.99),
      ("gae_lambda", 0.95),
      ("entropy_loss_coef", 0.01),
      ("value_loss_coef", 1),
      ("optimization_epochs", 15),
      ("epoch_length", 200),
      ("epochs_num", 2000),
      ("eval_every_epochs", 10),
      ("save_models_every_epochs", 30),
      ("optimization_batch_size", 50),
      ("intrinsic_reward_scale", 0.),
      ("logits_clip", 0.0),
      ("dropout_ppo", 0.1),
      ("effective_num_agents", None),
      ("use_epochs", True),
      # TODO(afrozm): Clean this up, this is used in PPO learner to get
      # modalities.
      ("policy_problem_name", "dummy_policy_problem"),
  ]
  for key, val in extra:
    hp.add_hparam(key, val)
  return hp
@registry.register_hparams
def basic_policy_parameters():
  """Minimal policy hparams: no environment wrappers."""
  return hparam.HParams(wrappers=None)
@registry.register_hparams
def ppo_discrete_action_base():
  """PPO base hparams plus a categorical (discrete-action) policy net."""
  hp = ppo_base_v1()
  hp.add_hparam("policy_network", "feed_forward_categorical_policy")
  return hp
@registry.register_hparams
def discrete_random_action_base():
  """Basic hparams with a uniformly random discrete policy."""
  hp = common_hparams.basic_params1()
  hp.add_hparam("policy_network", "random_policy")
  return hp
@registry.register_hparams
def ppo_atari_base():
  """Pong base parameters."""
  hp = ppo_discrete_action_base()
  hp.learning_rate_constant = 1e-4
  hp.epoch_length = 200
  hp.gae_gamma = 0.985
  hp.gae_lambda = 0.985
  hp.entropy_loss_coef = 0.003
  hp.value_loss_coef = 1
  hp.optimization_epochs = 3
  hp.epochs_num = 1000
  hp.policy_network = "feed_forward_cnn_small_categorical_policy"
  hp.clipping_coef = 0.2
  hp.optimization_batch_size = 20
  hp.clip_grad_norm = 0.5
  return hp
@registry.register_hparams
def ppo_original_params():
  """Parameters based on the original PPO paper."""
  hp = ppo_atari_base()
  hp.learning_rate_constant = 2.5e-4
  hp.gae_gamma = 0.99
  hp.gae_lambda = 0.95
  hp.clipping_coef = 0.1
  hp.value_loss_coef = 1
  hp.entropy_loss_coef = 0.01
  hp.eval_every_epochs = 200
  hp.dropout_ppo = 0.1
  # The parameters below are modified to accommodate short epoch_length (which
  # is needed for model based rollouts).
  hp.epoch_length = 50
  hp.optimization_batch_size = 20
  return hp
@registry.register_hparams
def ppo_dist_params():
  """Parameters based on the original paper modified for distributional RL."""
  hp = ppo_original_params()
  hp.learning_rate_constant = 1e-3
  return hp
@registry.register_hparams
def ppo_original_tiny():
  """Parameters based on the original PPO paper, tiny version."""
  hp = ppo_original_params()
  hp.epoch_length = 5
  hp.optimization_batch_size = 1
  return hp
@registry.register_hparams
def ppo_ttt_params():
  """Tiny PPO params adapted for the tic-tac-toe policy problem."""
  hp = ppo_original_tiny()
  hp.policy_network = "feed_forward_categorical_policy"
  hp.policy_problem_name = "dummy_policy_problem_ttt"
  return hp
@registry.register_hparams
def ppo_original_params_gamma95():
  """Original-paper PPO params with gamma lowered to 0.95."""
  hp = ppo_original_params()
  hp.gae_gamma = 0.95
  return hp
@registry.register_hparams
def ppo_original_params_gamma90():
  """Original-paper PPO params with gamma lowered to 0.90."""
  hp = ppo_original_params()
  hp.gae_gamma = 0.90
  return hp
@registry.register_hparams
def ppo_original_world_model():
  """Atari parameters with world model as policy."""
  hp = ppo_original_params()
  hp.policy_network = "next_frame_basic_deterministic"
  # Snapshot the existing keys before merging so new additions below do not
  # affect the membership test.
  existing = hp.values().keys()
  wm_hparams = basic_deterministic_params.next_frame_basic_deterministic()
  for key, val in six.iteritems(wm_hparams.values()):
    setter = hp.set_hparam if key in existing else hp.add_hparam
    setter(key, val)
  # Mostly to avoid decaying WM params when training the policy.
  hp.weight_decay = 0
  return hp
@registry.register_hparams
def ppo_tiny_world_model():
  """Atari parameters with tiny world model as policy."""
  hp = ppo_original_params()
  hp.policy_network = "next_frame_basic_deterministic"
  # Snapshot keys before merging the video-model hparams in.
  existing = hp.values().keys()
  wm_hparams = basic_deterministic_params.next_frame_tiny()
  for key, val in six.iteritems(wm_hparams.values()):
    setter = hp.set_hparam if key in existing else hp.add_hparam
    setter(key, val)
  # Avoid decaying WM params when training the policy.
  hp.weight_decay = 0
  return hp
@registry.register_hparams
def ppo_original_world_model_stochastic_discrete():
  """Atari parameters with stochastic discrete world model as policy."""
  hp = ppo_original_params()
  hp.policy_network = "next_frame_basic_stochastic_discrete"
  # Snapshot keys before merging the video-model hparams in.
  existing = hp.values().keys()
  wm_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
  for key, val in six.iteritems(wm_hparams.values()):
    setter = hp.set_hparam if key in existing else hp.add_hparam
    setter(key, val)
  # To avoid OOM. Probably way too small.
  hp.optimization_batch_size = 1
  hp.weight_decay = 0
  return hp
def make_real_env_fn(env):
  """Creates a function returning a given real env, in or out of graph.

  Args:
    env: Environment to return from the function.

  Returns:
    Function in_graph -> env.
  """
  def env_fn(in_graph):
    # In-graph callers get the env wrapped for TF; others get it unchanged.
    if in_graph:
      return PyFuncBatchEnv(env)
    return env
  return env_fn
def make_simulated_env_fn(**env_kwargs):
  """Returns a function creating a simulated env, in or out of graph.

  Args:
    **env_kwargs: kwargs to pass to the simulated env constructor.

  Returns:
    Function in_graph -> env.
  """
  return lambda in_graph: (
      SimulatedBatchEnv(**env_kwargs) if in_graph
      else SimulatedBatchGymEnv(**env_kwargs))
# TODO(koz4k): Move this and the one below to rl_utils.
def make_simulated_env_kwargs(real_env, hparams, **extra_kwargs):
  """Extracts simulated env kwargs from real_env and loop hparams."""
  kwargs = {}
  # Mirror selected attributes of the real env and of the loop hparams.
  for attr in ("reward_range", "observation_space", "action_space",
               "frame_height", "frame_width"):
    kwargs[attr] = getattr(real_env, attr)
  for attr in ("frame_stack_size", "intrinsic_reward_scale"):
    kwargs[attr] = getattr(hparams, attr)
  kwargs["model_name"] = hparams.generative_model
  kwargs["model_hparams"] = trainer_lib.create_hparams(
      hparams.generative_model_params)
  if hparams.wm_policy_param_sharing:
    kwargs["model_hparams"].optimizer_zero_grads = True
  # Explicit overrides win over everything derived above.
  kwargs.update(extra_kwargs)
  return kwargs
def make_simulated_env_fn_from_hparams(real_env, hparams, **extra_kwargs):
  """Creates a simulated env_fn."""
  env_kwargs = make_simulated_env_kwargs(real_env, hparams, **extra_kwargs)
  return make_simulated_env_fn(**env_kwargs)
def get_policy(observations, hparams, action_space,
               distributional_size=1, epoch=-1):
  """Get a policy network.
  Args:
    observations: observations
    hparams: parameters
    action_space: action space
    distributional_size: optional number of buckets for distributional RL
    epoch: optional epoch number
  Returns:
    Tuple (action logits, value).
  """
  if not isinstance(action_space, gym.spaces.Discrete):
    raise ValueError("Expecting discrete action space.")
  obs_shape = common_layers.shape_list(observations)
  (frame_height, frame_width) = obs_shape[2:4]
  # TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup
  # when possible and do this properly.
  if hparams.policy_problem_name == "dummy_policy_problem_ttt":
    tf.logging.info("Using DummyPolicyProblemTTT for the policy.")
    policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()
  else:
    tf.logging.info("Using DummyPolicyProblem for the policy.")
    policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)
  trainer_lib.add_problem_hparams(hparams, policy_problem)
  hparams.force_full_predict = True
  model = registry.model(hparams.policy_network)(
      hparams, tf.estimator.ModeKeys.TRAIN
  )
  try:
    num_target_frames = hparams.video_num_target_frames
  except AttributeError:
    num_target_frames = 1
  # With distributional RL the value head emits one logit per bucket.
  target_value_shape_suffix = [num_target_frames]
  if distributional_size > 1:
    target_value_shape_suffix = [num_target_frames, distributional_size]
  # Only "inputs" (and "epoch") carry real data; the remaining video-model
  # features are fed as all-zero dummies of the right shape/dtype.
  features = {
      "inputs": observations,
      "epoch": tf.constant(epoch + 1),
      "input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
      "input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
      "targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),
      "target_action": tf.zeros(
          obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
      "target_reward": tf.zeros(
          obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
      "target_policy": tf.zeros(
          obs_shape[:1] + [num_target_frames] + [action_space.n]),
      "target_value": tf.zeros(
          obs_shape[:1] + target_value_shape_suffix)
  }
  model.distributional_value_size = max(distributional_size, 1)
  model.use_epochs = hparams.use_epochs
  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    t2t_model.create_dummy_vars()
    (targets, _) = model(features)
  # Take the first target frame; keep all buckets in the distributional case.
  target_values = targets["target_value"][:, 0]
  if distributional_size > 1:
    target_values = targets["target_value"][:, :]
  return (targets["target_policy"][:, 0, :], target_values)
@registry.register_hparams
def ppo_pong_ae_base():
  """Pong autoencoder base parameters."""
  hparams = ppo_original_params()
  hparams.learning_rate_constant = 1e-4
  # NOTE(review): this sets `network`, while the rest of this file reads
  # `hparams.policy_network` (see get_policy) -- looks like it should be
  # `policy_network`; confirm before relying on this config.
  hparams.network = "dense_bitwise_categorical_policy"
  return hparams
@registry.register_hparams
def dqn_atari_base():
  """DQN hparams based on agents/dqn/configs/dqn.gin, with local tweaks."""
  config = dict(
      agent_gamma=0.99,
      agent_update_horizon=1,
      agent_min_replay_history=20000,  # agent steps
      agent_update_period=4,
      agent_target_update_period=8000,  # agent steps
      agent_epsilon_train=0.01,
      agent_epsilon_eval=0.001,
      agent_epsilon_decay_period=250000,  # agent steps
      agent_generates_trainable_dones=True,
      agent_type="VanillaDQN",  # one of ["Rainbow", "VanillaDQN"]
      optimizer_class="RMSProp",
      optimizer_learning_rate=0.00025,
      optimizer_decay=0.95,
      optimizer_momentum=0.0,
      optimizer_epsilon=0.00001,
      optimizer_centered=True,
      # TODO(kozak): change names maybe replay_buffer -> agent?
      # Also batch_size is now buffer_batch_size in _DQNAgent.
      replay_buffer_replay_capacity=1000000,
      replay_buffer_buffer_batch_size=32,
      time_limit=27000,
      save_every_steps=50000,
      num_frames=int(20 * 1e6),
      # TODO(konradczechowski) this is not used in trainer_model_free, clean
      # this up after evaluation refactor
      eval_episodes_num=3,
  )
  return hparam.HParams(**config)
@registry.register_hparams
def dqn_original_params():
  """Base DQN params limited to 1M frames."""
  hp = dqn_atari_base()
  hp.set_hparam("num_frames", int(1e6))
  return hp
@registry.register_hparams
def dqn_guess1_params():
  """Guess 1 for DQN params."""
  hp = dqn_atari_base()
  hp.set_hparam("num_frames", int(1e6))
  hp.set_hparam("agent_update_period", 1)
  hp.set_hparam("agent_target_update_period", 400)
  # Small replay buffer size was set by mistake, but it seems to work.
  hp.set_hparam("replay_buffer_replay_capacity", 10000)
  return hp
@registry.register_hparams
def dqn_guess1_params_eval():
  """Params for dqn_guess1 evaluation (with evaluator.py)."""
  hp = dqn_guess1_params()
  hp.set_hparam("eval_episodes_num", 64)
  return hp
@registry.register_hparams
def dqn_guess1_rainbow_params():
  """Guess 1 DQN params with the Rainbow agent."""
  hp = dqn_guess1_params()
  hp.set_hparam("agent_type", "Rainbow")
  return hp
@registry.register_hparams
def dqn_rainbow_params():
  """Rainbow params."""
  hp = dqn_guess1_params()
  hp.set_hparam("agent_type", "Rainbow")
  hp.set_hparam("replay_buffer_replay_capacity", int(2e6) + int(1e5))
  return hp
@registry.register_hparams
def dqn_2m_replay_buffer_params():
  """Guess 1 DQN params, 2 million transitions in replay buffer."""
  hp = dqn_guess1_params()
  hp.set_hparam("replay_buffer_replay_capacity", int(2e6) + int(1e5))
  return hp
@registry.register_hparams
def dqn_10m_replay_buffer_params():
  """Guess 1 DQN params, 10 million transitions in replay buffer."""
  hp = dqn_guess1_params()
  hp.set_hparam("replay_buffer_replay_capacity", int(10e6))
  return hp
def rlmf_tiny_overrides():
  """Parameters to override for tiny setting excluding agent-related hparams."""
  return {
      "max_num_noops": 1,
      "eval_max_num_noops": 1,
      "rl_env_max_episode_steps": 7,
      "eval_rl_env_max_episode_steps": 7,
      "eval_sampling_temps": [0.0, 1.0],
  }
@registry.register_hparams
def rlmf_original():
  """Base hparams for model-free RL (PPO on non-sticky Atari Pong)."""
  config = dict(
      game="pong",
      sticky_actions=False,
      base_algo="ppo",
      base_algo_params="ppo_original_params",
      batch_size=16,
      eval_batch_size=2,
      frame_stack_size=4,
      eval_sampling_temps=[0.0, 0.2, 0.5, 0.8, 1.0, 2.0],
      max_num_noops=8,
      eval_max_num_noops=8,
      eval_rl_env_max_episode_steps=1000,
      resize_height_factor=2,
      resize_width_factor=2,
      distributional_size=1,  # In distributional RL, number of buckets.
      distributional_subscale=0.04,  # How to scale values to buckets.
      distributional_threshold=0.0,  # Optimism threshold for experiments.
      grayscale=0,
      rl_env_max_episode_steps=-1,
      # If set, use this as the gym env name, instead of changing game mode etc.
      rl_env_name="",
      # Controls whether we should derive observation space, do some
      # pre-processing etc. See T2TGymEnv._derive_observation_space.
      rl_should_derive_observation_space=True,
      aunused=0,  # unused param for multi-run settings.
  )
  return hparam.HParams(**config)
@registry.register_hparams
def rlmf_tictactoe():
  """Base set of hparams for model-free PPO."""
  hp = rlmf_original()
  hp.game = "tictactoe"
  hp.rl_env_name = "T2TEnv-TicTacToeEnv-v0"
  # Since we don't have any no-op actions, otherwise we have to have an
  # attribute called `get_action_meanings`.
  hp.eval_max_num_noops = 0
  hp.max_num_noops = 0
  hp.rl_should_derive_observation_space = False
  hp.policy_network = "feed_forward_categorical_policy"
  hp.base_algo_params = "ppo_ttt_params"
  # Number of last observations to feed to the agent
  hp.frame_stack_size = 1
  return hp
@registry.register_hparams
def rlmf_base():
  """Base set of hparams for model-free PPO."""
  hp = rlmf_original()
  hp.add_hparam("ppo_epochs_num", 3000)
  hp.add_hparam("ppo_eval_every_epochs", 100)
  return hp
@registry.register_ranged_hparams
def rlmf_5runs(rhp):
  """Sweep over 5 seeds via the unused `aunused` hparam."""
  rhp.set_discrete("aunused", [0, 1, 2, 3, 4])
@registry.register_ranged_hparams
def rlmf_5runs_atari(rhp):
  """Sweep over all benchmark Atari games, 5 seeds each."""
  rhp.set_categorical("game", gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE)
  rhp.set_discrete("aunused", [0, 1, 2, 3, 4])
@registry.register_hparams
def rlmf_dist():
  """Distributional set of hparams for model-free PPO."""
  hp = rlmf_original()
  hp.distributional_size = 1024
  hp.base_algo_params = "ppo_dist_params"
  return hp
@registry.register_hparams
def rlmf_dist_threshold():
  """Distributional model-free PPO hparams with optimism threshold."""
  hp = rlmf_dist()
  hp.distributional_threshold = 0.5
  return hp
@registry.register_hparams
def rlmf_tiny():
  """Tiny set of hparams for model-free PPO."""
  hp = rlmf_original().override_from_dict(rlmf_tiny_overrides())
  hp.batch_size = 2
  hp.base_algo_params = "ppo_original_tiny"
  hp.add_hparam("ppo_epochs_num", 3)
  hp.add_hparam("ppo_epoch_length", 2)
  return hp
@registry.register_hparams
def rlmf_dqn_tiny():
  """Tiny DQN params."""
  hp = rlmf_original().override_from_dict(rlmf_tiny_overrides())
  hp.batch_size = 1
  hp.base_algo = "dqn"
  hp.base_algo_params = "dqn_original_params"
  for key, val in [("dqn_num_frames", 128),
                   ("dqn_save_every_steps", 128),
                   ("dqn_replay_buffer_replay_capacity", 100),
                   ("dqn_agent_min_replay_history", 10)]:
    hp.add_hparam(key, val)
  return hp
@registry.register_hparams
def rlmf_eval():
  """Eval set of hparams for model-free PPO."""
  hp = rlmf_original()
  hp.batch_size = 16
  hp.eval_batch_size = 32
  hp.eval_episodes_num = 2
  hp.eval_sampling_temps = [0.5, 0.0, 1.0]
  hp.eval_rl_env_max_episode_steps = 40000
  for key, val in [("ppo_epoch_length", 128),
                   ("ppo_optimization_batch_size", 32),
                   ("ppo_epochs_num", 10000),
                   ("ppo_eval_every_epochs", 500),
                   ("attempt", 0),
                   ("moe_loss_coef", 0)]:
    hp.add_hparam(key, val)
  return hp
@registry.register_hparams
def rlmf_eval_dist():
  """Distributional eval hparams for model-free PPO."""
  hp = rlmf_eval()
  hp.distributional_size = 4096
  hp.distributional_subscale = 0.08
  hp.base_algo_params = "ppo_dist_params"
  return hp
@registry.register_hparams
def rlmf_eval_dist_threshold():
  """Distributional eval hparams with optimism threshold."""
  hp = rlmf_eval_dist()
  hp.distributional_threshold = 0.5
  return hp
class PolicyBase(t2t_model.T2TModel):
  """Base class for policy networks; disables T2T's default loss."""
  def __init__(self, *args, **kwargs):
    super(PolicyBase, self).__init__(*args, **kwargs)
    # Defaults; overwritten externally before the net is built (see
    # get_policy, which sets both attributes on the model instance).
    self.distributional_value_size = 1
    self.use_epochs = False
  def loss(self, *args, **kwargs):
    # Policies are trained by the RL learner's own objective, so the T2T
    # training loss is a constant zero.
    return 0.0
# TODO(lukaszkaiser): move this class or clean up the whole file.
class DummyPolicyProblem(video_utils.VideoProblem):
  """Dummy Problem for running the policy."""
  def __init__(self, action_space, frame_height, frame_width):
    super(DummyPolicyProblem, self).__init__()
    self.action_space = action_space
    self._frame_height = frame_height
    self._frame_width = frame_width
  @property
  def frame_height(self):
    """Height of each frame."""
    return self._frame_height
  @property
  def frame_width(self):
    """Width of each frame."""
    return self._frame_width
  @property
  def num_actions(self):
    """Number of discrete actions in the wrapped action space."""
    return self.action_space.n
  def hparams(self, defaults, unused_model_hparams):
    """Fill `defaults` with the modalities/vocab sizes the policy expects."""
    p = defaults
    video = modalities.ModalityType.VIDEO
    symbol = modalities.ModalityType.SYMBOL_WEIGHTS_ALL
    identity = modalities.ModalityType.IDENTITY
    p.modality = {
        "inputs": video,
        "input_action": symbol,
        "input_reward": symbol,
        "targets": video,
        "target_action": symbol,
        "target_reward": symbol,
        "target_policy": identity,
        "target_value": identity,
    }
    p.vocab_size = {
        "inputs": 256,
        "input_action": self.num_actions,
        "input_reward": 3,
        "targets": 256,
        "target_action": self.num_actions,
        "target_reward": 3,
        "target_policy": None,
        "target_value": None,
    }
    p.input_space_id = problem.SpaceID.IMAGE
    p.target_space_id = problem.SpaceID.IMAGE
# Container for a policy net's outputs: the action distribution, the value
# estimate, and a post-processing function applied to sampled actions.
NetworkOutput = collections.namedtuple(
    "NetworkOutput", "policy, value, action_postprocessing")
# TODO(koz4k): Translate it to T2TModel or remove.
def feed_forward_gaussian_fun(action_space, config, observations):
  """Feed-forward Gaussian."""
  if not isinstance(action_space, gym.spaces.box.Box):
    raise ValueError("Expecting continuous action space.")
  mean_weights_initializer = tf.initializers.variance_scaling(
      scale=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
  # Flatten all per-step dimensions: [batch, time, prod(rest)].
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in config.policy_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      mean = tf.layers.dense(
          x, action_space.shape[0], activation=tf.tanh,
          kernel_initializer=mean_weights_initializer)
      # One learned log-std per action dimension, tiled over batch and time.
      logstd = tf.get_variable(
          "logstd", mean.shape[2:], tf.float32, logstd_initializer)
      logstd = tf.tile(
          logstd[None, None],
          [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
    with tf.variable_scope("value"):
      x = flat_observations
      for size in config.value_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      value = tf.layers.dense(x, 1)[..., 0]
  # Fail fast on NaN/Inf in any of the network outputs.
  mean = tf.check_numerics(mean, "mean")
  logstd = tf.check_numerics(logstd, "logstd")
  value = tf.check_numerics(value, "value")
  policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
  return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
def clip_logits(logits, config):
  """Optionally clip logits to `config.logits_clip` above their minimum."""
  clip_range = getattr(config, "logits_clip", 0.)
  if clip_range <= 0:
    # Clipping disabled.
    return logits
  return tf.minimum(logits - tf.reduce_min(logits), clip_range)
@registry.register_model
class FeedForwardCategoricalPolicy(PolicyBase):
  """Feed-forward categorical."""
  def body(self, features):
    """Compute action logits and a value estimate from flat observations."""
    flat_obs = tf.layers.flatten(
        tf.cast(features["inputs_raw"], tf.float32))
    with tf.variable_scope("policy"):
      hidden = flat_obs
      for layer_size in self.hparams.policy_layers:
        hidden = tf.layers.dense(hidden, layer_size, activation=tf.nn.relu)
      logits = tf.layers.dense(hidden, self.hparams.problem.num_actions)
      logits = tf.expand_dims(logits, axis=1)
    with tf.variable_scope("value"):
      hidden = flat_obs
      for layer_size in self.hparams.value_layers:
        hidden = tf.layers.dense(hidden, layer_size, activation=tf.nn.relu)
      value = tf.layers.dense(hidden, 1)
    logits = clip_logits(logits, self.hparams)
    return {"target_policy": logits, "target_value": value}
@registry.register_model
class FeedForwardCnnSmallCategoricalPolicy(PolicyBase):
  """Small cnn network with categorical output."""
  def body(self, features):
    """Compute per-action logits and a value estimate from stacked frames."""
    observations = features["inputs_raw"]
    # Axis 0 - Batch.
    # Axis 1 - Input Frames, 4 frames.
    # Axis 2, 3 - Height & Width.
    # Axis 4 - Channels RGB, 3 colours.
    x = tf.transpose(observations, [0, 2, 3, 1, 4])
    x_shape = common_layers.shape_list(x)
    # Fold the frame-stack and channel axes together: [B, H, W, frames*C].
    x = tf.reshape(x, x_shape[:-2] + [-1])
    dropout = getattr(self.hparams, "dropout_ppo", 0.0)
    with tf.variable_scope("feed_forward_cnn_small"):
      # Scale raw pixel values to [0, 1].
      x = tf.cast(x, tf.float32) / 255.0
      x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),
                           activation=tf.nn.relu, padding="same")
      x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),
                           activation=tf.nn.relu, padding="same")
      flat_x = tf.layers.flatten(x)
      if self.use_epochs:
        epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32)
        # Randomly set epoch to 0 in some cases as that's the inference value.
        rand = tf.random.uniform([x_shape[0]])
        epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch)
        # Embed the epoch number.
        emb_epoch = common_layers.embedding(epoch, 32, 32)  # [batch, 32]
        flat_x = tf.concat([flat_x, emb_epoch], axis=1)
      flat_x = tf.layers.dropout(flat_x, rate=dropout)
      x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)
      logits = tf.layers.dense(
          x, self.hparams.problem.num_actions, name="dense2"
      )
      # Optionally clip logit range, then add a time axis of length 1.
      logits = clip_logits(logits, self.hparams)
      logits = tf.expand_dims(logits, axis=1)
      # One output per value bucket (1 unless distributional RL is on).
      value = tf.layers.dense(x, self.distributional_value_size)
    return {"target_policy": logits, "target_value": value}
@registry.register_model
class FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase):
  """Small cnn network with categorical output."""
  def body(self, features):
    """Compute per-action logits and a value estimate from stacked frames."""
    observations = features["inputs"]
    # Move the frame-stack axis next to channels (same layout as
    # FeedForwardCnnSmallCategoricalPolicy above).
    x = tf.transpose(observations, [0, 2, 3, 1, 4])
    x_shape = common_layers.shape_list(x)
    # Fold the frame-stack and channel axes together.
    x = tf.reshape(x, x_shape[:-2] + [-1])
    dropout = getattr(self.hparams, "dropout_ppo", 0.0)
    with tf.variable_scope("feed_forward_cnn_small"):
      # Scale raw pixel values to [0, 1].
      x = tf.cast(x, tf.float32) / 255.0
      x = tf.nn.dropout(x, rate=dropout)
      x = tf.layers.conv2d(
          x, 32, (4, 4), strides=(2, 2), name="conv1",
          activation=common_layers.belu, padding="SAME")
      x = tf.nn.dropout(x, rate=dropout)
      x = tf.layers.conv2d(
          x, 64, (4, 4), strides=(2, 2), name="conv2",
          activation=common_layers.belu, padding="SAME")
      x = tf.nn.dropout(x, rate=dropout)
      x = tf.layers.conv2d(
          x, 128, (4, 4), strides=(2, 2), name="conv3",
          activation=common_layers.belu, padding="SAME")
      flat_x = tf.layers.flatten(x)
      flat_x = tf.nn.dropout(flat_x, rate=dropout)
      x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1")
      logits = tf.layers.dense(
          x, self.hparams.problem.num_actions, name="dense2"
      )
      # Add a time axis of length 1, then optionally clip logit range.
      logits = tf.expand_dims(logits, axis=1)
      logits = clip_logits(logits, self.hparams)
      value = tf.layers.dense(x, 1, name="value")
    return {"target_policy": logits, "target_value": value}
@registry.register_model
class DenseBitwiseCategoricalPolicy(PolicyBase):
  """Dense network with bitwise input and categorical output."""
  def body(self, features):
    """Compute action logits and a value estimate.

    Args:
      features: dict with "inputs" holding integer observations.

    Returns:
      Dict with "target_policy" logits and a per-example "target_value".
    """
    observations = features["inputs"]
    flat_x = tf.layers.flatten(observations)
    with tf.variable_scope("dense_bitwise"):
      # Embed each byte of the observation bitwise.
      flat_x = discretization.int_to_bit_embed(flat_x, 8, 32)
      x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu)
      # Bug fix: the second layer previously read `flat_x` again, silently
      # discarding the 256-unit layer above; chain it on `x` instead.
      x = tf.layers.dense(x, 128, activation=tf.nn.relu)
      logits = tf.layers.dense(x, self.hparams.problem.num_actions)
      value = tf.layers.dense(x, 1)[..., 0]
    return {"target_policy": logits, "target_value": value}
@registry.register_model
class RandomPolicy(PolicyBase):
  """Random policy with categorical output."""
  def body(self, features):
    """Return uniform action probabilities and zero value estimates."""
    obs = features["inputs"]
    batch_dim = obs.shape.as_list()[:1]
    # Just so Saver doesn't complain because of no variables.
    tf.get_variable("dummy_var", initializer=0.0)
    n_actions = self.hparams.problem.num_actions
    uniform_logits = tf.constant(
        1. / float(n_actions),
        shape=(batch_dim + [1, n_actions])
    )
    return {
        "target_policy": uniform_logits,
        "target_value": tf.zeros(batch_dim + [1]),
    }
| tensorflow/tensor2tensor | tensor2tensor/models/research/rl.py | Python | apache-2.0 | 29,682 | [
"Gaussian"
] | 95d031cbe72ede90e09bb4c8ce9a980413b66efe7b69ff465f3c635a76d57451 |
# beam_search.py - progressive widening beam search
#
# Copyright 2016 NetworkX developers.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""Beam search with dynamic beam width.
The progressive widening beam search repeatedly executes a beam search
with increasing beam width until the target node is found.
"""
import math
import networkx as nx
def progressive_widening_search(G, source, value, condition, initial_width=1):
    """Progressive widening beam search to find a node.

    The progressive widening beam search involves a repeated beam
    search, starting with a small beam width then extending to
    progressively larger beam widths if the target node is not
    found. This implementation simply returns the first node found that
    matches the termination condition.

    `G` is a NetworkX graph.

    `source` is a node in the graph. The search for the node of interest
    begins here and extends only to those nodes in the (weakly)
    connected component of this node.

    `value` is a function that returns a real number indicating how good
    a potential neighbor node is when deciding which neighbor nodes to
    enqueue in the breadth-first search. Only the best nodes within the
    current beam width will be enqueued at each step.

    `condition` is the termination condition for the search. This is a
    function that takes a node as input and return a Boolean indicating
    whether the node is the target. If no node matches the termination
    condition, this function raises :exc:`NodeNotFound`.

    `initial_width` is the starting beam width for the beam search (the
    default is one). If no node matching the `condition` is found with
    this beam width, the beam search is restarted from the `source` node
    with a beam width that is twice as large (so the beam width
    increases exponentially). The search terminates after the beam width
    exceeds the number of nodes in the graph.
    """
    # Check for the special case in which the source node satisfies the
    # termination condition.
    if condition(source):
        return source
    # The largest possible value of `i` in this range yields a width at
    # least the number of nodes in the graph, so the final invocation of
    # `bfs_beam_edges` is equivalent to a plain old breadth-first
    # search. Therefore, all nodes will eventually be visited.
    #
    # BUG FIX: the range must include `log_m` itself. With
    # `range(log_m)` the maximum width is 2 ** (log_m - 1), which is
    # always strictly less than len(G), so the final pass was NOT a full
    # BFS and reachable matching nodes could be missed.
    log_m = math.ceil(math.log2(len(G)))
    for i in range(log_m + 1):
        width = initial_width * pow(2, i)
        # Since we are always starting from the same source node, this
        # search may visit the same nodes many times (depending on the
        # implementation of the `value` function).
        for u, v in nx.bfs_beam_edges(G, source, value, width):
            if condition(v):
                return v
    # At this point, since all nodes have been visited, we know that
    # none of the nodes satisfied the termination condition.
    raise nx.NodeNotFound('no node satisfied the termination condition')
def main():
    """Search for a node with high centrality.

    Generates a random graph, computes each node's eigenvector
    centrality, then runs the progressive widening search for a node
    whose centrality is at least the average.
    """
    graph = nx.gnp_random_graph(100, 0.5)
    centrality = nx.eigenvector_centrality(graph)
    avg_centrality = sum(centrality.values()) / len(graph)

    def has_high_centrality(v):
        return centrality[v] >= avg_centrality

    found_node = progressive_widening_search(
        graph, 0, centrality.get, has_high_centrality)
    c = centrality[found_node]
    print('found node {0} with centrality {1}'.format(found_node, c))
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| JamesClough/networkx | examples/algorithms/beam_search.py | Python | bsd-3-clause | 3,912 | [
"VisIt"
] | 0663c6c531e97f4a5c1e0f42bdd501d1a44612b5c36c378a45f603b114cc527b |
import sys
import pytest
import numpy as np
from phono3py.sscha.sscha import (
DispCorrMatrix, DispCorrMatrixMesh,
SupercellPhonon, ThirdOrderFC)
from phonopy.phonon.qpoints import QpointsPhonon
from phonopy.phonon.random_displacements import RandomDisplacements
# Python 2 compatibility shim: ModuleNotFoundError only exists on
# Python 3.6+, so fall back to its parent class ImportError elsewhere.
try:
    ModuleNotFoundError
except NameError:
    ModuleNotFoundError = ImportError
# Reference 3x3 blocks of the upsilon (inverse displacement-displacement
# correlation) matrix for Si (PBEsol), used as regression targets below.
si_pbesol_upsilon0_0 = [[3.849187e+02, 0, 0],
                        [0, 3.849187e+02, 0],
                        [0, 0, 3.849187e+02]]
si_pbesol_upsilon1_34 = [[1.886404, -1.549705, -1.126055],
                         [-1.549705, 1.886404, -1.126055],
                         [-1.126055, -1.126055, -0.006187]]
# Reference supercell phonon frequencies for the Si 1x1x1 cell, sorted
# ascending (presumably THz given frequency_factor_to_THz — TODO confirm).
si_pbesol_111_freqs = [
    0.00000, 0.00000, 0.00000, 4.02839, 4.02839, 4.02839,
    4.02839, 4.02839, 4.02839, 12.13724, 12.13724, 12.13724,
    12.13724, 12.13724, 12.13724, 13.71746, 13.71746, 13.71746,
    13.71746, 13.71746, 13.71746, 15.24974, 15.24974, 15.24974]
def get_supercell_phonon(ph3):
    """Build a SupercellPhonon from a Phono3py object on a 1x1x1 mesh."""
    ph3.mesh_numbers = [1, 1, 1]
    ph3.init_phph_interaction()
    return SupercellPhonon(
        ph3.phonon_supercell,
        ph3.dynamical_matrix.force_constants,
        frequency_factor_to_THz=ph3.unit_conversion_factor)
def mass_sand(matrix, mass):
    """Sandwich *matrix* between mass factors: out[i, j] = matrix[i, j] * mass[i] * mass[j]."""
    col_scaled = matrix * mass        # scale along the last axis
    return (col_scaled.T * mass).T    # scale along the first axis
def mass_inv(matrix, mass):
    """Mass-weighted pseudo-inverse: sandwich with masses, pinv, sandwich again."""
    sandwiched = ((matrix * mass).T * mass).T
    pinv = np.linalg.pinv(sandwiched)
    return ((pinv * mass).T * mass).T
def test_SupercellPhonon(si_pbesol_111):
    """Supercell phonon frequencies of Si 1x1x1 match the reference list."""
    phonon = get_supercell_phonon(si_pbesol_111)
    np.testing.assert_allclose(
        si_pbesol_111_freqs, phonon.frequencies, atol=1e-4)
def test_disp_corr_matrix_mesh(si_pbesol):
    """Mesh-based upsilon matrix matches references and is the
    mass-weighted pseudo-inverse of the psi matrix."""
    si_pbesol.mesh_numbers = [9, 9, 9]
    si_pbesol.init_phph_interaction()
    dynmat = si_pbesol.dynamical_matrix
    corr = DispCorrMatrixMesh(dynmat.primitive, dynmat.supercell)
    qpoints_phonon = QpointsPhonon(corr.commensurate_points,
                                   dynmat,
                                   with_eigenvectors=True)
    corr.run(qpoints_phonon.frequencies, qpoints_phonon.eigenvectors, 300.0)
    np.testing.assert_allclose(
        si_pbesol_upsilon0_0, corr.upsilon_matrix[:3, :3], atol=1e-4)
    np.testing.assert_allclose(
        si_pbesol_upsilon1_34, corr.upsilon_matrix[3:6, 102:105],
        atol=1e-4)
    sqrt_masses = np.repeat(np.sqrt(si_pbesol.supercell.masses), 3)
    np.testing.assert_allclose(
        corr.upsilon_matrix, mass_inv(corr.psi_matrix, sqrt_masses),
        atol=1e-8, rtol=0)
def test_disp_corr_matrix(si_pbesol):
    """Direct supercell solver reproduces the reference upsilon blocks."""
    uu = DispCorrMatrix(get_supercell_phonon(si_pbesol))
    uu.run(300.0)
    np.testing.assert_allclose(
        si_pbesol_upsilon0_0, uu.upsilon_matrix[:3, :3], atol=1e-4)
    np.testing.assert_allclose(
        si_pbesol_upsilon1_34, uu.upsilon_matrix[3:6, 102:105], atol=1e-4)
def test_disp_corr_matrix_si(si_pbesol):
    """Psi/upsilon consistency check for Si."""
    _test_disp_corr_matrix(si_pbesol)


def test_disp_corr_matrix_nacl(nacl_pbe):
    """Psi/upsilon consistency check for NaCl."""
    _test_disp_corr_matrix(nacl_pbe)
def _test_disp_corr_matrix(ph3):
    """Check that upsilon equals the mass-weighted pinv of psi and agrees
    with RandomDisplacements' correlation matrix."""
    uu = DispCorrMatrix(get_supercell_phonon(ph3))
    uu.run(300.0)
    sqrt_masses = np.repeat(np.sqrt(ph3.supercell.masses), 3)
    np.testing.assert_allclose(
        uu.upsilon_matrix, mass_inv(uu.psi_matrix, sqrt_masses),
        atol=1e-8, rtol=0)
    rd = RandomDisplacements(ph3.supercell, ph3.primitive, ph3.fc2)
    rd.run_correlation_matrix(300)
    reference = np.transpose(
        rd.uu_inv, axes=[0, 2, 1, 3]).reshape(uu.upsilon_matrix.shape)
    np.testing.assert_allclose(
        uu.upsilon_matrix, reference, atol=1e-8, rtol=0)
def test_fc3(si_pbesol_iterha_111):
    """ThirdOrderFC runs on ALM-derived full force constants."""
    try:
        import alm  # noqa: F401  -- only probing for availability
    except ModuleNotFoundError:
        pytest.skip("Skip this test because ALM module was not found.")
    ph = si_pbesol_iterha_111
    ph.produce_force_constants(calculate_full_force_constants=True,
                               fc_calculator='alm')
    sph = SupercellPhonon(
        ph.supercell, ph.force_constants,
        frequency_factor_to_THz=ph.unit_conversion_factor)
    fc3 = ThirdOrderFC(ph.displacements, ph.forces, sph)
    fc3.run(T=300)
| atztogo/phono3py | test/sscha/test_sscha.py | Python | bsd-3-clause | 4,373 | [
"phonopy"
] | 1886fe5e38c17437d7dde0764488083d5d2a5d9a2fddb4e108d1c50d3779f9c9 |
"""
Functions for manipulating, inspecting, or otherwise working with data types
and data structures.
"""
import copy
import datetime
import fnmatch
import functools
import logging
import re
from collections.abc import Mapping, MutableMapping, Sequence
import salt.utils.dictupdate
import salt.utils.stringutils
import salt.utils.yaml
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import SaltException
from salt.utils.decorators.jinja import jinja_filter
from salt.utils.odict import OrderedDict
try:
import jmespath
except ImportError:
jmespath = None
log = logging.getLogger(__name__)
class CaseInsensitiveDict(MutableMapping):
    """
    Inspired by requests' case-insensitive dict implementation, but works with
    non-string keys as well.
    """

    def __init__(self, init=None, **kwargs):
        """
        Force internal dict to be ordered to ensure a consistent iteration
        order, irrespective of case.
        """
        self._data = OrderedDict()
        self.update(init or {}, **kwargs)

    def __len__(self):
        return len(self._data)

    def __setitem__(self, key, value):
        # Store the case-sensitive key so it is available for dict iteration
        self._data[to_lowercase(key)] = (key, value)

    def __delitem__(self, key):
        del self._data[to_lowercase(key)]

    def __getitem__(self, key):
        return self._data[to_lowercase(key)][1]

    def __iter__(self):
        return (item[0] for item in self._data.values())

    def __eq__(self, rval):
        if not isinstance(rval, Mapping):
            # Comparing to non-mapping type (e.g. int) is always False
            return False
        return dict(self.items_lower()) == dict(CaseInsensitiveDict(rval).items_lower())

    def __repr__(self):
        return repr(dict(self.items()))

    def items_lower(self):
        """
        Returns a generator iterating over keys and values, with the keys all
        being lowercase.
        """
        return ((key, val[1]) for key, val in self._data.items())

    def copy(self):
        """
        Returns a copy of the object
        """
        # BUG FIX: iterating ``self._data.items()`` yields
        # (lowercased_key, (original_key, value)) pairs, so the copy mapped
        # each key to the whole (original_key, value) tuple instead of the
        # value. Iterate ``.values()`` -- the stored (original_key, value)
        # pairs -- as requests' CaseInsensitiveDict does.
        return CaseInsensitiveDict(self._data.values())
def __change_case(data, attr, preserve_dict_class=False):
    """
    Recursively apply the string method named *attr* (e.g. "lower").

    If *data* itself has the method, call it directly. Mappings are
    rebuilt with both keys and values processed; other sequences are
    rebuilt with their items processed. Anything else passes through
    unchanged.
    """
    try:
        return getattr(data, attr)()
    except AttributeError:
        pass

    cls = data.__class__
    if isinstance(data, Mapping):
        dict_cls = cls if preserve_dict_class else dict
        return dict_cls(
            (
                __change_case(key, attr, preserve_dict_class),
                __change_case(val, attr, preserve_dict_class),
            )
            for key, val in data.items()
        )
    if isinstance(data, Sequence):
        return cls(__change_case(item, attr, preserve_dict_class) for item in data)
    return data


def to_lowercase(data, preserve_dict_class=False):
    """
    Recursively changes everything in data to lowercase.
    """
    return __change_case(data, "lower", preserve_dict_class)


def to_uppercase(data, preserve_dict_class=False):
    """
    Recursively changes everything in data to uppercase.
    """
    return __change_case(data, "upper", preserve_dict_class)
@jinja_filter("compare_dicts")
def compare_dicts(old=None, new=None):
"""
Compare before and after results from various salt functions, returning a
dict describing the changes that were made.
"""
ret = {}
for key in set(new or {}).union(old or {}):
if key not in old:
# New key
ret[key] = {"old": "", "new": new[key]}
elif key not in new:
# Key removed
ret[key] = {"new": "", "old": old[key]}
elif new[key] != old[key]:
# Key modified
ret[key] = {"old": old[key], "new": new[key]}
return ret
@jinja_filter("compare_lists")
def compare_lists(old=None, new=None):
"""
Compare before and after results from various salt functions, returning a
dict describing the changes that were made
"""
ret = {}
for item in new:
if item not in old:
ret.setdefault("new", []).append(item)
for item in old:
if item not in new:
ret.setdefault("old", []).append(item)
return ret
def _remove_circular_refs(ob, _seen=None):
    """
    Generic method to remove circular references from objects.
    This has been taken from author Martijn Pieters
    https://stackoverflow.com/questions/44777369/
    remove-circular-references-in-dicts-lists-tuples/44777477#44777477

    :param ob: dict, list, tuple, set, and frozenset
        Standard python object
    :param object _seen:
        Set of object ids already on the current descent path (internal)
    :returns:
        Cleaned Python object
    :rtype:
        type(ob)
    """
    if _seen is None:
        _seen = set()
    if id(ob) in _seen:
        # Here we caught a circular reference.
        # Alert user and cleanup to continue.
        # NOTE(review): log.exception outside an except block logs
        # "NoneType: None" as the traceback; the message itself is still
        # emitted.
        log.exception(
            "Caught a circular reference in data structure below."
            "Cleaning and continuing execution.\n%r\n",
            ob,
        )
        return None
    _seen.add(id(ob))
    res = ob
    if isinstance(ob, dict):
        # Rebuild the dict, cleaning keys and values alike.
        res = {
            _remove_circular_refs(k, _seen): _remove_circular_refs(v, _seen)
            for k, v in ob.items()
        }
    elif isinstance(ob, (list, tuple, set, frozenset)):
        res = type(ob)(_remove_circular_refs(v, _seen) for v in ob)
    # remove id again; only *nested* references count
    _seen.remove(id(ob))
    return res
def decode(
    data,
    encoding=None,
    errors="strict",
    keep=False,
    normalize=False,
    preserve_dict_class=False,
    preserve_tuples=False,
    to_str=False,
):
    """
    Generic function which will decode whichever type is passed, if necessary.
    Optionally use to_str=True to ensure strings are str types and not unicode
    on Python 2.

    If `strict` is True, and `keep` is False, and we fail to decode, a
    UnicodeDecodeError will be raised. Passing `keep` as True allows for the
    original value to silently be returned in cases where decoding fails. This
    can be useful for cases where the data passed to this function is likely to
    contain binary blobs, such as in the case of cp.recv.

    If `normalize` is True, then unicodedata.normalize() will be used to
    normalize unicode strings down to a single code point per glyph. It is
    recommended not to normalize unless you know what you're doing. For
    instance, if `data` contains a dictionary, it is possible that normalizing
    will lead to data loss because the following two strings will normalize to
    the same value:

    - u'\\u044f\\u0438\\u0306\\u0446\\u0430.txt'
    - u'\\u044f\\u0439\\u0446\\u0430.txt'

    One good use case for normalization is in the test suite. For example, on
    some platforms such as Mac OS, os.listdir() will produce the first of the
    two strings above, in which "й" is represented as two code points (i.e. one
    for the base character, and one for the breve mark). Normalizing allows for
    a more reliable test case.
    """
    # Clean data object before decoding to avoid circular references
    data = _remove_circular_refs(data)
    # Select the decoder once; both helpers share the same signature.
    _decode_func = (
        salt.utils.stringutils.to_unicode
        if not to_str
        else salt.utils.stringutils.to_str
    )
    # Containers are delegated to the recursive helpers.
    if isinstance(data, Mapping):
        return decode_dict(
            data,
            encoding,
            errors,
            keep,
            normalize,
            preserve_dict_class,
            preserve_tuples,
            to_str,
        )
    if isinstance(data, list):
        return decode_list(
            data,
            encoding,
            errors,
            keep,
            normalize,
            preserve_dict_class,
            preserve_tuples,
            to_str,
        )
    if isinstance(data, tuple):
        return (
            decode_tuple(
                data, encoding, errors, keep, normalize, preserve_dict_class, to_str
            )
            if preserve_tuples
            else decode_list(
                data,
                encoding,
                errors,
                keep,
                normalize,
                preserve_dict_class,
                preserve_tuples,
                to_str,
            )
        )
    # datetimes are rendered as ISO-8601 strings rather than decoded.
    if isinstance(data, datetime.datetime):
        return data.isoformat()
    try:
        data = _decode_func(data, encoding, errors, normalize)
    except TypeError:
        # to_unicode raises a TypeError when input is not a
        # string/bytestring/bytearray. This is expected and simply means we
        # are going to leave the value as-is.
        pass
    except UnicodeDecodeError:
        if not keep:
            raise
    return data
def decode_dict(
    data,
    encoding=None,
    errors="strict",
    keep=False,
    normalize=False,
    preserve_dict_class=False,
    preserve_tuples=False,
    to_str=False,
):
    """
    Decode all string values to Unicode. Optionally use to_str=True to ensure
    strings are str types and not unicode on Python 2.

    Both keys and values are decoded recursively; see :func:`decode` for the
    meaning of the keyword arguments.
    """
    # Clean data object before decoding to avoid circular references
    data = _remove_circular_refs(data)
    # Make sure we preserve OrderedDicts
    ret = data.__class__() if preserve_dict_class else {}
    for key, value in data.items():
        if isinstance(key, tuple):
            key = (
                decode_tuple(
                    key, encoding, errors, keep, normalize, preserve_dict_class, to_str
                )
                if preserve_tuples
                else decode_list(
                    key,
                    encoding,
                    errors,
                    keep,
                    normalize,
                    preserve_dict_class,
                    preserve_tuples,
                    to_str,
                )
            )
        else:
            try:
                key = decode(
                    key,
                    encoding,
                    errors,
                    keep,
                    normalize,
                    preserve_dict_class,
                    preserve_tuples,
                    to_str,
                )
            except TypeError:
                # to_unicode raises a TypeError when input is not a
                # string/bytestring/bytearray. This is expected and simply
                # means we are going to leave the value as-is.
                pass
            except UnicodeDecodeError:
                if not keep:
                    raise
        if isinstance(value, list):
            value = decode_list(
                value,
                encoding,
                errors,
                keep,
                normalize,
                preserve_dict_class,
                preserve_tuples,
                to_str,
            )
        elif isinstance(value, tuple):
            value = (
                decode_tuple(
                    value,
                    encoding,
                    errors,
                    keep,
                    normalize,
                    preserve_dict_class,
                    to_str,
                )
                if preserve_tuples
                else decode_list(
                    value,
                    encoding,
                    errors,
                    keep,
                    normalize,
                    preserve_dict_class,
                    preserve_tuples,
                    to_str,
                )
            )
        elif isinstance(value, Mapping):
            value = decode_dict(
                value,
                encoding,
                errors,
                keep,
                normalize,
                preserve_dict_class,
                preserve_tuples,
                to_str,
            )
        else:
            try:
                value = decode(
                    value,
                    encoding,
                    errors,
                    keep,
                    normalize,
                    preserve_dict_class,
                    preserve_tuples,
                    to_str,
                )
            # Cleanup: dropped the unused ``as e`` binding here; no other
            # handler in this module binds the exception.
            except TypeError:
                # to_unicode raises a TypeError when input is not a
                # string/bytestring/bytearray. This is expected and simply
                # means we are going to leave the value as-is.
                pass
            except UnicodeDecodeError:
                if not keep:
                    raise
        ret[key] = value
    return ret
def decode_list(
    data,
    encoding=None,
    errors="strict",
    keep=False,
    normalize=False,
    preserve_dict_class=False,
    preserve_tuples=False,
    to_str=False,
):
    """
    Decode all string values to Unicode. Optionally use to_str=True to ensure
    strings are str types and not unicode on Python 2.

    Items are decoded recursively; see :func:`decode` for the meaning of the
    keyword arguments. Always returns a plain list.
    """
    # Clean data object before decoding to avoid circular references
    data = _remove_circular_refs(data)
    ret = []
    for item in data:
        if isinstance(item, list):
            item = decode_list(
                item,
                encoding,
                errors,
                keep,
                normalize,
                preserve_dict_class,
                preserve_tuples,
                to_str,
            )
        elif isinstance(item, tuple):
            item = (
                decode_tuple(
                    item, encoding, errors, keep, normalize, preserve_dict_class, to_str
                )
                if preserve_tuples
                else decode_list(
                    item,
                    encoding,
                    errors,
                    keep,
                    normalize,
                    preserve_dict_class,
                    preserve_tuples,
                    to_str,
                )
            )
        elif isinstance(item, Mapping):
            item = decode_dict(
                item,
                encoding,
                errors,
                keep,
                normalize,
                preserve_dict_class,
                preserve_tuples,
                to_str,
            )
        else:
            try:
                item = decode(
                    item,
                    encoding,
                    errors,
                    keep,
                    normalize,
                    preserve_dict_class,
                    preserve_tuples,
                    to_str,
                )
            except TypeError:
                # to_unicode raises a TypeError when input is not a
                # string/bytestring/bytearray. This is expected and simply
                # means we are going to leave the value as-is.
                pass
            except UnicodeDecodeError:
                if not keep:
                    raise
        ret.append(item)
    return ret
def decode_tuple(
    data,
    encoding=None,
    errors="strict",
    keep=False,
    normalize=False,
    preserve_dict_class=False,
    to_str=False,
):
    """
    Decode all string values to Unicode, returning a tuple. Nested tuples are
    preserved. Optionally use to_str=True to ensure strings are str types and
    not unicode on Python 2.
    """
    decoded = decode_list(
        data, encoding, errors, keep, normalize, preserve_dict_class, True, to_str
    )
    return tuple(decoded)
def encode(
    data,
    encoding=None,
    errors="strict",
    keep=False,
    preserve_dict_class=False,
    preserve_tuples=False,
):
    """
    Generic function which will encode whichever type is passed, if necessary

    If `strict` is True, and `keep` is False, and we fail to encode, a
    UnicodeEncodeError will be raised. Passing `keep` as True allows for the
    original value to silently be returned in cases where encoding fails. This
    can be useful for cases where the data passed to this function is likely to
    contain binary blobs.
    """
    # Clean data object before encoding to avoid circular references
    data = _remove_circular_refs(data)
    # Containers are delegated to the recursive helpers.
    if isinstance(data, Mapping):
        return encode_dict(
            data, encoding, errors, keep, preserve_dict_class, preserve_tuples
        )
    if isinstance(data, list):
        return encode_list(
            data, encoding, errors, keep, preserve_dict_class, preserve_tuples
        )
    if isinstance(data, tuple):
        return (
            encode_tuple(data, encoding, errors, keep, preserve_dict_class)
            if preserve_tuples
            else encode_list(
                data, encoding, errors, keep, preserve_dict_class, preserve_tuples
            )
        )
    try:
        return salt.utils.stringutils.to_bytes(data, encoding, errors)
    except TypeError:
        # to_bytes raises a TypeError when input is not a
        # string/bytestring/bytearray. This is expected and simply
        # means we are going to leave the value as-is.
        pass
    except UnicodeEncodeError:
        if not keep:
            raise
    return data
@jinja_filter("json_decode_dict") # Remove this for Aluminium
@jinja_filter("json_encode_dict")
def encode_dict(
data,
encoding=None,
errors="strict",
keep=False,
preserve_dict_class=False,
preserve_tuples=False,
):
"""
Encode all string values to bytes
"""
# Clean data object before encoding to avoid circular references
data = _remove_circular_refs(data)
ret = data.__class__() if preserve_dict_class else {}
for key, value in data.items():
if isinstance(key, tuple):
key = (
encode_tuple(key, encoding, errors, keep, preserve_dict_class)
if preserve_tuples
else encode_list(
key, encoding, errors, keep, preserve_dict_class, preserve_tuples
)
)
else:
try:
key = salt.utils.stringutils.to_bytes(key, encoding, errors)
except TypeError:
# to_bytes raises a TypeError when input is not a
# string/bytestring/bytearray. This is expected and simply
# means we are going to leave the value as-is.
pass
except UnicodeEncodeError:
if not keep:
raise
if isinstance(value, list):
value = encode_list(
value, encoding, errors, keep, preserve_dict_class, preserve_tuples
)
elif isinstance(value, tuple):
value = (
encode_tuple(value, encoding, errors, keep, preserve_dict_class)
if preserve_tuples
else encode_list(
value, encoding, errors, keep, preserve_dict_class, preserve_tuples
)
)
elif isinstance(value, Mapping):
value = encode_dict(
value, encoding, errors, keep, preserve_dict_class, preserve_tuples
)
else:
try:
value = salt.utils.stringutils.to_bytes(value, encoding, errors)
except TypeError:
# to_bytes raises a TypeError when input is not a
# string/bytestring/bytearray. This is expected and simply
# means we are going to leave the value as-is.
pass
except UnicodeEncodeError:
if not keep:
raise
ret[key] = value
return ret
@jinja_filter("json_decode_list") # Remove this for Aluminium
@jinja_filter("json_encode_list")
def encode_list(
data,
encoding=None,
errors="strict",
keep=False,
preserve_dict_class=False,
preserve_tuples=False,
):
"""
Encode all string values to bytes
"""
# Clean data object before encoding to avoid circular references
data = _remove_circular_refs(data)
ret = []
for item in data:
if isinstance(item, list):
item = encode_list(
item, encoding, errors, keep, preserve_dict_class, preserve_tuples
)
elif isinstance(item, tuple):
item = (
encode_tuple(item, encoding, errors, keep, preserve_dict_class)
if preserve_tuples
else encode_list(
item, encoding, errors, keep, preserve_dict_class, preserve_tuples
)
)
elif isinstance(item, Mapping):
item = encode_dict(
item, encoding, errors, keep, preserve_dict_class, preserve_tuples
)
else:
try:
item = salt.utils.stringutils.to_bytes(item, encoding, errors)
except TypeError:
# to_bytes raises a TypeError when input is not a
# string/bytestring/bytearray. This is expected and simply
# means we are going to leave the value as-is.
pass
except UnicodeEncodeError:
if not keep:
raise
ret.append(item)
return ret
def encode_tuple(
    data, encoding=None, errors="strict", keep=False, preserve_dict_class=False
):
    """
    Encode all string values to bytes, returning a tuple. Nested tuples are
    preserved.
    """
    encoded = encode_list(data, encoding, errors, keep, preserve_dict_class, True)
    return tuple(encoded)
@jinja_filter("exactly_n_true")
def exactly_n(iterable, amount=1):
"""
Tests that exactly N items in an iterable are "truthy" (neither None,
False, nor 0).
"""
i = iter(iterable)
return all(any(i) for j in range(amount)) and not any(i)
@jinja_filter("exactly_one_true")
def exactly_one(iterable):
"""
Check if only one item is not None, False, or 0 in an iterable.
"""
return exactly_n(iterable)
def filter_by(lookup_dict, lookup, traverse, merge=None, default="default", base=None):
    """
    Common code to filter data structures like grains and pillar

    Looks up *lookup* inside *traverse* (a dict/list structure), then matches
    the resulting value(s) against the glob-pattern keys of *lookup_dict*.
    Falls back to the *default* key, optionally merges on top of the *base*
    entry, and finally applies the *merge* dict (which wins).
    """
    ret = None
    # Default value would be an empty list if lookup not found
    val = traverse_dict_and_list(traverse, lookup, [])
    # Iterate over the list of values to match against patterns in the
    # lookup_dict keys
    for each in val if isinstance(val, list) else [val]:
        for key in lookup_dict:
            # Coerce both sides to str so glob matching works for ints etc.
            test_key = key if isinstance(key, str) else str(key)
            test_each = each if isinstance(each, str) else str(each)
            if fnmatch.fnmatchcase(test_each, test_key):
                ret = lookup_dict[key]
                break
        if ret is not None:
            break
    if ret is None:
        ret = lookup_dict.get(default, None)
    if base and base in lookup_dict:
        base_values = lookup_dict[base]
        if ret is None:
            ret = base_values
        elif isinstance(base_values, Mapping):
            if not isinstance(ret, Mapping):
                raise SaltException(
                    "filter_by default and look-up values must both be dictionaries."
                )
            # Matched entry is layered on top of a deep copy of the base.
            ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
    if merge:
        if not isinstance(merge, Mapping):
            raise SaltException("filter_by merge argument must be a dictionary.")
        if ret is None:
            ret = merge
        else:
            # merge is applied last and overrides matched/base values.
            salt.utils.dictupdate.update(ret, copy.deepcopy(merge))
    return ret
def traverse_dict(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
    """
    Traverse a dict using a colon-delimited (or otherwise delimited, using the
    'delimiter' param) target string. The target 'foo:bar:baz' will return
    data['foo']['bar']['baz'] if this value exists, and will otherwise return
    the dict in the default argument.
    """
    node = data
    for part in key.split(delimiter):
        try:
            node = node[part]
        except (KeyError, IndexError, TypeError):
            # A missing or non-indexable value was hit mid-traversal.
            return default
    return node
@jinja_filter("traverse")
def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
"""
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
"""
ptr = data
if isinstance(key, str):
key = key.split(delimiter)
if isinstance(key, int):
key = [key]
for each in key:
if isinstance(ptr, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in ptr if isinstance(x, dict)):
try:
ptr = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
embed_match = False
# Index was numeric, lets look at any embedded dicts
# using the converted version of each.
for embedded in (x for x in ptr if isinstance(x, dict)):
try:
ptr = embedded[idx]
embed_match = True
break
except KeyError:
pass
if not embed_match:
try:
ptr = ptr[idx]
except IndexError:
return default
else:
try:
ptr = ptr[each]
except KeyError:
# Late import to avoid circular import
import salt.utils.args
# YAML-load the current key (catches integer/float dict keys)
try:
loaded_key = salt.utils.args.yamlify_arg(each)
except Exception: # pylint: disable=broad-except
return default
if loaded_key == each:
# After YAML-loading, the desired key is unchanged. This
# means that the KeyError caught above is a legitimate
# failure to match the desired key. Therefore, return the
# default.
return default
else:
# YAML-loading the key changed its value, so re-check with
# the loaded key. This is how we can match a numeric key
# with a string-based expression.
try:
ptr = ptr[loaded_key]
except (KeyError, TypeError):
return default
except TypeError:
return default
return ptr
def subdict_match(
    data, expr, delimiter=DEFAULT_TARGET_DELIM, regex_match=False, exact_match=False
):
    """
    Check for a match in a dictionary using a delimiter character to denote
    levels of subdicts, and also allowing the delimiter character to be
    matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and
    data['foo']['bar'] == 'baz'. The latter would take priority over the
    former, as more deeply-nested matches are tried first.
    """

    def _match(target, pattern, regex_match=False, exact_match=False):
        # Match a single scalar against a pattern (regex, exact, or glob),
        # case-insensitively.
        # XXX: A lot of this logic is here because of supporting PY2 and PY3,
        # now that we only support PY3 we should probably re-visit what's going
        # on here.
        try:
            target = str(target).lower()
        except UnicodeDecodeError:
            target = salt.utils.stringutils.to_unicode(target).lower()
        try:
            pattern = str(pattern).lower()
        except UnicodeDecodeError:
            pattern = salt.utils.stringutils.to_unicode(pattern).lower()
        if regex_match:
            try:
                return re.match(pattern, target)
            except Exception:  # pylint: disable=broad-except
                log.error("Invalid regex '%s' in match", pattern)
                return False
        else:
            return (
                target == pattern if exact_match else fnmatch.fnmatch(target, pattern)
            )

    def _dict_match(target, pattern, regex_match=False, exact_match=False):
        # Match *pattern* against a dict, supporting a leading "*:" wildcard
        # that descends into every value of the dict.
        ret = False
        wildcard = pattern.startswith("*:")
        if wildcard:
            pattern = pattern[2:]
        if pattern == "*":
            # We are just checking that the key exists
            ret = True
        if not ret and pattern in target:
            # We might want to search for a key
            ret = True
        if not ret and subdict_match(
            target, pattern, regex_match=regex_match, exact_match=exact_match
        ):
            ret = True
        if not ret and wildcard:
            for key in target:
                if isinstance(target[key], dict):
                    if _dict_match(
                        target[key],
                        pattern,
                        regex_match=regex_match,
                        exact_match=exact_match,
                    ):
                        return True
                elif isinstance(target[key], list):
                    for item in target[key]:
                        if _match(
                            item,
                            pattern,
                            regex_match=regex_match,
                            exact_match=exact_match,
                        ):
                            return True
                elif _match(
                    target[key],
                    pattern,
                    regex_match=regex_match,
                    exact_match=exact_match,
                ):
                    return True
        return ret

    splits = expr.split(delimiter)
    num_splits = len(splits)
    if num_splits == 1:
        # Delimiter not present, this can't possibly be a match
        return False
    # If we have 4 splits, then we have three delimiters. Thus, the indexes we
    # want to use are 3, 2, and 1, in that order.
    for idx in range(num_splits - 1, 0, -1):
        key = delimiter.join(splits[:idx])
        if key == "*":
            # We are matching on everything under the top level, so we need to
            # treat the match as the entire data being passed in
            matchstr = expr
            match = data
        else:
            matchstr = delimiter.join(splits[idx:])
            match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
        log.debug(
            "Attempting to match '%s' in '%s' using delimiter '%s'",
            matchstr,
            key,
            delimiter,
        )
        if match == {}:
            continue
        if isinstance(match, dict):
            if _dict_match(
                match, matchstr, regex_match=regex_match, exact_match=exact_match
            ):
                return True
            continue
        if isinstance(match, (list, tuple)):
            # We are matching a single component to a single list member
            for member in match:
                if isinstance(member, dict):
                    if _dict_match(
                        member,
                        matchstr,
                        regex_match=regex_match,
                        exact_match=exact_match,
                    ):
                        return True
                if _match(
                    member, matchstr, regex_match=regex_match, exact_match=exact_match
                ):
                    return True
            continue
        if _match(match, matchstr, regex_match=regex_match, exact_match=exact_match):
            return True
    return False
@jinja_filter("substring_in_list")
def substr_in_list(string_to_search_for, list_to_search):
"""
Return a boolean value that indicates whether or not a given
string is present in any of the strings which comprise a list
"""
return any(string_to_search_for in s for s in list_to_search)
def is_dictlist(data):
    """
    Returns True if data is a list of one-element dicts (as found in many SLS
    schemas), otherwise returns False
    """
    if not isinstance(data, list):
        return False
    return all(isinstance(element, dict) and len(element) == 1 for element in data)
def repack_dictlist(data, strict=False, recurse=False, key_cb=None, val_cb=None):
    """
    Takes a list of one-element dicts (as found in many SLS schemas) and
    repacks into a single dictionary.

    A string input is first parsed as YAML. ``key_cb``/``val_cb`` transform
    keys and values as they are repacked; ``recurse`` repacks nested
    dictlists, and ``strict`` makes nested dictlists an error otherwise.
    Returns ``{}`` on any invalid input (errors are logged, not raised).
    """
    if isinstance(data, str):
        try:
            data = salt.utils.yaml.safe_load(data)
        except salt.utils.yaml.parser.ParserError as err:
            log.error(err)
            return {}
    if key_cb is None:
        key_cb = lambda x: x
    if val_cb is None:
        val_cb = lambda x, y: y
    # Scalars allowed as bare (valueless) keys in the list.
    valid_non_dict = ((str,), (int,), float)
    # First pass: validate the shape before building the result.
    if isinstance(data, list):
        for element in data:
            if isinstance(element, valid_non_dict):
                continue
            if isinstance(element, dict):
                if len(element) != 1:
                    log.error(
                        "Invalid input for repack_dictlist: key/value pairs "
                        "must contain only one element (data passed: %s).",
                        element,
                    )
                    return {}
            else:
                log.error(
                    "Invalid input for repack_dictlist: element %s is "
                    "not a string/dict/numeric value",
                    element,
                )
                return {}
    else:
        log.error(
            "Invalid input for repack_dictlist, data passed is not a list (%s)", data
        )
        return {}
    # Second pass: repack.
    ret = {}
    for element in data:
        if isinstance(element, valid_non_dict):
            ret[key_cb(element)] = None
        else:
            key = next(iter(element))
            val = element[key]
            if is_dictlist(val):
                if recurse:
                    ret[key_cb(key)] = repack_dictlist(val, recurse=recurse)
                elif strict:
                    log.error(
                        "Invalid input for repack_dictlist: nested dictlist "
                        "found, but recurse is set to False"
                    )
                    return {}
                else:
                    ret[key_cb(key)] = val_cb(key, val)
            else:
                ret[key_cb(key)] = val_cb(key, val)
    return ret
@jinja_filter("is_list")
def is_list(value):
"""
Check if a variable is a list.
"""
return isinstance(value, list)
@jinja_filter("is_iter")
def is_iter(thing, ignore=(str,)):
"""
Test if an object is iterable, but not a string type.
Test if an object is an iterator or is iterable itself. By default this
does not return True for string objects.
The `ignore` argument defaults to a list of string types that are not
considered iterable. This can be used to also exclude things like
dictionaries or named tuples.
Based on https://bitbucket.org/petershinners/yter
"""
if ignore and isinstance(thing, ignore):
return False
try:
iter(thing)
return True
except TypeError:
return False
@jinja_filter("sorted_ignorecase")
def sorted_ignorecase(to_sort):
"""
Sort a list of strings ignoring case.
>>> L = ['foo', 'Foo', 'bar', 'Bar']
>>> sorted(L)
['Bar', 'Foo', 'bar', 'foo']
>>> sorted(L, key=lambda x: x.lower())
['bar', 'Bar', 'foo', 'Foo']
>>>
"""
return sorted(to_sort, key=lambda x: x.lower())
def is_true(value=None):
    """
    Returns a boolean value representing the "truth" of the value passed. The
    rules for what is a "True" value are:
    1. Integer/float values greater than 0
    2. The string values "True" and "true"
    3. Any object for which bool(obj) returns True
    """
    # Attempt numeric coercion: int first, then float (same order as the
    # original rules; a failed conversion leaves the value untouched).
    for converter in (int, float):
        try:
            value = converter(value)
        except (ValueError, TypeError):
            pass

    # Numbers are "true" when strictly positive.
    if isinstance(value, (int, float)):
        return value > 0
    # Strings are "true" only when they spell the word (case-insensitive).
    if isinstance(value, str):
        return value.lower() == "true"
    # Fall back to ordinary Python truthiness.
    return bool(value)
@jinja_filter("mysql_to_dict")
def mysql_to_dict(data, key):
"""
Convert MySQL-style output to a python dictionary
"""
ret = {}
headers = [""]
for line in data:
if not line:
continue
if line.startswith("+"):
continue
comps = line.split("|")
for idx, comp in enumerate(comps):
comps[idx] = comp.strip()
if len(headers) > 1:
index = len(headers) - 1
row = {}
for field in range(index):
if field < 1:
continue
row[headers[field]] = salt.utils.stringutils.to_num(comps[field])
ret[row[key]] = row
else:
headers = comps
return ret
def simple_types_filter(data):
    """
    Convert the data list, dictionary into simple types, i.e., int, float, string,
    bool, etc.
    """
    if data is None:
        return None

    # Types acceptable as-is for keys and for values (values additionally
    # allow containers, which are recursed into).
    simple_key_types = (str, int, float, bool)
    simple_value_types = (str, int, float, bool, list, tuple)

    if isinstance(data, (list, tuple)):
        cleaned = []
        for item in data:
            if item is None:
                # None entries are preserved verbatim.
                cleaned.append(item)
            elif isinstance(item, (dict, list)):
                cleaned.append(simple_types_filter(item))
            elif isinstance(item, simple_value_types):
                cleaned.append(item)
            else:
                # Anything exotic is stringified via repr().
                cleaned.append(repr(item))
        return cleaned

    if isinstance(data, dict):
        cleaned = {}
        for key, value in data.items():
            if key is not None and not isinstance(key, simple_key_types):
                key = repr(key)
            if value is not None and isinstance(value, (dict, list, tuple)):
                value = simple_types_filter(value)
            elif value is not None and not isinstance(value, simple_value_types):
                value = repr(value)
            cleaned[key] = value
        return cleaned

    return data
def stringify(data):
    """
    Given an iterable, returns its items as a list, with any non-string items
    converted to unicode strings.
    """
    return [entry if isinstance(entry, str) else str(entry) for entry in data]
@jinja_filter("json_query")
def json_query(data, expr):
"""
Query data using JMESPath language (http://jmespath.org).
Requires the https://github.com/jmespath/jmespath.py library.
:param data: A complex data structure to query
:param expr: A JMESPath expression (query)
:returns: The query result
.. code-block:: jinja
{"services": [
{"name": "http", "host": "1.2.3.4", "port": 80},
{"name": "smtp", "host": "1.2.3.5", "port": 25},
{"name": "ssh", "host": "1.2.3.6", "port": 22},
]} | json_query("services[].port") }}
will be rendered as:
.. code-block:: text
[80, 25, 22]
"""
if jmespath is None:
err = "json_query requires jmespath module installed"
log.error(err)
raise RuntimeError(err)
return jmespath.search(expr, data)
def _is_not_considered_falsey(value, ignore_types=()):
"""
Helper function for filter_falsey to determine if something is not to be
considered falsey.
:param any value: The value to consider
:param list ignore_types: The types to ignore when considering the value.
:return bool
"""
return isinstance(value, bool) or type(value) in ignore_types or value
def filter_falsey(data, recurse_depth=None, ignore_types=()):
    """
    Helper function to remove items from an iterable with falsey value.
    Removes ``None``, ``{}`` and ``[]``, 0, '' (but does not remove ``False``).
    Recurses into sub-iterables if ``recurse`` is set to ``True``.
    :param dict/list data: Source iterable (dict, OrderedDict, list, set, ...) to process.
    :param int recurse_depth: Recurse this many levels into values that are dicts
        or lists to also process those. Default: 0 (do not recurse)
    :param list ignore_types: Contains types that can be falsey but must not
        be filtered. Default: Only booleans are not filtered.
    :return type(data)
    .. versionadded:: 3000
    """
    # Each nested level is processed with the depth reduced by one; when the
    # depth is exhausted (0 or None) elements pass through unchanged.
    filter_element = (
        functools.partial(
            filter_falsey, recurse_depth=recurse_depth - 1, ignore_types=ignore_types
        )
        if recurse_depth
        else lambda x: x
    )

    if isinstance(data, dict):
        # Filter the (possibly recursed) values, then rebuild via type(data)
        # so dict subclasses such as OrderedDict are preserved.
        processed_elements = [
            (key, filter_element(value)) for key, value in data.items()
        ]
        return type(data)(
            [
                (key, value)
                for key, value in processed_elements
                if _is_not_considered_falsey(value, ignore_types=ignore_types)
            ]
        )
    if is_iter(data):
        # Non-dict iterables (list, set, tuple, ...) — same rebuild trick.
        processed_elements = (filter_element(value) for value in data)
        return type(data)(
            [
                value
                for value in processed_elements
                if _is_not_considered_falsey(value, ignore_types=ignore_types)
            ]
        )
    # Scalars (including strings) are returned unchanged.
    return data
def recursive_diff(
    old, new, ignore_keys=None, ignore_order=False, ignore_missing_keys=False
):
    """
    Performs a recursive diff on mappings and/or iterables and returns the result
    in a {'old': values, 'new': values}-style.
    Compares dicts and sets unordered (obviously), OrderedDicts and Lists ordered
    (but only if both ``old`` and ``new`` are of the same type),
    all other Mapping types unordered, and all other iterables ordered.
    :param mapping/iterable old: Mapping or Iterable to compare from.
    :param mapping/iterable new: Mapping or Iterable to compare to.
    :param list ignore_keys: List of keys to ignore when comparing Mappings.
    :param bool ignore_order: Compare ordered mapping/iterables as if they were unordered.
    :param bool ignore_missing_keys: Do not return keys only present in ``old``
        but missing in ``new``. Only works for regular dicts.
    :return dict: Returns dict with keys 'old' and 'new' containing the differences.
        An empty dict means the inputs are considered equal.
    """
    ignore_keys = ignore_keys or []
    res = {}
    # Work on deep copies so the caller's data is never mutated.
    ret_old = copy.deepcopy(old)
    ret_new = copy.deepcopy(new)
    if (
        isinstance(old, OrderedDict)
        and isinstance(new, OrderedDict)
        and not ignore_order
    ):
        # Ordered comparison: walk both key sequences in lockstep; any
        # trailing keys of the longer dict are kept verbatim.
        append_old, append_new = [], []
        if len(old) != len(new):
            min_length = min(len(old), len(new))
            # The list coercion is required for Py3
            append_old = list(old.keys())[min_length:]
            append_new = list(new.keys())[min_length:]
        # Compare ordered
        for (key_old, key_new) in zip(old, new):
            if key_old == key_new:
                if key_old in ignore_keys:
                    del ret_old[key_old]
                    del ret_new[key_new]
                else:
                    res = recursive_diff(
                        old[key_old],
                        new[key_new],
                        ignore_keys=ignore_keys,
                        ignore_order=ignore_order,
                        ignore_missing_keys=ignore_missing_keys,
                    )
                    if not res:  # Equal
                        del ret_old[key_old]
                        del ret_new[key_new]
                    else:
                        ret_old[key_old] = res["old"]
                        ret_new[key_new] = res["new"]
            else:
                # Keys differ at this position: keep both sides (they are a
                # difference) unless explicitly ignored.
                if key_old in ignore_keys:
                    del ret_old[key_old]
                if key_new in ignore_keys:
                    del ret_new[key_new]
        # If the OrderedDicts were of inequal length, add the remaining key/values.
        for item in append_old:
            ret_old[item] = old[item]
        for item in append_new:
            ret_new[item] = new[item]
        ret = {"old": ret_old, "new": ret_new} if ret_old or ret_new else {}
    elif isinstance(old, Mapping) and isinstance(new, Mapping):
        # Compare unordered
        for key in set(list(old) + list(new)):
            if key in ignore_keys:
                ret_old.pop(key, None)
                ret_new.pop(key, None)
            elif ignore_missing_keys and key in old and key not in new:
                del ret_old[key]
            elif key in old and key in new:
                res = recursive_diff(
                    old[key],
                    new[key],
                    ignore_keys=ignore_keys,
                    ignore_order=ignore_order,
                    ignore_missing_keys=ignore_missing_keys,
                )
                if not res:  # Equal
                    del ret_old[key]
                    del ret_new[key]
                else:
                    ret_old[key] = res["old"]
                    ret_new[key] = res["new"]
        ret = {"old": ret_old, "new": ret_new} if ret_old or ret_new else {}
    elif isinstance(old, set) and isinstance(new, set):
        # Set difference in both directions is the whole diff.
        ret = {"old": old - new, "new": new - old} if old - new or new - old else {}
    elif is_iter(old) and is_iter(new):
        # Create a list so we can edit on an index-basis.
        list_old = list(ret_old)
        list_new = list(ret_new)
        if ignore_order:
            # Unordered: remove every cross-product pair that compares equal.
            for item_old in old:
                for item_new in new:
                    res = recursive_diff(
                        item_old,
                        item_new,
                        ignore_keys=ignore_keys,
                        ignore_order=ignore_order,
                        ignore_missing_keys=ignore_missing_keys,
                    )
                    if not res:
                        list_old.remove(item_old)
                        list_new.remove(item_new)
                        continue
        else:
            # Ordered: compare pairwise; remember equal indices and pop them
            # afterwards in reverse so indices stay valid.
            remove_indices = []
            for index, (iter_old, iter_new) in enumerate(zip(old, new)):
                res = recursive_diff(
                    iter_old,
                    iter_new,
                    ignore_keys=ignore_keys,
                    ignore_order=ignore_order,
                    ignore_missing_keys=ignore_missing_keys,
                )
                if not res:  # Equal
                    remove_indices.append(index)
                else:
                    list_old[index] = res["old"]
                    list_new[index] = res["new"]
            for index in reversed(remove_indices):
                list_old.pop(index)
                list_new.pop(index)
        # Instantiate a new whatever-it-was using the list as iterable source.
        # This may not be the most optimized in way of speed and memory usage,
        # but it will work for all iterable types.
        ret = (
            {"old": type(old)(list_old), "new": type(new)(list_new)}
            if list_old or list_new
            else {}
        )
    else:
        # Scalars or mismatched container types: plain equality decides.
        ret = {} if old == new else {"old": ret_old, "new": ret_new}
    return ret
def get_value(obj, path, default=None):
    """
    Get the values for a given path.

    :param obj: the nested dict/list structure to search.
    :param path:
        keys of the properties in the tree separated by colons.
        One segment in the path can be replaced by an id surrounded by curly braces.
        This will match all items in a list of dictionary.
    :param default:
        default value to return when no value is found
    :return:
        a list of dictionaries, with at least the "value" key providing the actual value.
        If a placeholder was used, the placeholder id will be a key providing the replacement for it.
        Note that a value that wasn't found in the tree will be an empty list.
        This ensures we can make the difference with a None value set by the user.
    """
    res = [{"value": obj}]
    if path:
        # Split off the first path segment; next_path is None on the last one.
        key = path[: path.find(":")] if ":" in path else path
        next_path = path[path.find(":") + 1 :] if ":" in path else None

        if key.startswith("{") and key.endswith("}"):
            placeholder_name = key[1:-1]
            # Placeholder segment: fan out over every item of a dict or list.
            items = []
            if obj is None:
                return res
            if isinstance(obj, dict):
                items = obj.items()
            elif isinstance(obj, list):
                items = enumerate(obj)

            def _append_placeholder(value_dict, key):
                # Record which concrete key/index matched the placeholder.
                value_dict[placeholder_name] = key
                return value_dict

            values = [
                [
                    _append_placeholder(item, key)
                    for item in get_value(val, next_path, default)
                ]
                for key, val in items
            ]
            # flatten the list of per-item result lists
            values = [y for x in values for y in x]
            return values

        elif isinstance(obj, dict):
            if key not in obj:
                return [{"value": default}]
            value = obj.get(key)
            # BUGFIX: the original tested ``res is not None`` here, which is
            # always true (res is assigned unconditionally above), leaving the
            # else branch dead. Testing next_path expresses the intent: recurse
            # while path segments remain, otherwise wrap the terminal value.
            # Observable behavior is unchanged (the dead branch's fallback,
            # get_value(value, None, ...), returned [{"value": value}] anyway).
            if next_path is not None:
                res = get_value(value, next_path, default)
            else:
                res = [{"value": value}]
        else:
            # Non-container hit mid-path: report the default (or None obj).
            return [{"value": default if obj is not None else obj}]
    return res
@jinja_filter("flatten")
def flatten(data, levels=None, preserve_nulls=False, _ids=None):
"""
.. versionadded:: 3005
Flatten a list.
:param data: A list to flatten
:param levels: The number of levels in sub-lists to descend
:param preserve_nulls: Preserve nulls in a list, by default flatten removes
them
:param _ids: Parameter used internally within the function to detect
reference cycles.
:returns: A flat(ter) list of values
.. code-block:: jinja
{{ [3, [4, 2] ] | flatten }}
# => [3, 4, 2]
Flatten only the first level of a list:
.. code-block:: jinja
{{ [3, [4, [2]] ] | flatten(levels=1) }}
# => [3, 4, [2]]
Preserve nulls in a list, by default flatten removes them.
.. code-block:: jinja
{{ [3, None, [4, [2]] ] | flatten(levels=1, preserve_nulls=True) }}
# => [3, None, 4, [2]]
"""
if _ids is None:
_ids = set()
if id(data) in _ids:
raise RecursionError("Reference cycle detected. Check input list.")
_ids.add(id(data))
ret = []
for element in data:
if not preserve_nulls and element in (None, "None", "null"):
# ignore null items
continue
elif is_iter(element):
if levels is None:
ret.extend(flatten(element, preserve_nulls=preserve_nulls, _ids=_ids))
elif levels >= 1:
# decrement as we go down the stack
ret.extend(
flatten(
element,
levels=(int(levels) - 1),
preserve_nulls=preserve_nulls,
_ids=_ids,
)
)
else:
ret.append(element)
else:
ret.append(element)
return ret
| saltstack/salt | salt/utils/data.py | Python | apache-2.0 | 52,366 | [
"VisIt"
] | f9710eccd672622a0ba43e56993c754d50f23e2991e170004fcf62f0fbf0e4e3 |
from flask import Flask, render_template, session, request, redirect
import random
app = Flask(__name__)
# NOTE(review): hardcoded secret key — acceptable for a demo/classroom app,
# but a real deployment should load it from config/environment, not source.
app.secret_key = 'my_secret_key'
@app.route('/')
def index():
    """Render the main page, seeding session defaults on first visit."""
    if 'gold' not in session:
        session['gold'] = 0
    if 'activities' not in session:
        session['activities'] = []
    return render_template('index.html')
@app.route('/process', methods = ['POST'])
def process():
    """Handle a building visit: roll a gold amount for the chosen building,
    record the outcome in the session activity log, and redirect home."""
    # Gold payout (possibly negative for the casino) rolled fresh per request.
    buildings = {
        'farm':random.randint(5,10),
        'casino':random.randint(-50,50),
        'cave':random.randint(0,30),
        'house':random.randint(0,5)
    }
    if request.form['building'] in buildings:
        result = buildings[request.form['building']]
        session['gold'] = session['gold']+result
        result_dictionary = {
            'class': ('red','green')[result > 0],
            'activity': "You went to the {} and {} {} gold!".format(request.form['building'], ('lost','gained')[result > 0], result)
        }
        # BUGFIX: mutating a list stored in the session does not mark the
        # session as modified, so the appended activity could be silently
        # dropped when the session cookie is serialized. Reassigning the key
        # guarantees Flask persists the change. (Also removed a stray dead
        # string expression that served no purpose.)
        activities = session['activities']
        activities.append(result_dictionary)
        session['activities'] = activities
    # Unknown building names fall through and simply redirect.
    return redirect('/')
if __name__ == '__main__':
    # Run the Flask development server; debug=True enables the reloader
    # and the interactive debugger (development only).
    app.run(debug = True)
"""
Explain line 24 - 31. Will it work? How, where what? why!?
#25 tells you how many golds you have based
on which building is visited. The activity is passed to an empty array
that was created for activities A dictonary is created of concatenated 'activity'
string describing the actions.
"""
| authman/Python201609 | Jessie Smith/assignments/Flask Olympics/olympics8/server.py | Python | mit | 1,465 | [
"CASINO"
] | cb332b193d6cd4ed3a6d6ebd50e695c9a8ecb523a26140c3c892b615ed9955e5 |
from chempy.util.testing import requires
from ..integrated import pseudo_irrev, pseudo_rev, binary_irrev, binary_rev
# sympy is an optional test dependency: swallow the ImportError here and let
# the @requires("sympy") decorators skip the tests when it is missing.
try:
    import sympy
except ImportError:
    sympy = None
else:
    one = sympy.S(1)
    # Shared symbols: time, forward/backward rate constants and the three
    # concentration symbols used by the integrated rate expressions.
    t, kf, kb, prod, major, minor = sympy.symbols(
        "t kf kb prod major minor", negative=False, nonnegative=True, real=True
    )
    # Exact rational substitution values, so the ODE checks below can be
    # verified symbolically/numerically without floating-point error.
    subsd = {
        t: one * 2,
        kf: one * 3,
        kb: one * 7,
        major: one * 11,
        minor: one * 13,
        prod: one * 0,
    }
@requires("sympy")
def test_pseudo_irrev():
f = pseudo_irrev(t, kf, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (major * kf * (minor - f)).subs(subsd)).simplify() == 0
@requires("sympy")
def test_pseudo_rev():
f = pseudo_rev(t, kf, kb, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (major * kf * (minor - f) - kb * f).subs(subsd)).simplify() == 0
@requires("sympy")
def test_binary_irrev():
f = binary_irrev(t, kf, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (kf * (minor - f) * (major - f)).subs(subsd)).simplify() == 0
@requires("sympy")
def test_binary_rev():
f = binary_rev(t, kf, kb, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
ans = kf * (minor - f) * (major - f) - kb * f
# symbolic susbsitution fails:
assert abs(float(num_dfdt) - float(ans.subs(subsd))) < 2e-14
| bjodah/aqchem | chempy/kinetics/tests/test_integrated.py | Python | bsd-2-clause | 1,549 | [
"ChemPy"
] | 45340eb9303a90db811f9211b635cacc18072f3755c25e06522f32b66c355bfe |
# Regression test: load a ParaView state file and exercise projected-tetrahedra
# volume rendering, skipping when the current OpenGL driver does not support
# the mapper.
# NOTE(review): Python 2 syntax (print statements, old-style raise); kept as-is.
from paraview.simple import *
from paraview import smtesting
import vtk
import vtk.vtkRenderingVolume
import os

paraview.simple._DisableFirstRenderCameraReset()

# need a baseline? Set to True to (re)generate the baseline image instead of
# comparing against it.
saveImage = False

# check for driver support first: create a minimal render window and ask the
# projected-tetrahedra mapper whether this GPU/driver combination can run it.
rw = vtk.vtkRenderWindow()
rw.SetSize(1,1)
rw.Render()
ptm = vtk.vtkProjectedTetrahedraMapper()
ok = ptm.IsSupported(rw)
print
print 'ProjectedTetrahedraMapper %s supported '%(
    'is' if(ok) else 'is not')
del ptm
del rw

if ok:
    smtesting.ProcessCommandLineArguments()
    smtesting.LoadServerManagerState(smtesting.StateXMLFileName)
    view = GetRenderView()
    # Force local rendering so the result does not depend on remote rendering.
    view.RemoteRenderThreshold = 0;
    if saveImage:
        SetActiveView(view)
        Render()
        # Baseline image name is derived from the state file name.
        imageFile = os.path.splitext(os.path.basename(smtesting.StateXMLFileName))[0]
        WriteImage('%s/../../%s.png'%(smtesting.TempDir, imageFile))
    if not smtesting.DoRegressionTesting(view.SMProxy):
        raise smtesting.TestError, 'Test failed.'
    print
    print 'Test passes'
else:
    print 'Skipped untested.'
print
"ParaView",
"VTK"
] | 13cd691c0b14c442d2aec4a81e18b539aaf83b2f16455d1ca689d2bf27b6609e |
# -*-python-*-
#
# Copyright (C) 2006-2013 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
# (c) 2006 Sergey Lapin <slapin@dataart.com>
import vcauth
import string
import os.path
import debug
from ConfigParser import ConfigParser
class ViewVCAuthorizer(vcauth.GenericViewVCAuthorizer):
  """Subversion authz authorizer module.

  Parses a Subversion authz file once per repository root and caches, per
  root, the mapping of repository paths to a read-access boolean for the
  configured username.

  NOTE(review): Python 2 module (string module functions, has_key); kept
  byte-for-byte, documentation only.
  """

  def __init__(self, username, params={}):
    # Cache: {root -> { paths -> access boolean for USERNAME }}; a root maps
    # to None when USERNAME can read nothing under it.
    self.rootpaths = { }

    # Get the authz file location from a passed-in parameter.
    self.authz_file = params.get('authzfile')
    if not self.authz_file:
      raise debug.ViewVCException("No authzfile configured")
    if not os.path.exists(self.authz_file):
      raise debug.ViewVCException("Configured authzfile file not found")

    # See if the admin wants us to do case normalization of usernames.
    self.force_username_case = params.get('force_username_case')
    if self.force_username_case == "upper":
      self.username = username and string.upper(username) or username
    elif self.force_username_case == "lower":
      self.username = username and string.lower(username) or username
    elif not self.force_username_case:
      self.username = username
    else:
      raise debug.ViewVCException("Invalid value for force_username_case "
                                  "option")

  def _get_paths_for_root(self, rootname):
    """Return (and cache) the {path: allow} map for ROOTNAME, or None when
    USERNAME cannot read anything under that root."""
    if self.rootpaths.has_key(rootname):
      return self.rootpaths[rootname]
    paths_for_root = { }

    # Parse the authz file, replacing ConfigParser's optionxform()
    # method with something that won't futz with the case of the
    # option names.
    cp = ConfigParser()
    cp.optionxform = lambda x: x
    try:
      cp.read(self.authz_file)
    except:
      raise debug.ViewVCException("Unable to parse configured authzfile file")

    # Figure out if there are any aliases for the current username
    aliases = []
    if cp.has_section('aliases'):
      for alias in cp.options('aliases'):
        entry = cp.get('aliases', alias)
        if entry == self.username:
          aliases.append(alias)

    # Figure out which groups USERNAME has a part of.
    groups = []
    if cp.has_section('groups'):
      all_groups = []

      def _process_group(groupname):
        """Inline function to handle groups within groups.
        For a group to be within another group in SVN, the group
        definitions must be in the correct order in the config file.
        ie. If group A is a member of group B then group A must be
        defined before group B in the [groups] section.
        Unfortunately, the ConfigParser class provides no way of
        finding the order in which groups were defined so, for reasons
        of practicality, this function lets you get away with them
        being defined in the wrong order. Recursion is guarded
        against though."""
        # If we already know the user is part of this already-
        # processed group, return that fact.
        if groupname in groups:
          return 1
        # Otherwise, ensure we don't process a group twice.
        if groupname in all_groups:
          return 0
        # Store the group name in a global list so it won't be processed again
        all_groups.append(groupname)
        group_member = 0
        groupname = groupname.strip()
        entries = string.split(cp.get('groups', groupname), ',')
        for entry in entries:
          entry = string.strip(entry)
          if entry == self.username:
            group_member = 1
            break
          elif entry[0:1] == "@" and _process_group(entry[1:]):
            # "@name" denotes a nested group reference.
            group_member = 1
            break
          elif entry[0:1] == "&" and entry[1:] in aliases:
            # "&name" denotes an alias reference.
            group_member = 1
            break
        if group_member:
          groups.append(groupname)
        return group_member

      # Process the groups
      for group in cp.options('groups'):
        _process_group(group)

    def _userspec_matches_user(userspec):
      # If there is an inversion character, recurse and return the
      # opposite result.
      if userspec[0:1] == '~':
        return not _userspec_matches_user(userspec[1:])
      # See if the userspec applies to our current user.
      return userspec == '*' \
             or userspec == self.username \
             or (self.username is not None and userspec == "$authenticated") \
             or (self.username is None and userspec == "$anonymous") \
             or (userspec[0:1] == "@" and userspec[1:] in groups) \
             or (userspec[0:1] == "&" and userspec[1:] in aliases)

    def _process_access_section(section):
      """Inline function for determining user access in a single
      config secction. Return a two-tuple (ALLOW, DENY) containing
      the access determination for USERNAME in a given authz file
      SECTION (if any)."""
      # Figure if this path is explicitly allowed or denied to USERNAME.
      allow = deny = 0
      for user in cp.options(section):
        user = string.strip(user)
        if _userspec_matches_user(user):
          # See if the 'r' permission is among the ones granted to
          # USER. If so, we can stop looking. (Entry order is not
          # relevant -- we'll use the most permissive entry, meaning
          # one 'allow' is all we need.)
          allow = string.find(cp.get(section, user), 'r') != -1
          deny = not allow
          if allow:
            break
      return allow, deny

    # Read the other (non-"groups") sections, and figure out in which
    # repositories USERNAME or his groups have read rights. We'll
    # first check groups that have no specific repository designation,
    # then superimpose those that have a repository designation which
    # matches the one we're asking about.
    root_sections = []
    for section in cp.sections():

      # Skip the "groups" section -- we handled that already.
      if section == 'groups':
        continue

      if section == 'aliases':
        continue

      # Process root-agnostic access sections; skip (but remember)
      # root-specific ones that match our root; ignore altogether
      # root-specific ones that don't match our root. While we're at
      # it, go ahead and figure out the repository path we're talking
      # about.
      if section.find(':') == -1:
        path = section
      else:
        name, path = string.split(section, ':', 1)
        if name == rootname:
          root_sections.append(section)
        continue

      # Check for a specific access determination.
      allow, deny = _process_access_section(section)

      # If we got an explicit access determination for this path and this
      # USERNAME, record it.
      if allow or deny:
        if path != '/':
          # Normalize the path: leading slash, no empty components.
          path = '/' + string.join(filter(None, string.split(path, '/')), '/')
        paths_for_root[path] = allow

    # Okay. Superimpose those root-specific values now.
    for section in root_sections:

      # Get the path again.
      name, path = string.split(section, ':', 1)

      # Check for a specific access determination.
      allow, deny = _process_access_section(section)

      # If we got an explicit access determination for this path and this
      # USERNAME, record it.
      if allow or deny:
        if path != '/':
          path = '/' + string.join(filter(None, string.split(path, '/')), '/')
        paths_for_root[path] = allow

    # If the root isn't readable, there's no point in caring about all
    # the specific paths the user can't see. Just point the rootname
    # to a None paths dictionary.
    root_is_readable = 0
    for path in paths_for_root.keys():
      if paths_for_root[path]:
        root_is_readable = 1
        break
    if not root_is_readable:
      paths_for_root = None

    self.rootpaths[rootname] = paths_for_root
    return paths_for_root

  def check_root_access(self, rootname):
    """Return 1 when USERNAME can read anything at all under ROOTNAME."""
    paths = self._get_paths_for_root(rootname)
    return (paths is not None) and 1 or 0

  def check_universal_access(self, rootname):
    """Return 1 (all readable), 0 (none readable), or None (mixed /
    indeterminable) for USERNAME's access across the whole of ROOTNAME."""
    paths = self._get_paths_for_root(rootname)
    if not paths:  # None or empty.
      return 0

    # Search the access determinations. If there's a mix, we can't
    # claim a universal access determination.
    found_allow = 0
    found_deny = 0
    for access in paths.values():
      if access:
        found_allow = 1
      else:
        found_deny = 1
      if found_allow and found_deny:
        return None

    # We didn't find both allowances and denials, so we must have
    # found one or the other. Denials only is a universal denial.
    if found_deny:
      return 0

    # ... but allowances only is only a universal allowance if read
    # access is granted to the root directory.
    if found_allow and paths.has_key('/'):
      return 1

    # Anything else is indeterminable.
    return None

  def check_path_access(self, rootname, path_parts, pathtype, rev=None):
    """Return the access boolean for the path PATH_PARTS under ROOTNAME,
    inheriting from the nearest ancestor with an explicit determination."""
    # Crawl upward from the path represented by PATH_PARTS toward to
    # the root of the repository, looking for an explicitly grant or
    # denial of access.
    paths = self._get_paths_for_root(rootname)
    if paths is None:
      return 0
    parts = path_parts[:]
    while parts:
      path = '/' + string.join(parts, '/')
      if paths.has_key(path):
        return paths[path]
      del parts[-1]
    return paths.get('/', 0)
| marcellodesales/svnedge-console | svn-server/lib/viewvc/vcauth/svnauthz/__init__.py | Python | agpl-3.0 | 9,798 | [
"VisIt"
] | 517c3e55b38c3c0b248ec537d5ce33d709e4169a9b7c3eb7ebd471fa70c6d683 |
"""
tests for potential.py
"""
import mdtraj
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_array_equal)
from odin import potential
from odin import exptdata
from odin.testing import ref_file
def test_flat_potential():
    """A FlatPotential should evaluate to an array of ones for any structure."""
    flat = potential.FlatPotential()
    traj = mdtraj.trajectory.load(ref_file('ala2.pdb'))
    assert_array_equal(flat(traj), np.ones(1))
class TestWeightedExptPotential(object):
    """Tests for potential.WeightedExptPotential, built from two dummy
    DistanceRestraint experiments (2 + 3 measurements = 5 in total)."""

    def setup(self):
        # a dummy expts for testing; rows are four-column restraint entries
        # (see exptdata.DistanceRestraint for column semantics — TODO confirm)
        restraint_array = np.zeros((2,4))
        restraint_array[0,:] = np.array([0, 5, 1.0, 1])
        restraint_array[1,:] = np.array([4, 10, 10.0, 0])
        self.expt1 = exptdata.DistanceRestraint(restraint_array)

        restraint_array = np.zeros((3,4))
        restraint_array[0,:] = np.array([0, 5, 1.0, 1])
        restraint_array[1,:] = np.array([4, 10, 10.0, 0])
        restraint_array[2,:] = np.array([4, 10, 10.0, 0])
        self.expt2 = exptdata.DistanceRestraint(restraint_array)

        # Total measurement count across both experiments (2 + 3).
        self.num_meas = 5
        self.default_weights = np.ones(self.num_meas)
        self.wep = potential.WeightedExptPotential(self.expt1, self.expt2)
        return

    def test_call(self):
        """Evaluating the potential on ala2 yields a length-1 energy array."""
        ala2 = mdtraj.trajectory.load(ref_file('ala2.pdb'))
        energy = self.wep(ala2)
        assert energy[0] == 3.0
        assert energy.shape == (1,)

    def test_add_experiment(self):
        """Adding another experiment bumps the experiment count to 3."""
        self.wep.add_experiment(self.expt1)
        assert self.wep.num_experiments == 3

    def test_weights(self):
        """Weights default to all ones (one per measurement)."""
        assert_array_almost_equal(self.wep.weights, self.default_weights)

    def test_num_experiments(self):
        assert self.wep.num_experiments == 2

    def test_set_all_weights(self):
        """set_all_weights replaces the full weight vector."""
        self.wep.set_all_weights(np.zeros(self.num_meas))
        assert_array_almost_equal(self.wep.weights, np.zeros(self.num_meas))

    def test_expt_weights(self):
        """Per-experiment weight slices have 2 and 3 entries respectively."""
        assert len(self.wep.expt_weights(0)) == 2
        assert len(self.wep.expt_weights(1)) == 3

    def test_predictions(self):
        """Predictions span all 5 measurements for the single conformation."""
        ala2 = mdtraj.trajectory.load(ref_file('ala2.pdb'))
        p = self.wep.predictions(ala2)
        assert p.shape == (1,5)
        #print 'pred0', self.expt1.predict(ala2)
        #print 'prediction', p
| tjlane/odin | test/test_potential.py | Python | gpl-2.0 | 2,436 | [
"MDTraj"
] | c3d9569156a9953c82b17a58549d8a2517e657abfa88f3c0059b5ebdadc29331 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_asm_policy_server_technology
short_description: Manages Server Technology on an ASM policy
description:
- Manages Server Technology on ASM policies.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the server technology to apply on, or remove from, the ASM policy.
type: str
required: True
choices:
- jQuery
- Java Servlets/JSP
- ASP
- WebDAV
- IIS
- Front Page Server Extensions (FPSE)
- ASP.NET
- Microsoft Windows
- Unix/Linux
- Macromedia ColdFusion
- WordPress
- Apache Tomcat
- Apache/NCSA HTTP Server
- Outlook Web Access
- PHP
- Microsoft SQL Server
- Oracle
- MySQL
- Lotus Domino
- BEA Systems WebLogic Server
- Macromedia JRun
- Novell
- Cisco
- SSI (Server Side Includes)
- Proxy Servers
- CGI
- Sybase/ASE
- IBM DB2
- PostgreSQL
- XML
- Apache Struts
- Elasticsearch
- JBoss
- Citrix
- Node.js
- Django
- MongoDB
- Ruby
- JavaServer Faces (JSF)
- Joomla
- Jetty
policy_name:
description:
- Specifies the name of an existing ASM policy to add or remove a server technology to.
type: str
required: True
state:
description:
- When C(present), ensures the resource exists.
- When C(absent), ensures the resource is removed.
type: str
default: present
choices:
- present
- absent
partition:
description:
- This parameter is only used when identifying an ASM policy.
type: str
default: Common
notes:
- This module is primarily used as a component of configuring an ASM policy in Ansible Galaxy ASM Policy Role.
- Requires BIG-IP >= 13.0.0
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add Server Technology to ASM Policy
bigip_asm_policy_server_technology:
name: Joomla
policy_name: FooPolicy
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove Server Technology from ASM Policy
bigip_asm_policy_server_technology:
name: Joomla
policy_name: FooPolicy
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
policy_name:
description: The name of the ASM policy
returned: changed
type: str
sample: FooPolicy
name:
description: The name of Server Technology added/removed on the ASM policy
returned: changed
type: str
sample: Joomla
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from distutils.version import LooseVersion
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import (
module_provisioned, tmos_version
)
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
    # API attribute name -> module parameter name mapping (none needed here).
    api_map = {
    }

    # Attributes sent to the BIG-IP API (none: membership is managed through
    # dedicated REST endpoints rather than attribute updates).
    api_attributes = [
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'policy_name',
        'name'
    ]

    # Attributes diffed to detect changes (none: presence/absence only).
    updatables = [
    ]
class ApiParameters(Parameters):
    # No API-side parameter transformations are required for this module.
    pass
class ModuleParameters(Parameters):
    # No module-side parameter transformations are required for this module.
    pass
class Changes(Parameters):
    def to_return(self):
        """Return a dict of the returnable attributes, filtered of empties.

        The original wrapped this body in ``try/except Exception: raise``,
        which is a no-op (a bare re-raise with no handling or logging); the
        dead handler has been removed. Behavior is unchanged.
        """
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        return self._filter_params(result)
class UsableChanges(Changes):
    # Changes in the form consumed by the API (no adjustments needed here).
    pass
class ReportableChanges(Changes):
    # Changes in the form reported back to the user (no adjustments needed).
    pass
class ModuleManager(object):
    """Drives creation/removal of a Server Technology on a BIG-IP ASM policy.

    Reads the desired state from the Ansible module parameters and issues
    iControl REST calls against the device to converge on that state.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _announce_deprecations(self, result):
        # Re-emit any deprecation warnings that accumulated in the result.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def _set_changed_options(self):
        # Copy all desired (non-None) returnable values into self.changes.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Changes(params=changed)
    def exec_module(self):
        """Main entry point: apply the desired state and report changes."""
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        # The server-technology endpoints only exist when ASM is provisioned.
        if not module_provisioned(self.client, 'asm'):
            raise F5ModuleError(
                "ASM must be provisioned to use this module."
            )
        if self.version_is_less_than_13():
            raise F5ModuleError(
                "This module requires TMOS version 13.x and above."
            )
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        send_teem(start, self.module, version)
        return result
    def version_is_less_than_13(self):
        # Server Technologies were introduced in TMOS 13.0.0.
        version = tmos_version(self.client)
        if LooseVersion(version) < LooseVersion('13.0.0'):
            return True
        else:
            return False
    def present(self):
        # Idempotent create: do nothing when the association already exists.
        if self.exists():
            return False
        else:
            return self.create()
    def absent(self):
        # Idempotent delete: only remove when the association exists.
        if self.exists():
            return self.remove()
        return False
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the deletion actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def exists(self):
        """Return True if the Server Technology is attached to the policy.

        Side effect: caches the technology's id on ``self.want.tech_id`` so
        that ``remove_from_device`` can address it directly.
        """
        policy_id = self._get_policy_id()
        server_link = self._get_server_tech_link()
        uri = 'https://{0}:{1}/mgmt/tm/asm/policies/{2}/server-technologies/'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            policy_id,
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if 'items' in response and response['items'] != []:
            # Match on the technology's selfLink rather than its name.
            for st in response['items']:
                if st['serverTechnologyReference']['link'] == server_link:
                    self.want.tech_id = st['id']
                    return True
        return False
    def _get_policy_id(self):
        # Resolve the ASM policy name/partition to the policy's REST id.
        policy_id = None
        uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,id".format(
            self.want.policy_name, self.want.partition
        )
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
            raise F5ModuleError(resp.content)
        if 'items' in response and response['items'] != []:
            policy_id = response['items'][0]['id']
        if not policy_id:
            raise F5ModuleError(
                "The policy with the name {0} does not exist".format(self.want.policy_name)
            )
        return policy_id
    def _get_server_tech_link(self):
        # Look up the selfLink of the named Server Technology.
        link = None
        uri = "https://{0}:{1}/mgmt/tm/asm/server-technologies/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        # URL-encode spaces in technology names (e.g. 'Apache Tomcat').
        name = self.want.name.replace(' ', '%20')
        query = "?$filter=contains(serverTechnologyName,'{0}')".format(name)
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
            raise F5ModuleError(resp.content)
        if 'items' in response:
            link = response['items'][0]['selfLink']
            return link
        return link
    def create_on_device(self):
        # POST a serverTechnologyReference onto the policy.
        policy_id = self._get_policy_id()
        uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}/server-technologies/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            policy_id
        )
        params = dict(serverTechnologyReference={'link': self._get_server_tech_link()})
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        raise F5ModuleError(resp.content)
    def remove_from_device(self):
        # DELETE the association using the id cached by exists().
        policy_id = self._get_policy_id()
        tech_id = self.want.tech_id
        uri = 'https://{0}:{1}/mgmt/tm/asm/policies/{2}/server-technologies/{3}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            policy_id,
            tech_id,
        )
        response = self.client.api.delete(uri)
        if response.status in [200, 201]:
            return True
        raise F5ModuleError(response.content)
class ArgumentSpec(object):
    """Builds the Ansible argument spec for this module."""
    def __init__(self):
        self.supports_check_mode = True
        # Closed set of Server Technology names accepted by ASM; used as
        # the 'choices' list for the 'name' option below.
        self.tech = [
            'jQuery',
            'Java Servlets/JSP',
            'ASP',
            'WebDAV',
            'IIS',
            'Front Page Server Extensions (FPSE)',
            'ASP.NET',
            'Microsoft Windows',
            'Unix/Linux',
            'Macromedia ColdFusion',
            'WordPress',
            'Apache Tomcat',
            'Apache/NCSA HTTP Server',
            'Outlook Web Access',
            'PHP',
            'Microsoft SQL Server',
            'Oracle',
            'MySQL',
            'Lotus Domino',
            'BEA Systems WebLogic Server',
            'Macromedia JRun',
            'Novell',
            'Cisco',
            'SSI (Server Side Includes)',
            'Proxy Servers',
            'CGI',
            'Sybase/ASE',
            'IBM DB2',
            'PostgreSQL',
            'XML',
            'Apache Struts',
            'Elasticsearch',
            'JBoss',
            'Citrix',
            'Node.js',
            'Django',
            'MongoDB',
            'Ruby',
            'JavaServer Faces (JSF)',
            'Joomla',
            'Jetty'
        ]
        argument_spec = dict(
            policy_name=dict(
                required=True
            ),
            name=dict(
                choices=self.tech,
                required=True
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Merge the shared F5 options with this module's own options.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report the result."""
    argspec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=argspec.argument_spec,
        supports_check_mode=argspec.supports_check_mode,
    )
    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as error:
        # Any F5-level failure becomes a clean Ansible failure message.
        module.fail_json(msg=str(error))


if __name__ == '__main__':
    main()
| F5Networks/f5-ansible-modules | ansible_collections/f5networks/f5_modules/plugins/modules/bigip_asm_policy_server_technology.py | Python | mit | 13,115 | [
"ASE",
"Galaxy"
] | 2d10c2a0d5d6173595d53abeec6eff05518c562f5e85aed8845e3db400863ac5 |
# Test registry consumed by the RDKit TestRunner; each entry is
# presumably (interpreter, script, environment dict) -- verify against
# TestRunner.RunScript.
tests = [("python", "UnitTestPackage.py", {}), ]
# No long-running tests for this package.
longTests = []
if __name__ == '__main__':
    import sys
    from rdkit import TestRunner
    # Exit status is the number of failed tests (0 == success).
    failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
    sys.exit(len(failed))
| greglandrum/rdkit | rdkit/ML/ModelPackage/test_list.py | Python | bsd-3-clause | 221 | [
"RDKit"
] | 24cb664559c9a37aac3e0b10392eba97c4c59d40f5e9269174cec2decd322ea8 |
import numpy as np
from numpy import exp, sin, cos, pi, radians, degrees
from sasmodels.weights import Dispersion as BaseDispersion
class Dispersion(BaseDispersion):
    r"""
    Cyclic gaussian dispersion on orientation.
    .. math:
        w(\theta) = e^{-\frac{\sin^2 \theta}{2 \sigma^2}}
    This provides a close match to the gaussian distribution for
    low angles, but the tails are limited to $\pm 90^\circ$. For $\sigma$
    large the distribution is approximately uniform. The usual polar coordinate
    projection applies, with $\theta$ weights scaled by $\cos \theta$
    and $\phi$ weights unscaled.
    This is equivalent to a Maier-Saupe distribution with order
    parameter $a = 1/(2 \sigma^2)$, with $\sigma$ in radians.
    """
    type = "cyclic_gaussian"
    default = dict(npts=35, width=1, nsigmas=3)
    # Note: center is always zero for orientation distributions
    def _weights(self, center, sigma, lb, ub):
        """Return (angles in degrees, unnormalized weights).

        *sigma* is given in degrees; *lb*/*ub* are the parameter bounds,
        also in degrees.
        """
        # Convert sigma in degrees to radians
        sigma = radians(sigma)
        # Limit width to +/- 90 degrees
        width = min(self.nsigmas*sigma, pi/2)
        x = np.linspace(-width, width, self.npts)
        # Truncate the distribution in case the parameter value is limited.
        # BUG FIX: the original evaluated this boolean-mask expression but
        # discarded the result, so the lb/ub bounds were never applied;
        # boolean indexing returns a new array that must be reassigned.
        x = x[(x >= radians(lb)) & (x <= radians(ub))]
        # Return orientation in degrees with Maier-Saupe weights
        return degrees(x), exp(-0.5*sin(x)**2/sigma**2)
| SasView/sasmodels | example/weights/cyclic_gaussian.py | Python | bsd-3-clause | 1,408 | [
"Gaussian"
] | c77486297aeea9aa1ade35766b4af5ef63e4fd25209a8a83943b68bfffc3eca8 |
# Script to run a second derivative gaussian kernel on a grid
# Author : Isabel Restrepo
# 6-14-2009
import bvpl_batch
import time
#time.sleep(30);
bvpl_batch.register_processes();
bvpl_batch.register_datatypes();
class dbvalue:
    """Handle to a value in the batch database: a record id plus a type name."""
    def __init__(self, index, type):
        self.id = index    # unsigned integer record id
        self.type = type   # string naming the stored type
# Input/output locations for this experiment (Windows paths).
data_dir = "c:/Experiments/object_recognition/bvpl/CapitolSiteHigh/cropped_world"
output_dir = "c:/Experiments/object_recognition/bvpl/CapitolSiteHigh/gauss_xx_kernel"
param_dir = "c:/Projects/vxl/vxl/contrib/brl/contrib/lemsvxl/src/contrib/isabel/params"
# Step 1: load the 4-d response grid produced by a previous run.
print("Load Voxel Grid");
bvpl_batch.init_process("bvxmLoadGridProcess");
bvpl_batch.set_input_string(0, output_dir + "/gauss_xx_response4d.vox");
bvpl_batch.set_input_string(1,"vnl_vector_fixed_float_4");
bvpl_batch.run_process();
(world_id,world_type)= bvpl_batch.commit_output(0);
world = dbvalue(world_id,world_type);
# Step 2: build the vector of second-derivative (xx) Gaussian kernels.
print("Creating 3D edge kernel");
bvpl_batch.init_process("bvplCreateGauss3dXXKernelVectorProcess");
bvpl_batch.set_input_float(0, 1.5); #Sigma1
bvpl_batch.set_input_float(1, 2); #Sigma2
bvpl_batch.set_input_float(2, 2); #Sigma3
bvpl_batch.run_process();
(kernel_id,kernel_type)= bvpl_batch.commit_output(0);
kernel_vector = dbvalue(kernel_id,kernel_type);
# Step 3: convert per-voxel directions to hue for visualization,
# writing both a grid and an SVG legend.
print("Converting Directions to Hue ");
bvpl_batch.init_process("bvplConvertDirectionToHueProcess");
bvpl_batch.set_input_from_db(0,world );
bvpl_batch.set_input_from_db(1,kernel_vector);
bvpl_batch.set_input_string(2, output_dir + "/gauss_xx_hue_c.vox");
bvpl_batch.set_input_string(3, output_dir + "/gauss_xx_hue_c.svg");
bvpl_batch.run_process();
(hue_grid_id,hue_grid_type)= bvpl_batch.commit_output(0);
hue_grid = dbvalue(hue_grid_id,hue_grid_type);
# Step 4: dump the hue grid as an image stack for inspection.
print("Writing Orientation Grid");
bvpl_batch.init_process("bvxmGridToImageStackProcess");
bvpl_batch.set_input_from_db(0,hue_grid);
bvpl_batch.set_input_string(1,"vnl_vector_fixed_float_4");
bvpl_batch.set_input_string(2,output_dir + "/gauss_xx_raw_c");
bvpl_batch.run_process();
"Gaussian"
] | 0ac59fea4b54cf6cb3d8309595be3a92ebf4b953c35245ca4c940b39191adb63 |
"""
Sheet classes.
A Sheet is a two-dimensional arrangement of processing units,
typically modeling a neural region or a subset of cells in a neural
region. Any new Sheet classes added to this directory will
automatically become available for any model.
"""
# Imported here so that all Sheets will be in the same package
from topo.base.sheet import Sheet
from topo.base.projection import ProjectionSheet # pyflakes:ignore (API import)
from topo.base.cf import CFSheet
from topo.base.generatorsheet import GeneratorSheet
from topo.base.generatorsheet import ChannelGeneratorSheet # pyflakes:ignore (API import)
# Imported here for ease of access by users
from topo.base.boundingregion import BoundingBox # pyflakes:ignore (API import)
from topo.base.sheet import activity_type # pyflakes:ignore (API import)
import numpy
import topo
import param
from topo.base.cf import CFIter
from topo.base.projection import Projection
from topo.base.simulation import FunctionEvent, PeriodicEventSequence, EPConnectionEvent
class ActivityCopy(Sheet):
    """
    Copies incoming Activity patterns to its activity matrix and output port.
    Trivial Sheet class that is useful primarily as a placeholder for
    data that is computed elsewhere but that you want to appear as a
    Sheet, e.g. when wrapping an external simulation.
    """
    dest_ports=['Activity']
    src_ports=['Activity']
    def input_event(self,conn,data):
        # Stash the incoming data; it is consumed by process_current_time.
        self.input_data=data
    def process_current_time(self):
        if hasattr(self, 'input_data'):
            # In-place zero-then-add keeps the existing activity array
            # object rather than rebinding to the incoming array.
            self.activity*=0
            self.activity+=self.input_data
            self.send_output(src_port='Activity',data=self.activity)
            # Consume the pending input so the next step is a no-op until
            # new data arrives.
            del self.input_data
class SequenceGeneratorSheet(GeneratorSheet):
    """
    Sheet that generates a timed sequence of patterns.
    This sheet will repeatedly generate the input_sequence, with the
    given onsets. The sequence is repeated every self.period time
    units. If the total length of the sequence is longer than
    self.period, a warning is issued and the sequence repeats
    immediately after completion.
    """
    input_sequence = param.List(default=[],
        doc="""The sequence of patterns to generate. Must be a list of
        (onset,generator) tuples. An empty list defaults to the
        single tuple: (0,self.input_generator), resulting in
        identical behavior to an ordinary GeneratorSheet.""")
    def __init__(self,**params):
        super(SequenceGeneratorSheet,self).__init__(**params)
        # Fall back to ordinary GeneratorSheet behavior when no sequence
        # was supplied.
        if not self.input_sequence:
            self.input_sequence = [(0,self.input_generator)]
    def start(self):
        assert self.simulation
        # For each (onset, generator) pair, schedule installing the
        # generator at its onset, immediately followed by a generate()
        # call; the whole sequence repeats every self.period.
        event_seq = []
        for delay,gen in self.input_sequence:
            event_seq.append(FunctionEvent(self.simulation.convert_to_time_type(delay),self.set_input_generator,gen))
            event_seq.append(FunctionEvent(0,self.generate))
        now = self.simulation.time()
        self.event = PeriodicEventSequence(now+self.simulation.convert_to_time_type(self.phase),self.simulation.convert_to_time_type(self.period),event_seq)
        self.simulation.enqueue_event(self.event)
def compute_joint_norm_totals(projlist,active_units_mask=True):
    """
    Compute norm_total for each CF in each projection from a group to
    be normalized jointly.
    """
    # All Projections in the group must share the same r,c size.
    assert len(projlist) >= 1
    iterator = CFIter(projlist[0], active_units_mask=active_units_mask)
    for _, flat_index in iterator():
        # Sum the norm_totals of the corresponding CFs across projections,
        # then write the joint total back into every one of them.
        joint_sum = numpy.add.reduce(
            [proj.flatcfs[flat_index].norm_total for proj in projlist])
        for proj in projlist:
            proj.flatcfs[flat_index].norm_total = joint_sum
class JointNormalizingCFSheet(CFSheet):
    """
    A type of CFSheet extended to support joint sum-based normalization.
    For L1 normalization, joint normalization means normalizing the
    sum of (the absolute values of) all weights in a set of
    corresponding CFs in different Projections, rather than only
    considering weights in the same CF.
    This class provides a mechanism for grouping Projections (see
    _port_match and _grouped_in_projections) and a learn() function
    that computes the joint sums. Joint normalization also requires
    having ConnectionField store and return a norm_total for each
    neuron, and having an TransferFn that will respect this norm_total
    rather than the strict total of the ConnectionField's weights. At
    present, CFPOF_DivisiveNormalizeL1 and
    CFPOF_DivisiveNormalizeL1_opt do use norm_total; others can be
    extended to do something similar if necessary.
    To enable joint normalization, you can declare that all the
    incoming connections that should be normalized together each
    have a dest_port of:
    dest_port=('Activity','JointNormalize', 'AfferentGroup1'),
    Then all those that have this dest_port will be normalized
    together, as long as an appropriate TransferFn is being used.
    """
    joint_norm_fn = param.Callable(default=compute_joint_norm_totals,doc="""
        Function to use to compute the norm_total for each CF in each
        projection from a group to be normalized jointly.""")
    # JABALERT: Should check that whenever a connection is added to a
    # group, it has the same no of cfs as the existing connections.
    def start(self):
        self._normalize_weights(active_units_mask=False)
    # CEBALERT: rename active_units_mask and default to False
    def _normalize_weights(self,active_units_mask=True):
        """
        Apply the weights_output_fns for every group of Projections.
        If active_units_mask is True, only active units will have
        their weights normalized.
        """
        for key,projlist in self._grouped_in_projections('JointNormalize').items():
            # FIX: compare to the None sentinel with 'is', not '==' (PEP 8);
            # a None key marks projections with no joint-normalization group.
            if key is None:
                normtype='Individually'
            else:
                normtype='Jointly'
                self.joint_norm_fn(projlist,active_units_mask)
            self.debug(normtype + " normalizing:")
            for p in projlist:
                p.apply_learn_output_fns(active_units_mask=active_units_mask)
                self.debug(' %s',p.name)
    def learn(self):
        """
        Call the learn() method on every Projection to the Sheet, and
        call the output functions (jointly if necessary).
        """
        # Ask all projections to learn independently
        for proj in self.in_connections:
            if not isinstance(proj,Projection):
                self.debug("Skipping non-Projection "+proj.name)
            else:
                proj.learn()
        # Apply output function in groups determined by dest_port
        self._normalize_weights()
class JointNormalizingCFSheet_Continuous(JointNormalizingCFSheet):
    """
    CFSheet that runs continuously, with no 'resting' periods between pattern presentations.
    Note that learning occurs only when the time is a whole number.
    """
    def process_current_time(self):
        if self.new_input:
            self.new_input = False
            # Learn only on whole-number simulation times; activation
            # happens on every new input regardless.
            if(float(topo.sim.time()) % 1.0 == 0.0):
                #self.activate()
                if (self.plastic):
                    self.learn()
            #else:
            self.activate()
class Continuous(JointNormalizingCFSheet):
    """
    CFSheet similar to JointNormalizingCFSheet_Continuous that runs
    continuously, with no 'resting' periods between pattern
    presentations.
    The key difference is that this sheet supports optional snapshot
    learning after a particular duration into each fixation, otherwise
    continuous learning is applied. Used by the CGCAL and TCAL models.
    """
    snapshot_learning = param.NumericTuple(default=None, allow_None=True,
        length=3, doc="""
        Three tuple e.g (240,130,0.051) corresponding to (period,
        interval, epsilon) where topo.sim.time() % period gives the
        duration into the current 'fixation. In the example, for
        instance, a fixation is 240 milliseconds. The interval specifies
        the time within each fixation at which learning is applied i.e
        130 milliseconds in the given example. Lastly epsilon is used
        for comparison to zero, to make sure snapshot learning occurs
        even when there are small mismatches e.g due to small delays
        introducted by GeneratorSheets.""")
    def process_current_time(self):
        # With no snapshot schedule configured, learn on every step
        # (continuous learning); otherwise learn only near the configured
        # interval boundaries within each fixation.
        if self.snapshot_learning is None:
            condition = True
        else:
            (period, interval, epsilon) = self.snapshot_learning
            remainder = float(topo.sim.time()) % period
            condition = (remainder % interval) < epsilon
        if condition:
            if self.plastic:
                self.learn()
        self.activate()
class SettlingCFSheet(JointNormalizingCFSheet):
    """
    A JointNormalizingCFSheet implementing the idea of settling.
    Breaks continuous time up into discrete iterations, each
    consisting of a series of activations, up to a fixed number of
    settling steps. Settling is controlled by the tsettle parameter;
    once that number of settling steps has been reached, an external
    input is required before the sheet will activate again.
    See the LISSOM algorithm (Sirosh and Miikkulainen, Biological
    Cybernetics 71:66-78, 1994) for one example of its usage.
    """
    strict_tsettle = param.Parameter(default = None,doc="""
        If non-None, delay sending output until activation_count reaches this value.""")
    mask_init_time=param.Integer(default=5,bounds=(0,None),doc="""
        Determines when a new mask is initialized in each new iteration.
        The mask is reset whenever new input comes in. Once the
        activation_count (see tsettle) reaches mask_init_time, the mask
        is initialized to reflect the current activity profile.""")
    tsettle=param.Integer(default=8,bounds=(0,None),doc="""
        Number of times to activate the SettlingCFSheet sheet for each external input event.
        A counter is incremented each time an input is received from any
        source, and once the counter reaches tsettle, the last activation
        step is skipped so that there will not be any further recurrent
        activation. The next external (i.e., afferent or feedback)
        event will then start the counter over again.""")
    continuous_learning = param.Boolean(default=False, doc="""
        Whether to modify the weights after every settling step.
        If false, waits until settling is completed before doing learning.""")
    precedence = param.Number(0.6)
    post_initialization_weights_output_fns = param.HookList([],doc="""
        If not empty, weights output_fns that will replace the
        existing ones after an initial normalization step.""")
    beginning_of_iteration = param.HookList(default=[],instantiate=False,doc="""
        List of callables to be executed at the beginning of each iteration.""")
    end_of_iteration = param.HookList(default=[],instantiate=False,doc="""
        List of callables to be executed at the end of each iteration.""")
    def __init__(self,**params):
        super(SettlingCFSheet,self).__init__(**params)
        # Private stack so state_push/state_pop can save and restore the
        # settling counters.
        self.__counter_stack=[]
        self.activation_count = 0
        self.new_iteration = True
    def start(self):
        self._normalize_weights(active_units_mask=False)
        # Optionally swap in replacement weights_output_fns once the
        # initial normalization has been performed.
        if len(self.post_initialization_weights_output_fns)>0:
            for proj in self.in_connections:
                if not isinstance(proj,Projection):
                    self.debug("Skipping non-Projection ")
                else:
                    proj.weights_output_fns=self.post_initialization_weights_output_fns
    def input_event(self,conn,data):
        # On a new afferent input, clear the activity
        if self.new_iteration:
            for f in self.beginning_of_iteration: f()
            self.new_iteration = False
            self.activity *= 0.0
            for proj in self.in_connections:
                proj.activity *= 0.0
            self.mask.reset()
        super(SettlingCFSheet,self).input_event(conn,data)
    ### JABALERT! There should be some sort of warning when
    ### tsettle times the input delay is larger than the input period.
    ### Right now it seems to do strange things in that case (does it
    ### settle at all after the first iteration?), but of course that
    ### is arguably an error condition anyway (and should thus be
    ### flagged).
    # CEBALERT: there is at least one bug in here for tsettle==0: see
    # CB/JAB email "LISSOM tsettle question", 2010/03/22.
    def process_current_time(self):
        """
        Pass the accumulated stimulation through self.output_fns and
        send it out on the default output port.
        """
        if self.new_input:
            self.new_input = False
            if self.activation_count == self.mask_init_time:
                self.mask.calculate()
            if self.tsettle == 0:
                # Special case: behave just like a CFSheet
                self.activate()
                self.learn()
            elif self.activation_count == self.tsettle:
                # Once we have been activated the required number of times
                # (determined by tsettle), reset various counters, learn
                # if appropriate, and avoid further activation until an
                # external event arrives.
                for f in self.end_of_iteration: f()
                self.activation_count = 0
                self.new_iteration = True # used by input_event when it is called
                if (self.plastic and not self.continuous_learning):
                    self.learn()
            else:
                self.activate()
                self.activation_count += 1
                if (self.plastic and self.continuous_learning):
                    self.learn()
    # print the weights of a unit
    def printwts(self,x,y):
        # NOTE: Python 2 print statements, matching the rest of this
        # (Python 2) codebase.
        for proj in self.in_connections:
            print proj.name, x, y
            print proj.cfs[x,y].weights
    def state_push(self,**args):
        super(SettlingCFSheet,self).state_push(**args)
        self.__counter_stack.append((self.activation_count,self.new_iteration))
    def state_pop(self,**args):
        super(SettlingCFSheet,self).state_pop(**args)
        self.activation_count,self.new_iteration=self.__counter_stack.pop()
    def send_output(self,src_port=None,data=None):
        """Send some data out to all connections on the given src_port."""
        out_conns_on_src_port = [conn for conn in self.out_connections
                                 if self._port_match(conn.src_port,[src_port])]
        for conn in out_conns_on_src_port:
            # While strict_tsettle is in force, suppress output to
            # 'Afferent' dest_ports until enough settling steps have run.
            if self.strict_tsettle != None:
                if self.activation_count < self.strict_tsettle:
                    if len(conn.dest_port)>2 and conn.dest_port[2] == 'Afferent':
                        continue
            self.verbose("Sending output on src_port %s via connection %s to %s",
                         src_port, conn.name, conn.dest.name)
            e=EPConnectionEvent(self.simulation.convert_to_time_type(conn.delay)+self.simulation.time(),conn,data)
            self.simulation.enqueue_event(e)
# Build the module's public API: every Sheet subclass defined above plus a
# few convenience names.
_public = list(set([_k for _k,_v in locals().items() if isinstance(_v,type) and issubclass(_v,Sheet)]))
_public += [
    "compute_joint_norm_totals",
    "BoundingBox",
    "activity_type",
]
# Automatically discover all .py files in this directory.
import os,fnmatch
__all__ = _public + [f.split('.py')[0] for f in os.listdir(__path__[0]) if fnmatch.fnmatch(f,'[!._]*.py')]
# NOTE(review): 'f' is the list-comprehension loop variable, which leaks
# into this scope only under Python 2; this 'del' would fail on Python 3.
del f,os,fnmatch
# By default, avoid loading modules that rely on external libraries
# that might not be present on this system.
__all__.remove('ptztracker')
| ioam/topographica | topo/sheet/__init__.py | Python | bsd-3-clause | 15,889 | [
"NEURON"
] | b53bfd8c6517504a0cf6f10f260dd073b0197b39983dc761b580bf1417727489 |
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the cf module.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import iris
import iris.fileformats.cf as cf
from iris.tests import mock
class TestCaching(tests.IrisTest):
    """Verify that CF variable attribute access caches netCDF attributes."""
    def test_cached(self):
        # Make sure attribute access to the underlying netCDF4.Variable
        # is cached.
        name = 'foo'
        nc_var = mock.MagicMock()
        cf_var = cf.CFAncillaryDataVariable(name, nc_var)
        # Construction itself reads the attribute list exactly once.
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        # Accessing a netCDF attribute should result in no further calls
        # to nc_var.ncattrs() and the creation of an attribute on the
        # cf_var.
        # NB. Can't use hasattr() because that triggers the attribute
        # to be created!
        self.assertTrue('coordinates' not in cf_var.__dict__)
        _ = cf_var.coordinates
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        self.assertTrue('coordinates' in cf_var.__dict__)
        # Trying again results in no change.
        _ = cf_var.coordinates
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        self.assertTrue('coordinates' in cf_var.__dict__)
        # Trying another attribute results in just a new attribute.
        self.assertTrue('standard_name' not in cf_var.__dict__)
        _ = cf_var.standard_name
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        self.assertTrue('standard_name' in cf_var.__dict__)
@tests.skip_data
class TestCFReader(tests.IrisTest):
    """Whole-file checks of the variable groups identified by cf.CFReader."""
    def setUp(self):
        filename = tests.get_data_path(
            ('NetCDF', 'rotated', 'xyt', 'small_rotPole_precipitation.nc'))
        self.cfr = cf.CFReader(filename)
    def test_ancillary_variables_pass_0(self):
        self.assertEqual(self.cfr.cf_group.ancillary_variables, {})
    def test_auxiliary_coordinates_pass_0(self):
        self.assertEqual(sorted(self.cfr.cf_group.auxiliary_coordinates.keys()),
                         ['lat', 'lon'])
        lat = self.cfr.cf_group['lat']
        self.assertEqual(lat.shape, (190, 174))
        self.assertEqual(lat.dimensions, ('rlat', 'rlon'))
        self.assertEqual(lat.ndim, 2)
        self.assertEqual(lat.cf_attrs(),
                         (('long_name', 'latitude'),
                          ('standard_name', 'latitude'),
                          ('units', 'degrees_north')))
        lon = self.cfr.cf_group['lon']
        self.assertEqual(lon.shape, (190, 174))
        self.assertEqual(lon.dimensions, ('rlat', 'rlon'))
        self.assertEqual(lon.ndim, 2)
        self.assertEqual(lon.cf_attrs(),
                         (('long_name', 'longitude'),
                          ('standard_name', 'longitude'),
                          ('units', 'degrees_east')))
    def test_bounds_pass_0(self):
        self.assertEqual(sorted(self.cfr.cf_group.bounds.keys()), ['time_bnds'])
        time_bnds = self.cfr.cf_group['time_bnds']
        self.assertEqual(time_bnds.shape, (4, 2))
        self.assertEqual(time_bnds.dimensions, ('time', 'time_bnds'))
        self.assertEqual(time_bnds.ndim, 2)
        self.assertEqual(time_bnds.cf_attrs(), ())
    def test_coordinates_pass_0(self):
        self.assertEqual(sorted(self.cfr.cf_group.coordinates.keys()),
                         ['rlat', 'rlon', 'time'])
        rlat = self.cfr.cf_group['rlat']
        self.assertEqual(rlat.shape, (190,))
        self.assertEqual(rlat.dimensions, ('rlat',))
        self.assertEqual(rlat.ndim, 1)
        attr = []
        attr.append(('axis', 'Y'))
        attr.append(('long_name', 'rotated latitude'))
        attr.append(('standard_name', 'grid_latitude'))
        attr.append(('units', 'degrees'))
        self.assertEqual(rlat.cf_attrs(), tuple(attr))
        rlon = self.cfr.cf_group['rlon']
        self.assertEqual(rlon.shape, (174,))
        self.assertEqual(rlon.dimensions, ('rlon',))
        self.assertEqual(rlon.ndim, 1)
        attr = []
        attr.append(('axis', 'X'))
        attr.append(('long_name', 'rotated longitude'))
        attr.append(('standard_name', 'grid_longitude'))
        attr.append(('units', 'degrees'))
        self.assertEqual(rlon.cf_attrs(), tuple(attr))
        time = self.cfr.cf_group['time']
        self.assertEqual(time.shape, (4,))
        self.assertEqual(time.dimensions, ('time',))
        self.assertEqual(time.ndim, 1)
        attr = []
        attr.append(('axis', 'T'))
        attr.append(('bounds', 'time_bnds'))
        attr.append(('calendar', 'gregorian'))
        attr.append(('long_name', 'Julian Day'))
        attr.append(('units', 'days since 1950-01-01 00:00:00.0'))
        self.assertEqual(time.cf_attrs(), tuple(attr))
    def test_data_pass_0(self):
        self.assertEqual(sorted(self.cfr.cf_group.data_variables.keys()),
                         ['pr'])
        data = self.cfr.cf_group['pr']
        self.assertEqual(data.shape, (4, 190, 174))
        self.assertEqual(data.dimensions, ('time', 'rlat', 'rlon'))
        self.assertEqual(data.ndim, 3)
        attr = []
        attr.append(('_FillValue', 1e+30))
        attr.append(('cell_methods', 'time: mean'))
        attr.append(('coordinates', 'lon lat'))
        attr.append(('grid_mapping', 'rotated_pole'))
        attr.append(('long_name', 'Precipitation'))
        attr.append(('missing_value', 1e+30))
        attr.append(('standard_name', 'precipitation_flux'))
        attr.append(('units', 'kg m-2 s-1'))
        attr = tuple(attr)
        # Float-valued attributes are compared with a tolerance.
        self.assertEqual(data.cf_attrs()[0][0], attr[0][0])
        self.assertAlmostEqual(data.cf_attrs()[0][1], attr[0][1], delta=1.6e+22)
        self.assertEqual(data.cf_attrs()[1:5], attr[1:5])
        self.assertAlmostEqual(data.cf_attrs()[5][1], attr[5][1], delta=1.6e+22)
        self.assertEqual(data.cf_attrs()[6:], attr[6:])
    def test_formula_terms_pass_0(self):
        self.assertEqual(self.cfr.cf_group.formula_terms, {})
    def test_grid_mapping_pass_0(self):
        self.assertEqual(sorted(self.cfr.cf_group.grid_mappings.keys()),
                         ['rotated_pole'])
        rotated_pole = self.cfr.cf_group['rotated_pole']
        self.assertEqual(rotated_pole.shape, ())
        self.assertEqual(rotated_pole.dimensions, ())
        self.assertEqual(rotated_pole.ndim, 0)
        attr = []
        attr.append(('grid_mapping_name', 'rotated_latitude_longitude'))
        attr.append(('grid_north_pole_latitude', 18.0))
        attr.append(('grid_north_pole_longitude', -140.75))
        self.assertEqual(rotated_pole.cf_attrs(), tuple(attr))
    def test_cell_measures_pass_0(self):
        self.assertEqual(self.cfr.cf_group.cell_measures, {})
    def test_global_attributes_pass_0(self):
        self.assertEqual(
            sorted(self.cfr.cf_group.global_attributes.keys()),
            ['Conventions', 'NCO', 'experiment',
             'history', 'institution', 'source',]
        )
        self.assertEqual(self.cfr.cf_group.global_attributes['Conventions'],
                         'CF-1.0')
        self.assertEqual(self.cfr.cf_group.global_attributes['experiment'],
                         'ER3')
        self.assertEqual(self.cfr.cf_group.global_attributes['institution'],
                         'DMI')
        self.assertEqual(self.cfr.cf_group.global_attributes['source'],
                         'HIRHAM')
    def test_variable_cf_group_pass_0(self):
        self.assertEqual(sorted(self.cfr.cf_group['time'].cf_group.keys()),
                         ['time_bnds'])
        self.assertEqual(sorted(self.cfr.cf_group['pr'].cf_group.keys()),
                         ['lat', 'lon', 'rlat', 'rlon', 'rotated_pole', 'time'])
    def test_variable_attribute_touch_pass_0(self):
        lat = self.cfr.cf_group['lat']
        self.assertEqual(lat.cf_attrs(),
                         (('long_name', 'latitude'),
                          ('standard_name', 'latitude'),
                          ('units', 'degrees_north')))
        self.assertEqual(lat.cf_attrs_used(), ())
        self.assertEqual(lat.cf_attrs_unused(),
                         (('long_name', 'latitude'),
                          ('standard_name', 'latitude'),
                          ('units', 'degrees_north')))
        # touch some variable attributes.
        lat.long_name
        lat.units
        self.assertEqual(lat.cf_attrs_used(),
                         (('long_name', 'latitude'),
                          ('units', 'degrees_north')))
        self.assertEqual(lat.cf_attrs_unused(),
                         (('standard_name', 'latitude'),))
        # clear the attribute touch history.
        lat.cf_attrs_reset()
        self.assertEqual(lat.cf_attrs_used(), ())
        self.assertEqual(lat.cf_attrs_unused(),
                         (('long_name', 'latitude'),
                          ('standard_name', 'latitude'),
                          ('units', 'degrees_north')))
@tests.skip_data
class TestLoad(tests.IrisTest):
    """Checks of coordinate attributes and cell methods on loaded cubes."""
    def test_attributes_empty(self):
        filename = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                        'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(cube.coord('time').attributes, {})
    def test_attributes_contain_positive(self):
        filename = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                        'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(cube.coord('height').attributes['positive'], 'up')
    def test_attributes_populated(self):
        filename = tests.get_data_path(
            ('NetCDF', 'label_and_climate', 'small_FC_167_mon_19601101.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(
            sorted(cube.coord('longitude').attributes.items()),
            [('data_type', 'float'),
             ('modulo', 360),
             ('topology', 'circular'),
             ('valid_max', 359.0),
             ('valid_min', 0.0)])
    def test_cell_methods(self):
        filename = tests.get_data_path(('NetCDF', 'global', 'xyt', 'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(cube.cell_methods,
                         (iris.coords.CellMethod(method=u'mean',
                                                 coords=(u'time', ),
                                                 intervals=(u'6 minutes', ),
                                                 comments=()), ))
@tests.skip_data
class TestClimatology(tests.IrisTest):
    """Checks for detection of climatology bounds variables."""

    def setUp(self):
        path = tests.get_data_path(('NetCDF', 'label_and_climate',
                                    'A1B-99999a-river-sep-2070-2099.nc'))
        self.cfr = cf.CFReader(path)

    def test_bounds(self):
        coords = self.cfr.cf_group['temp_dmax_tmean_abs'].cf_group.coordinates
        climatology = coords['time'].cf_group.climatology
        # Exactly one climatology variable, with a (1, 2) bounds array.
        self.assertEqual(len(climatology), 1)
        self.assertEqual(list(climatology.keys()), ['climatology_bounds'])
        bounds_var = climatology['climatology_bounds']
        self.assertEqual(bounds_var.ndim, 2)
        self.assertEqual(bounds_var.shape, (1, 2))
@tests.skip_data
class TestLabels(tests.IrisTest):
    """Checks for CF label variable discovery and mapping."""

    def setUp(self):
        path = tests.get_data_path(
            ('NetCDF', 'label_and_climate',
             'A1B-99999a-river-sep-2070-2099.nc'))
        self.cfr_start = cf.CFReader(path)
        path = tests.get_data_path(
            ('NetCDF', 'label_and_climate',
             'small_FC_167_mon_19601101.nc'))
        self.cfr_end = cf.CFReader(path)

    def test_label_dim_start(self):
        region_group = self.cfr_start.cf_group.labels['region_name']
        # Both the raw and the CDF variant of the data variable map the
        # same single label variable onto the same dimension.
        for var_name in ['temp_dmax_tmean_abs', 'cdf_temp_dmax_tmean_abs']:
            cf_data_var = self.cfr_start.cf_group[var_name]
            self.assertEqual(sorted(self.cfr_start.cf_group.labels.keys()),
                             [u'region_name'])
            self.assertEqual(sorted(cf_data_var.cf_group.labels.keys()),
                             [u'region_name'])
            self.assertEqual(region_group.cf_label_dimensions(cf_data_var),
                             (u'georegion',))
            self.assertEqual(region_group.cf_label_data(cf_data_var)[0],
                             'Anglian')

    def test_label_dim_end(self):
        cf_data_var = self.cfr_end.cf_group['tas']
        labels = self.cfr_end.cf_group.labels
        expected_names = [u'experiment_id', u'institution', u'source']
        self.assertEqual(sorted(labels.keys()), expected_names)
        self.assertEqual(sorted(cf_data_var.cf_group.labels.keys()),
                         expected_names)
        # Every label variable spans the 'ensemble' dimension; check the
        # first data value of each.
        first_values = {
            u'experiment_id': '2005',
            u'institution': 'ECMWF',
            u'source': 'IFS33R1/HOPE-E, Sys 1, Met 1, ENSEMBLES',
        }
        for name in expected_names:
            self.assertEqual(labels[name].cf_label_dimensions(cf_data_var),
                             (u'ensemble',))
            self.assertEqual(labels[name].cf_label_data(cf_data_var)[0],
                             first_values[name])
if __name__ == "__main__":
    # Allow this test module to be run directly as a script.
    tests.main()
| zak-k/iris | lib/iris/tests/test_cf.py | Python | gpl-3.0 | 14,796 | [
"NetCDF"
] | 118bf84f3d3f40669f82bb1e01bc70ef1e0d06568fa466e8af99a665fe0e027b |
"""
.. _sfm-track:
==================================================
Tracking with the Sparse Fascicle Model
==================================================
Tracking requires a per-voxel model. Here, the model is the Sparse Fascicle
Model, described in [Rokem2014]_. This model reconstructs the diffusion signal
as a combination of the signals from different fascicles (see also
:ref:`sfm-reconst`).
To begin, we read the Stanford HARDI data-set into memory:
"""
from dipy.data import read_stanford_labels
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.get_affine()
"""
This dataset provides a label map (generated using Freesurfer), in which the
white matter voxels are labeled as either 1 or 2:
"""
white_matter = (labels == 1) | (labels == 2)
"""
The first step in tracking is generating a model from which tracking directions
can be extracted in every voxel.
For the SFM, this requires first that we define a canonical response function
that will be used to deconvolve the signal in every voxel
"""
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
"""
We initialize an SFM model object, using this response function and using the
default sphere (362 vertices, symmetrically distributed on the surface of the
sphere):
"""
from dipy.data import get_sphere
sphere = get_sphere()
from dipy.reconst import sfm
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
l1_ratio=0.5, alpha=0.001,
response=response[0])
"""
We fit this model to the data in each voxel in the white-matter mask, so that
we can use these directions in tracking:
"""
from dipy.reconst.peaks import peaks_from_model
pnm = peaks_from_model(sf_model, data, sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=white_matter,
parallel=True
)
"""
A ThresholdTissueClassifier object is used to segment the data to track only
through areas in which the Generalized Fractional Anisotropy (GFA) is
sufficiently high.
"""
from dipy.tracking.local import ThresholdTissueClassifier
classifier = ThresholdTissueClassifier(pnm.gfa, .25)
"""
Tracking will be started from a set of seeds evenly distributed in the white
matter:
"""
from dipy.tracking import utils
seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2], affine=affine)
"""
For the sake of brevity, we will take only the first 1000 seeds, generating
only 1000 streamlines. Remove this line to track from many more points in all of
the white matter
"""
seeds = seeds[:1000]
"""
We now have the necessary components to construct a tracking pipeline and
execute the tracking
"""
from dipy.tracking.local import LocalTracking
streamlines = LocalTracking(pnm, classifier, seeds, affine, step_size=.5)
streamlines = list(streamlines)
"""
Next, we will create a visualization of these streamlines, relative to this
subject's T1-weighted anatomy:
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
from dipy.data import read_stanford_t1
from dipy.tracking.utils import move_streamlines
from numpy.linalg import inv
t1 = read_stanford_t1()
t1_data = t1.get_data()
t1_aff = t1.get_affine()
color = line_colors(streamlines)
"""
To speed up visualization, we will select a random sub-set of streamlines to
display. This is particularly important, if you track from seeds throughout the
entire white matter, generating many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""
from dipy.tracking.streamline import select_random_set_of_streamlines
plot_streamlines = select_random_set_of_streamlines(streamlines, 900)
streamlines_actor = fvtk.streamtube(
list(move_streamlines(plot_streamlines, inv(t1_aff))),
line_colors(streamlines))
vol_actor = fvtk.slicer(t1_data, voxsz=(1.0, 1.0, 1.0), plane_i=[40],
plane_j=None, plane_k=[35], outline=False)
ren = fvtk.ren()
fvtk.add(ren, streamlines_actor)
fvtk.add(ren, vol_actor)
fvtk.record(ren, n_frames=1, out_path='sfm_streamlines.png',
size=(800, 800))
"""
.. figure:: sfm_streamlines.png
:align: center
**Sparse Fascicle Model tracks**
Finally, we can save these streamlines to a 'trk' file, for use in other
software, or for further analysis.
"""
from dipy.io.trackvis import save_trk
save_trk("sfm_detr.trk", streamlines, affine, labels.shape)
"""
References
----------
.. [Rokem2014] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2014). Evaluating the accuracy of diffusion MRI models in white
matter. http://arxiv.org/abs/1411.0721
"""
| mdesco/dipy | doc/examples/sfm_tracking.py | Python | bsd-3-clause | 4,934 | [
"Brian"
] | 476560b424ccdd7f2226efec8ac60ae2370e38dc15afdff686776e758c25ee17 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
# First thing to do monkey patch external libraries like matplotlib,
# mayavi, numpy.
import morphforge.core.monkey_patching
from morphforge.core.mfrandom import MFRandom
from morphforge.core.mgrs import LocMgr, LogMgr, SettingsMgr, RCMgr
from morphforge.core.misc import merge_dictionaries, check_cstyle_varname
from morphforge.core.misc import is_iterable, FileIO
from morphforge.core.misc import SeqUtils, StrUtils
from morphforge.core.misc import ParameterSweepValues
from morphforge.core.objectnumberer import ObjectLabeller
from os.path import join as Join
from morphforge.core.plugindict import PluginDict
# Public API of morphforge.core: the names re-exported for
# ``from morphforge.core import *``.
__all__ = [
    'LocMgr',
    'LogMgr',
    'SettingsMgr',
    'RCMgr',
    'merge_dictionaries',
    'FileIO',
    'SeqUtils',
    'StrUtils',
    'check_cstyle_varname',
    'is_iterable',
    'ObjectLabeller',
    'Join',
    'PluginDict',
    'MFRandom',
    'ParameterSweepValues',
]
| mikehulluk/morphforge | src/morphforge/core/__init__.py | Python | bsd-2-clause | 2,453 | [
"Mayavi"
] | c98f3284afd9d997e6e5d5e8abcd78f657246a5e6d33422f2fa7e8185317689d |
from io import StringIO
from collections import defaultdict
from numpy.testing import (
assert_equal, assert_array_equal,)
import pytest
import MDAnalysis
from MDAnalysis.analysis.hydrogenbonds.wbridge_analysis import (
WaterBridgeAnalysis, )
class TestWaterBridgeAnalysis(object):
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_empty():
        '''A universe with no hydrogen bonds: the water oxygen sits
        ~2.9 nm from the nearest donor hydrogen, far beyond bonding range.'''
        # NOTE(review): the gro text is positional data -- do not reflow it.
        grofile = '''Test gro file
5
1ALA N 1 0.000 0.000 0.000
1ALA H 2 0.100 0.000 0.000
2SOL OW 3 3.000 0.000 0.000
4ALA H 4 0.500 0.000 0.000
4ALA N 5 0.600 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_DA():
        '''A universe where a hydrogen bond donor (resid 1, N-H) bonds
        directly to a hydrogen bond acceptor (resid 4, O).'''
        grofile = '''Test gro file
3
1ALA N 1 0.000 0.000 0.000
1ALA H 2 0.100 0.000 0.000
4ALA O 3 0.300 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_DA_PBC():
        '''Like universe_DA, but the bond spans the periodic boundary:
        the donor sits near x=0.9 and the acceptor near x=0.1 in a 1 nm box.'''
        grofile = '''Test gro file
3
1ALA N 1 0.800 0.000 0.000
1ALA H 2 0.900 0.000 0.000
4ALA O 3 0.100 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_AD():
        '''A universe where a hydrogen bond acceptor (resid 1, O) bonds
        directly to a hydrogen bond donor (resid 4, H-N).'''
        grofile = '''Test gro file
3
1ALA O 1 0.000 0.000 0.000
4ALA H 2 0.200 0.000 0.000
4ALA N 3 0.300 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_loop():
        '''A universe with one hydrogen bond acceptor bonding to a water which
        bonds back to the first hydrogen bond acceptor and thus forms a loop.'''
        # Both water hydrogens sit close to the resid-1 oxygen.
        grofile = '''Test gro file
5
1ALA O 1 0.000 0.001 0.000
2SOL OW 2 0.300 0.001 0.000
2SOL HW1 3 0.200 0.002 0.000
2SOL HW2 4 0.200 0.000 0.000
4ALA O 5 0.600 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_DWA():
        '''A universe with one hydrogen bond donor (resid 1, N-H) bridged to
        a hydrogen bond acceptor (resid 4, O) through a single water.'''
        grofile = '''Test gro file
5
1ALA N 1 0.000 0.000 0.000
1ALA H 2 0.100 0.000 0.000
2SOL OW 3 0.300 0.000 0.000
2SOL HW2 4 0.400 0.000 0.000
4ALA O 5 0.600 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_DWD():
        '''A universe with one hydrogen bond donor (resid 1) bridged to
        another hydrogen bond donor (resid 4) through a single water.'''
        grofile = '''Test gro file
5
1ALA N 1 0.000 0.000 0.000
1ALA H 2 0.100 0.000 0.000
2SOL OW 3 0.300 0.000 0.000
4ALA H 4 0.500 0.000 0.000
4ALA N 5 0.600 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_AWA():
        '''A universe with two hydrogen bond acceptors joined by a single
        water (each water hydrogen points at one acceptor oxygen).'''
        grofile = '''Test gro file
5
1ALA O 1 0.000 0.000 0.000
2SOL OW 2 0.300 0.000 0.000
2SOL HW1 3 0.200 0.000 0.000
2SOL HW2 4 0.400 0.000 0.000
4ALA O 5 0.600 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_AWD():
        '''A universe with one hydrogen bond acceptor (resid 1, O) bridged
        to a hydrogen bond donor (resid 4, H-N) through a single water.'''
        grofile = '''Test gro file
5
1ALA O 1 0.000 0.000 0.000
2SOL OW 2 0.300 0.000 0.000
2SOL HW1 3 0.200 0.000 0.000
4ALA H 4 0.500 0.000 0.000
4ALA N 5 0.600 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_AWWA():
        '''A universe with one hydrogen bond acceptor bridged to another
        hydrogen bond acceptor through two waters (second-order bridge).'''
        grofile = '''Test gro file
7
1ALA O 1 0.000 0.000 0.000
2SOL OW 2 0.300 0.000 0.000
2SOL HW1 3 0.200 0.000 0.000
2SOL HW2 4 0.400 0.000 0.000
3SOL OW 5 0.600 0.000 0.000
3SOL HW1 6 0.700 0.000 0.000
4ALA O 7 0.900 0.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_AWWWA():
        '''A universe with one hydrogen bond acceptor bridged to another
        hydrogen bond acceptor through three waters (third-order bridge).'''
        grofile = '''Test gro file
9
1ALA O 1 0.000 0.000 0.000
2SOL OW 2 0.300 0.000 0.000
2SOL HW1 3 0.200 0.000 0.000
2SOL HW2 4 0.400 0.000 0.000
3SOL OW 5 0.600 0.000 0.000
3SOL HW1 6 0.700 0.000 0.000
4SOL OW 7 0.900 0.000 0.000
4SOL HW1 8 1.000 0.000 0.000
5ALA O 9 1.200 0.000 0.000
10.0 10.0 10.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_AWWWWA():
        '''A universe with one hydrogen bond acceptor bridged to another
        hydrogen bond acceptor through four waters (fourth-order bridge).

        NOTE(review): the original docstring said "three waters" but the
        topology contains four (SOL resids 2-5).
        '''
        grofile = '''Test gro file
11
1ALA O 1 0.000 0.000 0.000
2SOL OW 2 0.300 0.000 0.000
2SOL HW1 3 0.200 0.000 0.000
2SOL HW2 4 0.400 0.000 0.000
3SOL OW 5 0.600 0.000 0.000
3SOL HW1 6 0.700 0.000 0.000
4SOL OW 7 0.900 0.000 0.000
4SOL HW1 8 1.000 0.000 0.000
5SOL OW 9 1.200 0.000 0.000
5SOL HW1 10 1.300 0.000 0.000
6ALA O 11 1.400 0.000 0.000
10.0 10.0 10.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_branch():
        '''A universe where a second-order water bridge branches: the last
        water donates to two different acceptors in selection 2
        (resids 4 and 5).'''
        grofile = '''Test gro file
9
1ALA O 1 0.000 0.000 0.000
2SOL OW 2 0.300 0.000 0.000
2SOL HW1 3 0.200 0.000 0.000
2SOL HW2 4 0.400 0.000 0.000
3SOL OW 5 0.600 0.000 0.000
3SOL HW1 6 0.700 0.000 0.000
3SOL HW2 7 0.600 0.100 0.000
4ALA O 8 0.900 0.000 0.000
5ALA O 9 0.600 0.300 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def universe_AWA_AWWA():
        '''A universe with two independent acceptor-acceptor bridges: one
        through a single water (y=0 row) and one through two waters
        (y=1 row).'''
        grofile = '''Test gro file
12
1ALA O 1 0.000 0.000 0.000
2SOL OW 2 0.300 0.000 0.000
2SOL HW1 3 0.200 0.000 0.000
2SOL HW2 4 0.400 0.000 0.000
4ALA O 5 0.600 0.000 0.000
5ALA O 6 0.000 1.000 0.000
6SOL OW 7 0.300 1.000 0.000
6SOL HW1 8 0.200 1.000 0.000
6SOL HW2 9 0.400 1.000 0.000
7SOL OW 10 0.600 1.000 0.000
7SOL HW1 11 0.700 1.000 0.000
8ALA O 12 0.900 1.000 0.000
1.0 1.0 1.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        return u
    @staticmethod
    @pytest.fixture(scope='class')
    def wb_multiframe():
        '''A WaterBridgeAnalysis with a hand-built, multi-frame result network.'''
        grofile = '''Test gro file
13
1ALA O 1 0.000 0.000 0.000
1ALA H 2 0.000 0.000 0.000
2SOL OW 3 0.300 0.000 0.000
2SOL HW1 4 0.200 0.000 0.000
2SOL HW2 5 0.400 0.000 0.000
3SOL OW 6 0.600 0.000 0.000
3SOL HW1 7 0.700 0.000 0.000
4SOL OW 8 0.900 0.000 0.000
4SOL HW1 9 1.000 0.000 0.000
5SOL OW 10 1.200 0.000 0.000
5SOL HW1 11 1.300 0.000 0.000
6ALA H 12 1.400 0.000 0.000
6ALA O 13 1.400 0.000 0.000
10.0 10.0 10.0'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        wb = WaterBridgeAnalysis(u, 'protein and (resid 1)', 'protein and (resid 4)',
                                 order=4)
        # Build a dummy result network by hand instead of calling run().
        # Each dict is one frame; keys appear to be
        # (index, heavy_index, partner, partner_heavy, dist, angle) links
        # (matching the unpacking in the analysis_func tests below),
        # nested to represent the path through bridging waters.
        wb.results.network = []
        wb.results.network.append({(1, 0, 12, None, 2.0, 180.0): None})
        wb.results.network.append({(0, None, 12, 13, 2.0, 180.0): None})
        wb.results.network.append({(1, 0, 3, None, 2.0, 180.0):
                                   {(4, 2, 12, None, 2.0, 180.0): None}})
        wb.results.network.append({(0, None, 3, 2, 2.0, 180.0):
                                   {(4, 2, 5, None, 2.0, 180.0):
                                    {(5, None, 11, 12, 2.0, 180.0): None}}})
        wb.timesteps = range(len(wb.results.network))
        return wb
def test_nodata(self, universe_DA):
'''Test if the funtions can run when there is no data.
This is achieved by not runing the run() first.'''
wb = WaterBridgeAnalysis(universe_DA, 'protein and (resid 1)',
'protein and (resid 4)', order=0)
wb.generate_table()
assert_equal(wb.timesteps_by_type(), None)
assert_equal(wb.count_by_time(), None)
assert_equal(wb.count_by_type(), None)
def test_selection_type_error(self, universe_DA):
'''Test the case when the wrong selection1_type is given'''
try:
wb = WaterBridgeAnalysis(universe_DA, 'protein and (resid 1)',
'protein and (resid 4)', order=0, selection1_type='aaa')
except ValueError:
pass
else:
raise pytest.fail("selection_type aaa should rasie error")
def test_empty_selection(self, universe_DA):
'''Test the case when selection yields empty result'''
wb = WaterBridgeAnalysis(universe_DA, 'protein and (resid 9)',
'protein and (resid 10)', order=0)
wb.run()
assert wb.results.network == [{}]
def test_loop(self, universe_loop):
'''Test if loop can be handled correctly'''
wb = WaterBridgeAnalysis(universe_loop, 'protein and (resid 1)',
'protein and (resid 1 or resid 4)')
wb.run()
assert_equal(len(wb.results.network[0].keys()), 2)
def test_donor_accepter(self, universe_DA):
'''Test zeroth order donor to acceptor hydrogen bonding'''
wb = WaterBridgeAnalysis(universe_DA, 'protein and (resid 1)',
'protein and (resid 4)', order=0, update_selection=True, debug=True)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (1, 0, 2, None))
def test_donor_accepter_heavy(self, universe_DA):
'''Test zeroth order donor to acceptor hydrogen bonding'''
wb = WaterBridgeAnalysis(universe_DA, 'protein and (resid 1)',
'protein and (resid 4)', order=0, update_selection=True, debug=True, distance_type='heavy')
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (1, 0, 2, None))
def test_donor_accepter_pbc(self, universe_DA_PBC):
'''Test zeroth order donor to acceptor hydrogen bonding in PBC conditions'''
wb = WaterBridgeAnalysis(universe_DA_PBC, 'protein and (resid 1)',
'protein and (resid 4)', order=0, pbc=True)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (1, 0, 2, None))
def test_accepter_donor(self, universe_AD):
'''Test zeroth order acceptor to donor hydrogen bonding'''
wb = WaterBridgeAnalysis(universe_AD, 'protein and (resid 1)',
'protein and (resid 4)', order=0)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 1, 2))
def test_acceptor_water_accepter(self, universe_AWA):
'''Test case where the hydrogen bond acceptor from selection 1 form
water bridge with hydrogen bond acceptor from selection 2'''
wb = WaterBridgeAnalysis(universe_AWA, 'protein and (resid 1)',
'protein and (resid 4)')
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
assert_equal(second[list(second.keys())[0]], None)
def test_donor_water_accepter(self, universe_DWA):
'''Test case where the hydrogen bond donor from selection 1 form
water bridge with hydrogen bond acceptor from selection 2'''
wb = WaterBridgeAnalysis(universe_DWA, 'protein and (resid 1)',
'protein and (resid 4)')
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (1, 0, 2, None))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 2, 4, None))
assert_equal(second[list(second.keys())[0]], None)
def test_acceptor_water_donor(self, universe_AWD):
'''Test case where the hydrogen bond acceptor from selection 1 form
water bridge with hydrogen bond donor from selection 2'''
wb = WaterBridgeAnalysis(universe_AWD, 'protein and (resid 1)',
'protein and (resid 4)')
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (1, None, 3, 4))
assert_equal(second[list(second.keys())[0]], None)
def test_donor_water_donor(self, universe_DWD):
'''Test case where the hydrogen bond donor from selection 1 form
water bridge with hydrogen bond donor from selection 2'''
wb = WaterBridgeAnalysis(universe_DWD, 'protein and (resid 1)',
'protein and (resid 4)')
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (1, 0, 2, None))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (2, None, 3, 4))
assert_equal(second[list(second.keys())[0]], None)
def test_empty(self, universe_empty):
'''Test case where no water bridge exists'''
wb = WaterBridgeAnalysis(universe_empty, 'protein', 'protein')
wb.run(verbose=False)
assert_equal(wb.results.network[0], defaultdict(dict))
def test_same_selection(self, universe_DWA):
'''
This test tests that if the selection 1 and selection 2 are both protein.
However, the protein only forms one hydrogen bond with the water.
This entry won't be included.
'''
wb = WaterBridgeAnalysis(universe_DWA, 'protein and resid 1',
'protein and resid 1')
wb.run(verbose=False)
assert_equal(wb.results.network[0], defaultdict(dict))
def test_acceptor_2water_accepter(self, universe_AWWA):
'''Test case where the hydrogen bond acceptor from selection 1 form second order
water bridge with hydrogen bond acceptor from selection 2'''
# test first order
wb = WaterBridgeAnalysis(universe_AWWA, 'protein and (resid 1)',
'protein and (resid 4)')
wb.run(verbose=False)
assert_equal(wb.results.network[0], defaultdict(dict))
# test second order
wb = WaterBridgeAnalysis(universe_AWWA, 'protein and (resid 1)',
'protein and (resid 4)', order=2)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
third = second[list(second.keys())[0]]
assert_equal(list(third.keys())[0][:4], (5, 4, 6, None))
assert_equal(third[list(third.keys())[0]], None)
# test third order
wb = WaterBridgeAnalysis(universe_AWWA, 'protein and (resid 1)',
'protein and (resid 4)', order=3)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
third = second[list(second.keys())[0]]
assert_equal(list(third.keys())[0][:4], (5, 4, 6, None))
assert_equal(third[list(third.keys())[0]], None)
def test_acceptor_3water_accepter(self, universe_AWWWA):
'''Test case where the hydrogen bond acceptor from selection 1 form third order
water bridge with hydrogen bond acceptor from selection 2'''
wb = WaterBridgeAnalysis(universe_AWWWA, 'protein and (resid 1)',
'protein and (resid 5)', order=2)
wb.run(verbose=False)
assert_equal(wb.results.network[0], defaultdict(dict))
wb = WaterBridgeAnalysis(universe_AWWWA, 'protein and (resid 1)',
'protein and (resid 5)', order=3)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
third = second[list(second.keys())[0]]
assert_equal(list(third.keys())[0][:4], (5, 4, 6, None))
fourth = third[list(third.keys())[0]]
assert_equal(list(fourth.keys())[0][:4], (7, 6, 8, None))
assert_equal(fourth[list(fourth.keys())[0]], None)
wb = WaterBridgeAnalysis(universe_AWWWA, 'protein and (resid 1)',
'protein and (resid 5)', order=4)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
third = second[list(second.keys())[0]]
assert_equal(list(third.keys())[0][:4], (5, 4, 6, None))
fourth = third[list(third.keys())[0]]
assert_equal(list(fourth.keys())[0][:4], (7, 6, 8, None))
assert_equal(fourth[list(fourth.keys())[0]], None)
def test_acceptor_4water_accepter(self, universe_AWWWWA):
'''Test case where the hydrogen bond acceptor from selection 1 form fourth order
water bridge with hydrogen bond acceptor from selection 2'''
wb = WaterBridgeAnalysis(universe_AWWWWA, 'protein and (resid 1)',
'protein and (resid 6)', order=3)
wb.run(verbose=False)
assert_equal(wb.results.network[0], defaultdict(dict))
wb = WaterBridgeAnalysis(universe_AWWWWA, 'protein and (resid 1)',
'protein and (resid 6)', order=4)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
third = second[list(second.keys())[0]]
assert_equal(list(third.keys())[0][:4], (5, 4, 6, None))
fourth = third[list(third.keys())[0]]
assert_equal(list(fourth.keys())[0][:4], (7, 6, 8, None))
fifth = fourth[list(fourth.keys())[0]]
assert_equal(list(fifth.keys())[0][:4], (9, 8, 10, None))
assert_equal(fifth[list(fifth.keys())[0]], None)
wb = WaterBridgeAnalysis(universe_AWWWWA, 'protein and (resid 1)',
'protein and (resid 6)', order=5)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
third = second[list(second.keys())[0]]
assert_equal(list(third.keys())[0][:4], (5, 4, 6, None))
fourth = third[list(third.keys())[0]]
assert_equal(list(fourth.keys())[0][:4], (7, 6, 8, None))
fifth = fourth[list(fourth.keys())[0]]
assert_equal(list(fifth.keys())[0][:4], (9, 8, 10, None))
assert_equal(fifth[list(fifth.keys())[0]], None)
def test_acceptor_22water_accepter(self, universe_branch):
'''Test case where the hydrogen bond acceptor from selection 1 form a second order
water bridge with hydrogen bond acceptor from selection 2
and the last water is linked to two residues in selection 2'''
wb = WaterBridgeAnalysis(universe_branch, 'protein and (resid 1)',
'protein and (resid 4 or resid 5)', order=2)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
third = second[list(second.keys())[0]]
assert_equal([(5, 4, 7, None), (6, 4, 8, None)],
sorted([key[:4] for key in list(third.keys())]))
def test_timeseries_wba(self, universe_branch):
'''Test if the time series data is correctly generated in water bridge analysis format'''
wb = WaterBridgeAnalysis(universe_branch, 'protein and (resid 1)',
'protein and (resid 4 or resid 5)', order=2)
wb.output_format = 'sele1_sele2'
wb.run(verbose=False)
timeseries = sorted(wb.results.timeseries[0])
assert_equal(timeseries[0][:4], (0, 2, ('ALA', 1, 'O'), ('SOL', 2, 'HW1')))
assert_equal(timeseries[1][:4], (3, 4, ('SOL', 2, 'HW2'), ('SOL', 3, 'OW')))
assert_equal(timeseries[2][:4], (5, 7, ('SOL', 3, 'HW1'), ('ALA', 4, 'O')))
assert_equal(timeseries[3][:4], (6, 8, ('SOL', 3, 'HW2'), ('ALA', 5, 'O')))
def test_timeseries_hba(self, universe_branch):
'''Test if the time series data is correctly generated in hydrogen bond analysis format'''
wb = WaterBridgeAnalysis(universe_branch, 'protein and (resid 1)',
'protein and (resid 4 or resid 5)', order=2)
wb.output_format = 'donor_acceptor'
wb.run(verbose=False)
timeseries = sorted(wb.results.timeseries[0])
assert_equal(timeseries[0][:4], (2, 0, ('SOL', 2, 'HW1'), ('ALA', 1, 'O')))
assert_equal(timeseries[1][:4], (3, 4, ('SOL', 2, 'HW2'), ('SOL', 3, 'OW')))
assert_equal(timeseries[2][:4], (5, 7, ('SOL', 3, 'HW1'), ('ALA', 4, 'O')))
assert_equal(timeseries[3][:4], (6, 8, ('SOL', 3, 'HW2'), ('ALA', 5, 'O')))
def test_acceptor_12water_accepter(self, universe_AWA_AWWA):
'''Test of independent first order and second can be recognised correctely'''
wb = WaterBridgeAnalysis(universe_AWA_AWWA, 'protein and (resid 1 or resid 5)',
'protein and (resid 4 or resid 8)', order=1)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal(list(network.keys())[0][:4], (0, None, 2, 1))
second = network[list(network.keys())[0]]
assert_equal(list(second.keys())[0][:4], (3, 1, 4, None))
assert_equal(second[list(second.keys())[0]], None)
network = wb.results.network[0]
wb = WaterBridgeAnalysis(universe_AWA_AWWA, 'protein and (resid 1 or resid 5)',
'protein and (resid 4 or resid 8)', order=2)
wb.run(verbose=False)
network = wb.results.network[0]
assert_equal([(0, None, 2, 1), (5, None, 7, 6)],
sorted([key[:4] for key in list(network.keys())]))
def test_count_by_type_single_link(self, universe_DWA):
'''
This test tests the simplest water bridge to see if count_by_type() works.
'''
wb = WaterBridgeAnalysis(universe_DWA, 'protein and (resid 1)',
'protein and (resid 4)')
wb.run(verbose=False)
assert_equal(wb.count_by_type(), [(1, 4, 'ALA', 1, 'H', 'ALA', 4, 'O', 1.)])
def test_count_by_type_multiple_link(self, universe_AWA_AWWA):
'''
This test tests if count_by_type() can give the correct result for more than 1 links.
'''
wb = WaterBridgeAnalysis(universe_AWA_AWWA, 'protein and (resid 1 or resid 5)',
'protein and (resid 4 or resid 8)', order=2)
wb.run(verbose=False)
assert_equal(sorted(wb.count_by_type()),
[[0, 4, 'ALA', 1, 'O', 'ALA', 4, 'O', 1.0],
[5, 11, 'ALA', 5, 'O', 'ALA', 8, 'O', 1.0]])
    def test_count_by_type_multiple_frame(self, wb_multiframe):
        '''
        Test that count_by_type() aggregates occupancies over multiple frames.
        '''
        # Each bridge is only present in a subset of the four frames of the
        # wb_multiframe fixture, hence the fractional occupancies.
        result = [[0, 11, 'ALA', 1, 'O', 'ALA', 6, 'H', 0.25],
                  [0, 12, 'ALA', 1, 'O', 'ALA', 6, 'O', 0.25],
                  [1, 12, 'ALA', 1, 'H', 'ALA', 6, 'O', 0.5]]
        assert_equal(sorted(wb_multiframe.count_by_type()), result)
    def test_count_by_type_filter(self, wb_multiframe):
        '''
        Test that a custom analysis_func can filter results out of
        count_by_type(): only bridges ending on an 'H' atom are counted.
        '''
        def analysis(current, output, u):
            # current[0]/current[-1] are the first/last hops of the bridge.
            sele1_index, sele1_heavy_index, atom2, heavy_atom2, dist, angle = current[0]
            atom1, heavy_atom1, sele2_index, sele2_heavy_index, dist, angle = current[-1]
            sele1 = u.atoms[sele1_index]
            sele2 = u.atoms[sele2_index]
            (s1_resname, s1_resid, s1_name) = (sele1.resname, sele1.resid, sele1.name)
            (s2_resname, s2_resid, s2_name) = (sele2.resname, sele2.resid, sele2.name)
            key = (sele1_index, sele2_index, s1_resname, s1_resid, s1_name, s2_resname, s2_resid, s2_name)
            # Only accumulate bridges whose selection-2 end is an 'H' atom.
            if s2_name == 'H':
                output[key] += 1
        result = [((0, 11, 'ALA', 1, 'O', 'ALA', 6, 'H'), 0.25)]
        assert_equal(sorted(wb_multiframe.count_by_type(analysis_func=analysis)), result)
    def test_count_by_type_merge(self, wb_multiframe):
        '''
        Test that a custom analysis_func can merge bridges between the
        same residue pair in count_by_type().
        '''
        def analysis(current, output, u):
            sele1_index, sele1_heavy_index, atom2, heavy_atom2, dist, angle = current[0]
            atom1, heavy_atom1, sele2_index, sele2_heavy_index, dist, angle = current[-1]
            sele1 = u.atoms[sele1_index]
            sele2 = u.atoms[sele2_index]
            (s1_resname, s1_resid, s1_name) = (sele1.resname, sele1.resid, sele1.name)
            (s2_resname, s2_resid, s2_name) = (sele2.resname, sele2.resid, sele2.name)
            # Key on residues only (atom names dropped) so bridges between
            # the same residue pair collapse into one entry.
            key = (s1_resname, s1_resid, s2_resname, s2_resid)
            output[key] = 1
        result = [(('ALA', 1, 'ALA', 6), 1.0)]
        assert_equal(sorted(wb_multiframe.count_by_type(analysis_func=analysis)), result)
    def test_count_by_type_order(self, wb_multiframe):
        '''
        Test that a custom analysis_func can separate bridges by their
        order (number of waters) in count_by_type().
        '''
        def analysis(current, output, u):
            sele1_index, sele1_heavy_index, atom2, heavy_atom2, dist, angle = current[0]
            atom1, heavy_atom1, sele2_index, sele2_heavy_index, dist, angle = current[-1]
            sele1 = u.atoms[sele1_index]
            sele2 = u.atoms[sele2_index]
            (s1_resname, s1_resid, s1_name) = (sele1.resname, sele1.resid, sele1.name)
            (s2_resname, s2_resid, s2_name) = (sele2.resname, sele2.resid, sele2.name)
            # len(current)-1 is the number of intervening waters (the order).
            key = (s1_resname, s1_resid, s2_resname, s2_resid, len(current)-1)
            output[key] = 1
        result = [(('ALA', 1, 'ALA', 6, 0), 0.5),
                  (('ALA', 1, 'ALA', 6, 1), 0.25),
                  (('ALA', 1, 'ALA', 6, 2), 0.25)]
        assert_equal(sorted(wb_multiframe.count_by_type(analysis_func=analysis)), result)
    def test_count_by_time(self, wb_multiframe):
        '''
        Test that count_by_time() reports one bridge per frame of the
        four-frame fixture.
        '''
        assert_equal(wb_multiframe.count_by_time(), [(0, 1), (1, 1), (2, 1), (3, 1)])
    def test_count_by_time_weight(self, universe_AWA_AWWA):
        '''
        Test that modifying the analysis_func allows the per-bridge weight
        to be changed in count_by_time().
        '''
        wb = WaterBridgeAnalysis(universe_AWA_AWWA, 'protein and (resid 1 or resid 5)',
                                 'protein and (resid 4 or resid 8)', order=2)
        wb.run(verbose=False)
        def analysis(current, output, u):
            sele1_index, sele1_heavy_index, atom2, heavy_atom2, dist, angle = current[0]
            atom1, heavy_atom1, sele2_index, sele2_heavy_index, dist, angle = current[-1]
            sele1 = u.atoms[sele1_index]
            sele2 = u.atoms[sele2_index]
            (s1_resname, s1_resid, s1_name) = (sele1.resname, sele1.resid, sele1.name)
            (s2_resname, s2_resid, s2_name) = (sele2.resname, sele2.resid, sele2.name)
            key = (s1_resname, s1_resid, s2_resname, s2_resid)
            # Weight each bridge by its order instead of counting it as 1.
            output[key] += len(current)-1
        assert_equal(wb.count_by_time(analysis_func=analysis), [(0,3), ])
    def test_count_by_time_empty(self, universe_AWA_AWWA):
        '''
        Test that count_by_time() reports zero when the analysis_func
        records nothing.
        '''
        wb = WaterBridgeAnalysis(universe_AWA_AWWA, 'protein and (resid 1 or resid 5)',
                                 'protein and (resid 4 or resid 8)', order=2)
        wb.run(verbose=False)
        def analysis(current, output, u):
            # Deliberately record nothing.
            pass
        assert_equal(wb.count_by_time(analysis_func=analysis), [(0,0), ])
    def test_generate_table_hba(self, wb_multiframe):
        '''Test generate_table() with the hydrogen-bond style donor/acceptor format.'''
        table = wb_multiframe.generate_table(output_format='donor_acceptor')
        assert_array_equal(
            sorted(table.donor_resid),
            [1, 1, 2, 2, 2, 6, 6],
        )
    def test_generate_table_s1s2(self, wb_multiframe):
        '''Test generate_table() with the selection-1/selection-2 format.'''
        table = wb_multiframe.generate_table(output_format='sele1_sele2')
        assert_array_equal(
            sorted(table.sele1_resid),
            [1, 1, 1, 1, 2, 2, 3],
        )
    def test_timesteps_by_type(self, wb_multiframe):
        '''Test that timesteps_by_type() lists the frames each bridge appears in.'''
        timesteps = sorted(wb_multiframe.timesteps_by_type())
        assert_array_equal(timesteps[3], [1, 12, 'ALA', 1, 'H', 'ALA', 6, 'O', 0, 2])
    def test_duplicate_water(self):
        '''Regression test for Issue #3119 where
        Acceptor···H−O···H-Donor
                     |
                     H···O-H
        was wrongly recognised as a 3rd order water bridge.
        '''
        # Minimal in-memory GRO system reproducing the reported topology.
        grofile = '''Test gro file
7
    1LEU      O    1   1.876   0.810   1.354
  117SOL    HW1    2   1.853   0.831   1.162
  117SOL     OW    3   1.877   0.890   1.081
  117SOL    HW2    4   1.908   0.828   1.007
  135SOL     OW    5   1.924   0.713   0.845
    1LEU      H    6   1.997   0.991   1.194
    1LEU      N    7   2.041   1.030   1.274
   2.22092   2.22092   2.22092'''
        u = MDAnalysis.Universe(StringIO(grofile), format='gro')
        wb = WaterBridgeAnalysis(u, 'resname LEU and name O',
                                 'resname LEU and name N H', order=4)
        wb.run()
        # Only the two genuine bridges must be reported.
        assert len(wb.results.timeseries[0]) == 2
    def test_warn_results_deprecated(self, universe_DA):
        '''Test that the legacy `network`/`timeseries` attributes still work
        but emit a DeprecationWarning pointing at wb.results.'''
        wb = WaterBridgeAnalysis(universe_DA, 'protein and (resid 9)',
                                 'protein and (resid 10)', order=0)
        wb.run()
        wmsg = "The `network` attribute was deprecated in MDAnalysis 2.0.0"
        with pytest.warns(DeprecationWarning, match=wmsg):
            assert_equal(wb.network, wb.results.network)
        wmsg = "The `timeseries` attribute was deprecated in MDAnalysis 2.0.0"
        with pytest.warns(DeprecationWarning, match=wmsg):
            assert_equal(wb.timeseries, wb.results.timeseries)
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/analysis/test_wbridge.py | Python | gpl-2.0 | 34,326 | [
"MDAnalysis"
] | 6d8fc4bc6de530d0ef4f085b1a13170f93d6312ab92604a7496701abbc4b93da |
#!/usr/bin/env python3
"""
refguide_check.py [OPTIONS] [-- ARGS]
- Check for a NumPy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
- Check docstring examples
- Check example blocks in RST files
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings::
$ python tools/refguide_check.py --doctests ma
or in RST-based documentations::
$ python tools/refguide_check.py --rst doc/source
"""
import copy
import doctest
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
import docutils.core
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from docutils.parsers.rst import directives
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Custom doctest option flag: mark a whole example block to be skipped.
SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')
if parse_version(sphinx.__version__) >= parse_version('1.5'):
    # Enable specific Sphinx directives
    from sphinx.directives.other import SeeAlso, Only
    directives.register_directive('seealso', SeeAlso)
    directives.register_directive('only', Only)
else:
    # Remove sphinx directives that don't run without Sphinx environment.
    # Sphinx < 1.5 installs all directives on import...
    directives._directives.pop('versionadded', None)
    directives._directives.pop('versionchanged', None)
    directives._directives.pop('moduleauthor', None)
    directives._directives.pop('sectionauthor', None)
    directives._directives.pop('codeauthor', None)
    directives._directives.pop('toctree', None)
# Package under inspection and the submodules whose refguide pages are checked.
BASE_MODULE = "numpy"
PUBLIC_SUBMODULES = [
    'core',
    'f2py',
    'linalg',
    'lib',
    'lib.recfunctions',
    'fft',
    'ma',
    'polynomial',
    'matrixlib',
    'random',
    'testing',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
    'fftpack.convolve': 'fftpack',
    'io.wavfile': 'io',
    'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
#
# Optionally, a subset of methods can be skipped by setting dict-values
# to a container of method-names
DOCTEST_SKIPDICT = {
    # cases where NumPy docstrings import things from SciPy:
    'numpy.lib.vectorize': None,
    'numpy.random.standard_gamma': None,
    'numpy.random.gamma': None,
    'numpy.random.vonmises': None,
    'numpy.random.power': None,
    'numpy.random.zipf': None,
    # remote / local file IO with DataSource is problematic in doctest:
    'numpy.lib.DataSource': None,
    'numpy.lib.Repository': None,
}
# __class_getitem__ (PEP 585 generics) only exists from Python 3.9 on.
if sys.version_info < (3, 9):
    DOCTEST_SKIPDICT.update({
        "numpy.core.ndarray": {"__class_getitem__"},
        "numpy.core.dtype": {"__class_getitem__"},
        "numpy.core.number": {"__class_getitem__"},
    })
# Skip non-numpy RST files, historical release notes
# Any single-directory exact match will skip the directory and all subdirs.
# Any exact match (like 'doc/release') will scan subdirs but skip files in
# the matched directory.
# Any filename will skip that file
RST_SKIPLIST = [
    'scipy-sphinx-theme',
    'sphinxext',
    'neps',
    'changelog',
    'doc/release',
    'doc/source/release',
    'doc/release/upcoming_changes',
    'c-info.ufunc-tutorial.rst',
    'c-info.python-as-glue.rst',
    'f2py.getting-started.rst',
    'f2py-examples.rst',
    'arrays.nditer.cython.rst',
    # See PR 17222, these should be fixed
    'basics.dispatch.rst',
    'basics.subclassing.rst',
    'basics.interoperability.rst',
    'misc.rst',
]
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
    r'scipy\.sparse\.linalg',
    r'scipy\.spatial\.distance',
    r'scipy\.linalg\.blas\.[sdczi].*',
    r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
    # NOTE: should NumPy have a better match between autosummary
    # listings and __all__? For now, TR isn't convinced this is a
    # priority -- focus on just getting docstrings executed / correct
    r'numpy\.*',
]
# deprecated windows in scipy.signal namespace
# (leftover from the SciPy version of this script; harmless for NumPy)
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
             'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
             'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
             'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
    REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
# Toggled by init_matplotlib() once the Agg backend has been set up.
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
    """
    Return whichever of the absolute or relative form of *path* is shorter.

    Parameters
    ----------
    path : str or None
        Path to shorten; non-string values are passed through unchanged.
    cwd : str or None
        Base directory for the relative form (defaults to the current
        working directory).

    Returns
    -------
    str
        The shorter of the absolute and relative spellings of *path*.
    """
    if not isinstance(path, str):
        return path
    base = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, base)
    # Ties go to the absolute form, matching the original behaviour.
    return absolute if len(absolute) <= len(relative) else relative
def find_names(module, names_dict):
    """
    Find refguide entries in the docstring of `module`.

    The following patterns are searched for in ``module.__doc__``:

    * 3 spaces followed by a function name, optionally followed by spaces,
      dashes and an explanation (the autosummary-style listing)
    * special directives such as ``.. data::`` and ``.. function::``
    * (scipy.constants only): quoted names like ````c````

    ``.. currentmodule::`` / ``.. module::`` lines switch which module the
    subsequent names are attributed to.

    Parameters
    ----------
    module : ModuleType
        The module whose docstring is to be searched.
    names_dict : dict
        Updated in place: maps module name -> set of found names.

    Returns
    -------
    None
    """
    patterns = [
        r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
        r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
    ]
    if module.__name__ == 'scipy.constants':
        patterns += ["^``([a-z_0-9A-Z]+)``"]
    patterns = [re.compile(pattern) for pattern in patterns]
    module_name = module.__name__
    for line in module.__doc__.splitlines():
        res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
        if res:
            # Names below this line belong to a different (sub)module.
            module_name = res.group(1)
            continue
        for pattern in patterns:
            res = re.match(pattern, line)
            if res is not None:
                name = res.group(1)
                names_dict.setdefault(module_name, set()).add(name)
                break
def get_all_dict(module):
    """
    Return the public names of `module`, split by deprecation status.

    Parameters
    ----------
    module : ModuleType
        The module whose ``__all__`` (or ``dir()``) is to be processed.

    Returns
    -------
    not_deprecated : list
        Public names that are not deprecated callables.
    deprecated : list
        Public callables that raise DeprecationWarning when called.
    others : set
        Remaining attributes of the module (in neither list above).
    """
    # A shallow copy suffices: __all__ / dir() hold only strings, so the
    # original deepcopy was needless work.
    if hasattr(module, "__all__"):
        all_dict = list(module.__all__)
    else:
        all_dict = list(dir(module))
    all_dict = [name for name in all_dict
                if not name.startswith("_")]
    # Drop leftover __future__ re-exports from the Python 2 era.
    for name in ['absolute_import', 'division', 'print_function']:
        try:
            all_dict.remove(name)
        except ValueError:
            pass
    if not all_dict:
        # Must be a pure documentation module
        all_dict.append('__doc__')
    # Modules are almost always private; real submodules need a separate
    # run of refguide_check.
    all_dict = [name for name in all_dict
                if not inspect.ismodule(getattr(module, name, None))]
    deprecated = []
    not_deprecated = []
    for name in all_dict:
        f = getattr(module, name, None)
        if callable(f) and is_deprecated(f):
            deprecated.append(name)
        else:
            not_deprecated.append(name)
    others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
    return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
    """
    Partition names into refguide/__all__ discrepancy sets.

    Parameters
    ----------
    all_dict : list
        Non-deprecated public names of the module.
    others : list
        Remaining attributes of the module.
    names : set
        Names found in the refguide docstring of the module.
    module_name : str

    Returns
    -------
    only_all : set
        Names in ``__all__`` but missing from the refguide
        (minus autosummary-skiplisted entries).
    only_ref : set
        Names in the refguide but missing from ``__all__``.
    missing : set
        Skiplisted refguide names absent from the module entirely.
    """
    def skiplisted(skiplist, name):
        # True when the fully qualified name matches any skiplist pattern.
        full = module_name + '.' + name
        return any(re.match(pat, full) for pat in skiplist)

    only_all = set()
    for name in all_dict:
        if name not in names and not skiplisted(REFGUIDE_AUTOSUMMARY_SKIPLIST, name):
            only_all.add(name)

    only_ref = set()
    missing = set()
    for name in names:
        if name in all_dict:
            continue
        if skiplisted(REFGUIDE_ALL_SKIPLIST, name):
            if name not in others:
                missing.add(name)
        else:
            only_ref.add(name)

    return only_all, only_ref, missing
def is_deprecated(f):
    """
    Return True if calling *f* raises a DeprecationWarning.

    The callable is invoked with a deliberately bogus keyword argument;
    with warnings escalated to errors, a deprecated callable surfaces its
    DeprecationWarning before complaining about the argument.

    Parameters
    ----------
    f : callable

    Returns
    -------
    bool
    """
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error")
        try:
            f(**{"not a kwarg": None})
        except DeprecationWarning:
            return True
        except Exception:
            # Any other failure (usually TypeError) means "not deprecated".
            pass
        return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
    """
    Check that `all_dict` is consistent with the `names` in `module_name`
    For instance, that there are no deprecated or extra objects.

    Parameters
    ----------
    all_dict : list
    names : set
    deprecated : list
    others : list
    module_name : ModuleType
    dots : bool
        Whether to print a dot for each check

    Returns
    -------
    list
        List of [(name, success_flag, output)...]
    """
    num_all = len(all_dict)
    num_ref = len(names)
    output = ""
    output += "Non-deprecated objects in __all__: %i\n" % num_all
    output += "Objects in refguide: %i\n\n" % num_ref
    only_all, only_ref, missing = compare(all_dict, others, names, module_name)
    # Deprecated names that still appear in the refguide are reported but
    # do not count as errors on their own.
    dep_in_ref = only_ref.intersection(deprecated)
    only_ref = only_ref.difference(deprecated)
    if len(dep_in_ref) > 0:
        output += "Deprecated objects in refguide::\n\n"
        for name in sorted(deprecated):
            output += "    " + name + "\n"
    if len(only_all) == len(only_ref) == len(missing) == 0:
        if dots:
            output_dot('.')
        return [(None, True, output)]
    else:
        if len(only_all) > 0:
            output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
            for name in sorted(only_all):
                output += "    " + name + "\n"
            output += "\nThis issue can be fixed by adding these objects to\n"
            output += "the function listing in __init__.py for this module\n"
        if len(only_ref) > 0:
            output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
            for name in sorted(only_ref):
                output += "    " + name + "\n"
            output += "\nThis issue should likely be fixed by removing these objects\n"
            output += "from the function listing in __init__.py for this module\n"
            output += "or adding them to __all__.\n"
        if len(missing) > 0:
            output += "ERROR: missing objects::\n\n"
            for name in sorted(missing):
                output += "    " + name + "\n"
        if dots:
            output_dot('F')
        return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
    """
    Validates the doc string in a snippet of documentation
    `text` from file `name`

    Parameters
    ----------
    text : str
        Docstring text
    name : str
        File name for which the doc string is to be validated
    dots : bool
        Whether to print a dot symbol for each check

    Returns
    -------
    (bool, str)
        Success flag and accumulated error output.
    """
    if text is None:
        if dots:
            output_dot('E')
        return False, "ERROR: %s: no documentation" % (name,)
    # Roles/directives that docutils alone does not know but Sphinx does;
    # "unknown" errors about these are not real failures.
    ok_unknown_items = set([
        'mod', 'doc', 'currentmodule', 'autosummary', 'data', 'attr',
        'obj', 'versionadded', 'versionchanged', 'module', 'class',
        'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
        'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
    ])
    # Run through docutils
    error_stream = io.StringIO()
    def resolve(name, is_label=False):
        # Dummy name resolver so unresolved references don't error out.
        return ("http://foo", name)
    # Unique token used to split the error stream back into per-run chunks.
    token = '<RST-VALIDATE-SYNTAX-CHECK>'
    docutils.core.publish_doctree(
        text, token,
        settings_overrides = dict(halt_level=5,
                                  traceback=True,
                                  default_reference_context='title-reference',
                                  default_role='emphasis',
                                  link_base='',
                                  resolve_name=resolve,
                                  stylesheet_path='',
                                  raw_enabled=0,
                                  file_insertion_enabled=0,
                                  warning_stream=error_stream))
    # Print errors, disregarding unimportant ones
    error_msg = error_stream.getvalue()
    errors = error_msg.split(token)
    success = True
    output = ""
    for error in errors:
        lines = error.splitlines()
        if not lines:
            continue
        m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
        if m:
            if m.group(1) in ok_unknown_items:
                continue
        # The "label" option of the math directive is Sphinx-only; ignore.
        m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
        if m:
            continue
        output += name + lines[0] + "::\n    " + "\n    ".join(lines[1:]).rstrip() + "\n"
        success = False
    if not success:
        # Append the numbered source text to make errors easy to locate.
        output += "    " + "-"*72 + "\n"
        for lineno, line in enumerate(text.splitlines()):
            output += "    %-4d    %s\n" % (lineno+1, line)
        output += "    " + "-"*72 + "\n\n"
    if dots:
        output_dot('.' if success else 'F')
    return success, output
def output_dot(msg='.', stream=sys.stderr):
    """Write *msg* to *stream* and flush immediately (progress marker)."""
    print(msg, end='', file=stream, flush=True)
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Parameters
    ----------
    module : ModuleType
    names : set
        Names from the module whose docstrings are to be validated.
    dots : bool
        Whether to print a progress dot for each check.

    Returns
    -------
    result : list
        List of [(module_name, success_flag, output),...]
    """
    # Plain data objects carry no docstring of their own worth checking.
    # (The former try/except NameError dance for the Python 2 `unicode`
    # type was dead code in this Python-3-only script.)
    skip_types = (dict, str, float, int)
    results = []
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]
    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)
        if obj is None:
            results.append((full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue
        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            except Exception:
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue
        # Non-printable characters usually mean a docstring with raw escape
        # sequences that should have been a raw string.
        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue
        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None
        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name
        results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
    return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in: names commonly appearing in doctest
# *output* so that expected/got strings can be eval()'d by Checker.
CHECK_NAMESPACE = {
      'np': np,
      'numpy': np,
      'assert_allclose': np.testing.assert_allclose,
      'assert_equal': np.testing.assert_equal,
      # recognize numpy repr's
      'array': np.array,
      'matrix': np.matrix,
      'int64': np.int64,
      'uint64': np.uint64,
      'int8': np.int8,
      'int32': np.int32,
      'float32': np.float32,
      'float64': np.float64,
      'dtype': np.dtype,
      'nan': np.nan,
      'NaN': np.nan,
      'inf': np.inf,
      'Inf': np.inf,
      'StringIO': io.StringIO,
}
class DTRunner(doctest.DocTestRunner):
    """
    Doctest runner that remembers the item being tested and feeds each
    example's source to the attached output checker.
    """
    DIVIDER = "\n"
    def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
        # item_name: label of the object whose doctests are being run.
        self._item_name = item_name
        doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
                                       optionflags=optionflags)
    def _report_item_name(self, out, new_line=False):
        # Emit a separating newline the first time a report is made for
        # this item, then clear the stored name.
        if self._item_name is not None:
            if new_line:
                out("\n")
            self._item_name = None
    def report_start(self, out, test, example):
        # Stash the example source so Checker.check_output() can consult
        # it (e.g. for stopword-based skipping).
        self._checker._source = example.source
        return doctest.DocTestRunner.report_start(self, out, test, example)
    def report_success(self, out, test, example, got):
        if self._verbose:
            self._report_item_name(out, new_line=True)
        return doctest.DocTestRunner.report_success(self, out, test, example, got)
    def report_unexpected_exception(self, out, test, example, exc_info):
        self._report_item_name(out)
        return doctest.DocTestRunner.report_unexpected_exception(
            self, out, test, example, exc_info)
    def report_failure(self, out, test, example, got):
        self._report_item_name(out)
        return doctest.DocTestRunner.report_failure(self, out, test,
                                                    example, got)
class Checker(doctest.OutputChecker):
    """
    Output checker tolerant of the kinds of mismatches common in NumPy
    docstrings: object addresses, randomness markers, plotting commands,
    float round-off, and namedtuple vs. tuple reprs.
    """
    # Matches the address part of default object reprs, e.g. "at 0x...>".
    obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
    # Stock checker used as a first, strict attempt.
    vanilla = doctest.OutputChecker()
    # Comment markers that declare the expected output as non-deterministic.
    rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
                    "# uninitialized", "#uninitialized"}
    # Source fragments (mostly matplotlib calls) whose output is not checked.
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
                 '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
                 '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
        # parse_namedtuples: try to match "Result(a=1, b=2)" against "(1, 2)".
        # atol/rtol: tolerances for the numeric comparison fallback.
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol
        if ns is None:
            self.ns = CHECK_NAMESPACE
        else:
            self.ns = ns
    def check_output(self, want, got, optionflags):
        # cut it short if they are equal
        if want == got:
            return True
        # skip stopwords in source
        if any(word in self._source for word in self.stopwords):
            return True
        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True
        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True
        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True
        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass
        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(self.ns))
            a_got = eval(got, dict(self.ns))
        except Exception:
            # Maybe we're printing a numpy array? This produces invalid python
            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
            # values. So, reinsert commas and retry.
            # TODO: handle (1) abberivation (`print(np.arange(10000))`), and
            # (2) n-dim arrays with n > 1
            s_want = want.strip()
            s_got = got.strip()
            cond = (s_want.startswith("[") and s_want.endswith("]") and
                    s_got.startswith("[") and s_got.endswith("]"))
            if cond:
                s_want = ", ".join(s_want[1:-1].split())
                s_got = ", ".join(s_got[1:-1].split())
                return self.check_output(s_want, s_got, optionflags)
            if not self.parse_namedtuples:
                return False
            # suppose that "want"  is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            try:
                num = len(a_want)
                regex = (r'[\w\d_]+\(' +
                         ', '.join([r'[\w\d_]+=(.+)']*num) +
                         r'\)')
                grp = re.findall(regex, got.replace('\n', ' '))
                if len(grp) > 1:  # no more than one for now
                    return False
                # fold it back to a tuple
                got_again = '(' + ', '.join(grp[0]) + ')'
                return self.check_output(want, got_again, optionflags)
            except Exception:
                return False
        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
            except (TypeError, ValueError):
                return False
    def _do_check(self, want, got):
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
    """
    Run modified doctests for the set of `tests`.

    Parameters
    ----------
    tests : list
        doctest.DocTest instances to run.
    full_name : str
        Label for the item being tested (used in failure reports).
    verbose : bool
    doctest_warnings : bool
        If True, redirect stderr to stdout instead of capturing it.

    Returns
    -------
    tuple(bool, list)
        Tuple of (success, output)
    """
    flags = NORMALIZE_WHITESPACE | ELLIPSIS
    runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
                      verbose=verbose)
    output = io.StringIO(newline='')
    success = True
    # Redirect stderr to the stdout or output
    tmp_stderr = sys.stdout if doctest_warnings else output
    @contextmanager
    def temp_cwd():
        # Run in a scratch directory so examples that write files don't
        # pollute the source tree.
        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        try:
            os.chdir(tmpdir)
            yield tmpdir
        finally:
            os.chdir(cwd)
            shutil.rmtree(tmpdir)
    # Run tests, trying to restore global state afterward
    cwd = os.getcwd()
    with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
            redirect_stderr(tmp_stderr):
        # try to ensure random seed is NOT reproducible
        np.random.seed(None)
        ns = {}
        for t in tests:
            # We broke the tests up into chunks to try to avoid PSEUDOCODE
            # This has the unfortunate side effect of restarting the global
            # namespace for each test chunk, so variables will be "lost" after
            # a chunk. Chain the globals to avoid this
            t.globs.update(ns)
            t.filename = short_path(t.filename, cwd)
            # Process our options
            if any([SKIPBLOCK in ex.options for ex in t.examples]):
                continue
            fails, successes = runner.run(t, out=output.write, clear_globs=False)
            if fails > 0:
                success = False
            ns = t.globs
    output.seek(0)
    return success, output.read()
def check_doctests(module, verbose, ns=None,
                   dots=True, doctest_warnings=False):
    """
    Check code in docstrings of the module's public symbols.

    Parameters
    ----------
    module : ModuleType
        Name of module
    verbose : bool
        Should the result be verbose
    ns : dict
        Name space of module
    dots : bool
    doctest_warnings : bool

    Returns
    -------
    results : list
        List of [(item_name, success_flag, output), ...]
    """
    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)
    # Loop over non-deprecated items
    results = []
    for name in get_all_dict(module)[0]:
        full_name = module.__name__ + '.' + name
        if full_name in DOCTEST_SKIPDICT:
            skip_methods = DOCTEST_SKIPDICT[full_name]
            # None means "skip the whole object"; otherwise it is a set of
            # method names to filter out below.
            if skip_methods is None:
                continue
        else:
            skip_methods = None
        try:
            obj = getattr(module, name)
        except AttributeError:
            import traceback
            results.append((full_name, False,
                            "Missing item!\n" +
                            traceback.format_exc()))
            continue
        finder = doctest.DocTestFinder()
        try:
            tests = finder.find(obj, name, globs=dict(ns))
        except Exception:
            import traceback
            results.append((full_name, False,
                            "Failed to get doctests!\n" +
                            traceback.format_exc()))
            continue
        if skip_methods is not None:
            tests = [i for i in tests if
                     i.name.partition(".")[2] not in skip_methods]
        success, output = _run_doctests(tests, full_name, verbose,
                                        doctest_warnings)
        if dots:
            output_dot('.' if success else 'F')
        results.append((full_name, success, output))
    if HAVE_MATPLOTLIB:
        # Close any figures the examples may have opened.
        import matplotlib.pyplot as plt
        plt.close('all')
    return results
def check_doctests_testfile(fname, verbose, ns=None,
                            dots=True, doctest_warnings=False):
    """
    Check code in a text file.

    Mimic `check_doctests` above, differing mostly in test discovery.
    (which is borrowed from stdlib's doctest.testfile here,
    https://github.com/python-git/python/blob/master/Lib/doctest.py)

    Parameters
    ----------
    fname : str
        File name
    verbose : bool
    ns : dict
        Name space
    dots : bool
    doctest_warnings : bool

    Returns
    -------
    list
        List of [(item_name, success_flag, output), ...]

    Notes
    -----
    refguide can be signalled to skip testing code by adding
    ``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. For example

    >>> plt.plot(...)  # doctest: +SKIP
    >>> random.randint(0,10)
    5 # random

    We also try to weed out pseudocode:
    * We maintain a list of exceptions which signal pseudocode,
    * We split the text file into "blocks" of code separated by empty lines
      and/or intervening text.
    * If a block contains a marker, the whole block is then assumed to be
      pseudocode. It is then not being doctested.

    The rationale is that typically, the text looks like this:

    blah
    <BLANKLINE>
    >>> from numpy import some_module   # pseudocode!
    >>> func = some_module.some_function
    >>> func(42)                  # still pseudocode
    146
    <BLANKLINE>
    blah
    <BLANKLINE>
    >>> 2 + 3        # real code, doctest it
    5
    """
    if ns is None:
        ns = CHECK_NAMESPACE
    results = []
    _, short_name = os.path.split(fname)
    if short_name in DOCTEST_SKIPDICT:
        return results
    full_name = fname
    with open(fname, encoding='utf-8') as f:
        text = f.read()
    # Source fragments whose presence marks a whole block as pseudocode.
    PSEUDOCODE = set(['some_function', 'some_module', 'import example',
                      'ctypes.CDLL',     # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutotial
    ])
    # split the text into "blocks" and try to detect and omit pseudocode blocks.
    parser = doctest.DocTestParser()
    good_parts = []
    base_line_no = 0
    for part in text.split('\n\n'):
        try:
            tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
        except ValueError as e:
            if e.args[0].startswith('line '):
                # fix line number since `parser.get_doctest` does not increment
                # the reported line number by base_line_no in the error message
                parts = e.args[0].split()
                parts[1] = str(int(parts[1]) + base_line_no)
                e.args = (' '.join(parts),) + e.args[1:]
            raise
        if any(word in ex.source for word in PSEUDOCODE
               for ex in tests.examples):
            # omit it
            pass
        else:
            # `part` looks like a good code, let's doctest it
            good_parts.append((part, base_line_no))
        # +2 accounts for the blank line consumed by the '\n\n' split.
        base_line_no += part.count('\n') + 2
    # Reassemble the good bits and doctest them:
    tests = []
    for good_text, line_no in good_parts:
        tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
    success, output = _run_doctests(tests, full_name, verbose,
                                    doctest_warnings)
    if dots:
        output_dot('.' if success else 'F')
    results.append((full_name, success, output))
    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')
    return results
def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
    """
    Generator function to walk `base_path` and its subdirectories, skipping
    files or directories in RST_SKIPLIST, and yield each file with a suffix in
    `suffixes`

    Parameters
    ----------
    base_path : str
        Base path of the directory to be processed
    verbose : int
    suffixes : tuple

    Yields
    ------
    path
        Path of the directory and its sub directories
    """
    # os.path.isfile() is already False for nonexistent paths, so the
    # former extra os.path.exists() check was redundant.
    if os.path.isfile(base_path):
        yield base_path
    for dir_name, subdirs, files in os.walk(base_path, topdown=True):
        if dir_name in RST_SKIPLIST:
            if verbose > 0:
                sys.stderr.write('skipping files in %s' % dir_name)
            files = []
        # Prune skiplisted directories in place so os.walk never enters them.
        for p in RST_SKIPLIST:
            if p in subdirs:
                if verbose > 0:
                    sys.stderr.write('skipping %s and subdirs' % p)
                subdirs.remove(p)
        for f in files:
            if (os.path.splitext(f)[1] in suffixes and
                    f not in RST_SKIPLIST):
                yield os.path.join(dir_name, f)
def check_documentation(base_path, results, args, dots):
    """
    Doctest the examples of every *.rst file located under `base_path` and
    append one entry per file to `results`.

    See Also
    --------
    check_doctests_testfile
    """
    for rst_file in iter_included_files(base_path, args.verbose):
        if dots:
            sys.stderr.write(rst_file + ' ')
            sys.stderr.flush()

        file_results = check_doctests_testfile(
            rst_file,
            (args.verbose >= 2), dots=dots,
            doctest_warnings=args.doctest_warnings)

        # The reporting code expects a module-like object with a __name__;
        # fabricate one carrying the file path.
        def scratch():
            pass
        scratch.__name__ = rst_file

        results.append((scratch, file_results))
        if dots:
            sys.stderr.write('\n')
            sys.stderr.flush()
def init_matplotlib():
    """
    Try to initialize matplotlib with the non-interactive Agg backend and
    record the outcome in the module-level HAVE_MATPLOTLIB flag.
    """
    global HAVE_MATPLOTLIB
    try:
        import matplotlib
        # Non-interactive backend: safe for headless CI runs.
        matplotlib.use('Agg')
    except ImportError:
        HAVE_MATPLOTLIB = False
    else:
        HAVE_MATPLOTLIB = True
def main(argv):
    """
    Validates the docstrings of all the pre decided set of
    modules for errors and docstring standards.

    Parameters
    ----------
    argv : list of str
        Command-line arguments, without the program name.

    Exits with status 0 when every check passes, 1 otherwise.
    """
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("module_names", metavar="SUBMODULES", default=[],
                        nargs='*', help="Submodules to check (default: all public)")
    parser.add_argument("--doctests", action="store_true",
                        help="Run also doctests on ")
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument("--doctest-warnings", action="store_true",
                        help="Enforce warning checking for doctests")
    parser.add_argument("--rst", nargs='?', const='doc', default=None,
                        help=("Run also examples from *rst files "
                              "discovered walking the directory(s) specified, "
                              "defaults to 'doc'"))
    args = parser.parse_args(argv)

    modules = []
    names_dict = {}

    # No explicit submodules requested -> check every public one.
    if not args.module_names:
        args.module_names = list(PUBLIC_SUBMODULES)

    os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'

    module_names = list(args.module_names)
    # Some docs live under a different module; pull those modules in too.
    for name in module_names:
        if name in OTHER_MODULE_DOCS:
            name = OTHER_MODULE_DOCS[name]
            if name not in module_names:
                module_names.append(name)

    dots = True
    success = True
    results = []
    errormsgs = []

    # matplotlib is only needed when examples are actually executed.
    if args.doctests or args.rst:
        init_matplotlib()

    # Import everything up front and collect the documented names.
    for submodule_name in module_names:
        module_name = BASE_MODULE + '.' + submodule_name
        __import__(module_name)
        module = sys.modules[module_name]

        if submodule_name not in OTHER_MODULE_DOCS:
            find_names(module, names_dict)

        if submodule_name in args.module_names:
            modules.append(module)

    if args.doctests or not args.rst:
        print("Running checks for %d modules:" % (len(modules),))
        for module in modules:
            if dots:
                sys.stderr.write(module.__name__ + ' ')
                sys.stderr.flush()

            all_dict, deprecated, others = get_all_dict(module)
            names = names_dict.get(module.__name__, set())

            mod_results = []
            mod_results += check_items(all_dict, names, deprecated, others,
                                       module.__name__)
            mod_results += check_rest(module, set(names).difference(deprecated),
                                      dots=dots)
            if args.doctests:
                mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
                                              doctest_warnings=args.doctest_warnings)

            # Every checker is expected to return (name, success, output) tuples.
            for v in mod_results:
                assert isinstance(v, tuple), v

            results.append((module, mod_results))

            if dots:
                sys.stderr.write('\n')
                sys.stderr.flush()

    if args.rst:
        base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
        rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
        if os.path.exists(rst_path):
            print('\nChecking files in %s:' % rst_path)
            check_documentation(rst_path, results, args, dots)
        else:
            sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
            errormsgs.append('invalid directory argument to --rst')
        if dots:
            sys.stderr.write("\n")
            sys.stderr.flush()

    # Report results
    for module, mod_results in results:
        success = all(x[1] for x in mod_results)
        if not success:
            errormsgs.append(f'failed checking {module.__name__}')

        # Quiet mode: only failing modules are reported.
        if success and args.verbose == 0:
            continue

        print("")
        print("=" * len(module.__name__))
        print(module.__name__)
        print("=" * len(module.__name__))
        print("")

        for name, success, output in mod_results:
            if name is None:
                if not success or args.verbose >= 1:
                    print(output.strip())
                    print("")
            elif not success or (args.verbose >= 2 and output.strip()):
                print(name)
                print("-"*len(name))
                print("")
                print(output.strip())
                print("")

    if len(errormsgs) == 0:
        print("\nOK: all checks passed!")
        sys.exit(0)
    else:
        print('\nERROR: ', '\n    '.join(errormsgs))
        sys.exit(1)


if __name__ == '__main__':
    main(argv=sys.argv[1:])
| numpy/numpy | tools/refguide_check.py | Python | bsd-3-clause | 38,678 | [
"Gaussian"
] | 44a3245bf13a349eac4658635058338196a78afb7ada35ddc2bf584dcf34c9e5 |
########################################################################
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/07/19 12:16:39
########################################################################
""" :mod: TypedListTests
=======================
.. module: TypedListTests
:synopsis: test case for TypedList
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
test cases for TypedList
"""
__RCSID__ = "$Id $"
##
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/07/19 12:16:48
## imports
import unittest
## SUT
from DIRAC.Core.Utilities.TypedList import TypedList, TDeque
class TestClass(object):
    """Minimal placeholder type used to exercise TypedList type checking."""
    pass
########################################################################
class TypedListTestCase(unittest.TestCase):
    """
    .. class:: TypedListTestCase

    Test case exercising TypedList construction, list arithmetic and TDeque.
    """

    def setUp(self):
        """Prepare the allowed-type specifications shared by the tests."""
        self.numericTypes = (int, long, float)
        self.floatType = float
        self.testClassType = TestClass

    def test01ctor(self):
        """c'tor test"""
        numeric_list = TypedList(allowedTypes=self.numericTypes)
        float_list = TypedList(allowedTypes=self.floatType)
        test_class_list = TypedList(allowedTypes=self.testClassType)

        self.assertEqual(isinstance(numeric_list, TypedList), True)
        self.assertEqual(isinstance(float_list, TypedList), True)
        self.assertEqual(isinstance(test_class_list, TypedList), True)

        self.assertEqual(numeric_list.allowedTypes() == self.numericTypes, True)
        self.assertEqual(float_list.allowedTypes() == self.floatType, True)
        self.assertEqual(test_class_list.allowedTypes() == self.testClassType, True)

        # allowedTypes entries must be types, not instances
        self.assertRaises(TypeError,
                          TypedList.__init__,
                          (),
                          {"allowedTypes": (1, 2, 3)})

    def test02_add_iadd_radd(self):
        """+= +lvalue +rvalue"""
        numeric_list = TypedList((1, 1.0, 1), self.numericTypes)
        # in-place +=
        numeric_list += [2, 2.0, 2]
        self.assertEqual(len(numeric_list), 6)
        self.assertEqual(numeric_list, [1, 1.0, 1, 2, 2.0, 2])
        # +lvalue
        left_sum = numeric_list + [3, 3.0, 3]
        self.assertEqual(len(left_sum), 9)
        self.assertEqual(left_sum, [1, 1.0, 1, 2, 2.0, 2, 3, 3.0, 3])
        # rvalue+
        right_sum = [0, 0.0, 0] + numeric_list
        self.assertEqual(len(right_sum), 9)
        self.assertEqual(right_sum, [0, 0.0, 0, 1, 1.0, 1, 2, 2.0, 2])

    def test03_setitem_append_extend_insert(self):
        # placeholder: not implemented yet
        pass

    def test_deque(self):
        class A(object):
            def __init__(self, i):
                self.i = i

            def __str__(self):
                return str(self.i)

        typed_deque = TDeque([A(1), A(2)], allowedTypes=(A,))
        typed_deque.append(A(3))
## test execution
# Load and run the suite with verbose output when invoked as a script.
if __name__ == "__main__":
    TESTLOADER = unittest.TestLoader()
    SUITE = TESTLOADER.loadTestsFromTestCase( TypedListTestCase )
    unittest.TextTestRunner(verbosity=3).run( SUITE )
| arrabito/DIRAC | Core/Utilities/test/Test_TypedList.py | Python | gpl-3.0 | 2,911 | [
"DIRAC"
] | fd85d76a949433226caa94671c28d9c8a662af4d3c7701dfe00601579cc055bb |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import pymatgen
from pymatgen.analysis.cost.cost import CostDBCSV, CostAnalyzer, CostDBElements
# Directory of shared test fixtures: ../test_files relative to the installed
# pymatgen package.
module_dir = os.path.join(os.path.dirname(pymatgen.__file__), '..', 'test_files')
class CostAnalyzerTest(unittest.TestCase):
    """Verify CostAnalyzer pricing against two small CSV cost databases."""

    def setUp(self):
        """Build one analyzer per bundled CSV cost database."""
        db1 = CostDBCSV(os.path.join(module_dir, "costdb_1.csv"))
        db2 = CostDBCSV(os.path.join(module_dir, "costdb_2.csv"))
        self.ca1 = CostAnalyzer(db1)
        self.ca2 = CostAnalyzer(db2)

    def test_cost_per_kg(self):
        for analyzer, formula, price in ((self.ca1, "Ag", 3),
                                         (self.ca1, "O", 1),
                                         (self.ca1, "AgO", 2.7416),
                                         (self.ca2, "AgO", 1.5)):
            self.assertAlmostEqual(analyzer.get_cost_per_kg(formula), price, 3)

    def test_cost_per_mol(self):
        for analyzer, formula, price in ((self.ca1, "Ag", 0.3236),
                                         (self.ca1, "O", 0.0160),
                                         (self.ca1, "AgO", 0.3396),
                                         (self.ca2, "AgO", 0.1858)):
            self.assertAlmostEqual(analyzer.get_cost_per_mol(formula), price, 3)

    def test_sanity(self):
        # Both databases must agree on an element they share.
        self.assertEqual(self.ca1.get_cost_per_kg("Ag"),
                         self.ca2.get_cost_per_kg("Ag"))
class CostDBTest(unittest.TestCase):
    """Sanity check on the built-in elemental cost database."""

    def test_sanity(self):
        analyzer = CostAnalyzer(CostDBElements())
        # Platinum oxide must price above magnesium oxide per kg.
        self.assertGreater(analyzer.get_cost_per_kg("PtO"),
                           analyzer.get_cost_per_kg("MgO"))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
"pymatgen"
] | fa3751b8c1b79ff820ba300b8feab1ddee4c5369230218725344b245ad703cba |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
trueCoverage_rematch.py - Estimate the true bacterial chromosome
coverage and detects contamination with different strain or species.
Copyright (C) 2018 Miguel Machado <mpmachado@medicina.ulisboa.pt>
Last modified: February 27, 2018
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os.path
import utils
import functools
import sys
import argparse
version = '0.3'
def check_existing_default_config(species, script_path):
    """
    Look for a bundled trueCoverage config/reference pair for `species`.

    The bundled files live in modules/trueCoverage_rematch/ next to the
    calling script and are named "<genus>_<species>.config" and
    "<genus>_<species>.fasta".

    Parameters
    ----------
    species : str
        Species name, e.g. "Escherichia coli" (case-insensitive).
    script_path : str
        Path of the calling script.

    Returns
    -------
    tuple
        (config_path, reference_path); each element is None when the
        corresponding file was not found.
    """
    wanted = '_'.join(species.lower().split(' '))
    folder = os.path.join(os.path.dirname(script_path), 'modules', 'trueCoverage_rematch', '')
    config = None
    reference = None
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        if entry.startswith('.') or not os.path.isfile(path):
            continue
        if entry == wanted + '.config':
            config = path
        elif entry == wanted + '.fasta':
            reference = path
    return config, reference
def parse_config(config_file):
    """
    Parse a trueCoverage_rematch settings file.

    The file alternates "#field_name" header lines with value lines; blank
    lines are ignored and only the first space-delimited token of each line
    is used. Integer and float fields are converted and range-checked.

    Parameters
    ----------
    config_file : str
        Path to the config file.

    Returns
    -------
    dict
        Settings dict with every required field populated.

    Exits via sys.exit() when a field is missing or out of range.
    """
    config = {'reference_file': None,
              'length_extra_seq': None,
              'maximum_number_absent_genes': None,
              'maximum_number_genes_multiple_alleles': None,
              'minimum_read_coverage': None,
              'minimum_depth_presence': None,
              'minimum_depth_call': None,
              'minimum_depth_frequency_dominant_allele': None,
              'minimum_gene_coverage': None,
              'minimum_gene_identity': None}

    # Bug fix: mode was 'rtU'; the 'U' flag is rejected by Python >= 3.11
    # (ValueError). Values are rstrip'ed of '\r\n' below, so universal
    # newlines are not required.
    with open(config_file, 'rt') as reader:
        field = None
        for line in reader:
            line = line.rstrip('\r\n')
            if len(line) > 0:
                # Only the first space-delimited token matters.
                line = line.split(' ')[0]
                if line.startswith('#'):
                    # Header line: remember which field the next value fills.
                    line = line[1:].split(' ')[0]
                    field = line
                else:
                    if field is not None:
                        if field in ['length_extra_seq', 'maximum_number_absent_genes',
                                     'maximum_number_genes_multiple_alleles', 'minimum_read_coverage',
                                     'minimum_depth_presence', 'minimum_depth_call', 'minimum_gene_coverage',
                                     'minimum_gene_identity']:
                            line = int(line)
                            if field in ['minimum_gene_coverage', 'minimum_gene_identity']:
                                if line < 0 or line > 100:
                                    sys.exit('minimum_gene_coverage in trueCoverage_rematch config file must be an'
                                             ' integer between 0 and 100')
                        elif field == 'minimum_depth_frequency_dominant_allele':
                            line = float(line)
                            if line < 0 or line > 1:
                                sys.exit('minimum_depth_frequency_dominant_allele in trueCoverage_rematch config file'
                                         ' must be a double between 0 and 1')
                        config[field] = line
                        field = None

    # Every field must have been supplied.
    for field in config:
        if config[field] is None:
            sys.exit(field + ' in trueCoverage_rematch config file is missing')

    return config
def clean_headers_reference_file(reference_file, outdir, extra_seq, rematch_module):
    """
    Sanitize fasta headers that contain characters problematic for ReMatCh.

    When any header needed renaming, a new fasta file with the cleaned
    headers is written to `outdir`; otherwise the original path is returned.

    Returns
    -------
    tuple
        (reference_file_path, genes, sequences) as produced by
        rematch_module.get_sequence_information.
    """
    problematic_characters = ["|", " ", ",", ".", "(", ")", "'", "/", ":"]
    print('Checking if reference sequences contain ' + str(problematic_characters) + '\n')

    new_reference_file = str(reference_file)
    sequences, genes, headers_changed = rematch_module.get_sequence_information(reference_file, extra_seq)
    if headers_changed:
        print('At least one of the those characters was found. Replacing those with _' + '\n')
        base_name = os.path.splitext(os.path.basename(reference_file))[0]
        new_reference_file = os.path.join(outdir, base_name + '.headers_renamed.fasta')
        with open(new_reference_file, 'wt') as writer:
            for seq_id in sequences:
                writer.write('>' + sequences[seq_id]['header'] + '\n')
                # Wrap sequences at 80 characters per fasta convention.
                for chunk in rematch_module.chunkstring(sequences[seq_id]['sequence'], 80):
                    writer.write(chunk + '\n')
    return new_reference_file, genes, sequences
def rematch_report_assess_failing(outdir, time_str, rematch_folder, sample_data_general, config):
    """
    Move the ReMatCh sample report into `outdir` and evaluate the sample
    against the true-coverage QC thresholds.

    Parameters
    ----------
    outdir : str
        Directory where the report is stored.
    time_str : str or None
        "%Y%m%d-%H%M%S" timestamp embedded in the report name, or None.
    rematch_folder : str
        Temporary ReMatCh folder containing rematchModule_report.txt.
    sample_data_general : dict
        General per-sample ReMatCh results.
    config : dict
        trueCoverage_rematch settings (parse_config output).

    Returns
    -------
    dict
        Reasons for failing true_coverage (empty when the sample passes).
    """
    print('Writing report file')
    if time_str is not None:
        report_name = 'trueCoverage_report.{time_str}.txt'.format(time_str=time_str)
    else:
        report_name = 'trueCoverage_report.txt'
    os.rename(os.path.join(rematch_folder, 'rematchModule_report.txt'),
              os.path.join(outdir, report_name))

    failing = {}

    absent = sample_data_general['number_absent_genes']
    if absent > config['maximum_number_absent_genes']:
        failing['absent_genes'] = 'The number of absent genes ({real_absent}) exceeds the maximum allowed' \
                                  ' ({max_absent})'.format(real_absent=absent,
                                                           max_absent=config['maximum_number_absent_genes'])

    multiple = sample_data_general['number_genes_multiple_alleles']
    if multiple > config['maximum_number_genes_multiple_alleles']:
        failing['multiple_alleles'] = 'The number of genes with multiple alleles' \
                                      ' ({real_multiple}) exceeds the maximum allowed' \
                                      ' ({max_multiple})'.format(
                                          real_multiple=multiple,
                                          max_multiple=config['maximum_number_genes_multiple_alleles'])

    coverage = sample_data_general['mean_sample_coverage']
    if coverage < config['minimum_read_coverage']:
        failing['read_coverage'] = 'The mean read coverage for genes present' \
                                   ' ({real_coverage}) dit not meet the minimum required' \
                                   ' ({min_coverage})'.format(real_coverage=coverage,
                                                              min_coverage=config['minimum_read_coverage'])

    return failing
# Decorator reporting wall-clock time of the wrapped call under the label
# "True coverage check" (see utils.timer).
trueCoverage_timer = functools.partial(utils.timer, name='True coverage check')
@trueCoverage_timer
def run_true_coverage(sample, fastq, reference, threads, outdir, extra_seq, min_cov_presence, min_cov_call,
                      min_frequency_dominant_allele, min_gene_coverage, debug, min_gene_identity,
                      true_coverage_config, rematch_script, num_map_loc=1, bowtie_algorithm='--very-sensitive-local',
                      clean_run_rematch=True):
    """
    Map the sample reads against the reference with ReMatCh and decide
    whether the sample passes the true-coverage QC thresholds.

    Body returns: (run_successfully, pass_qc, failing, sample_data_general).
    NOTE(review): the @trueCoverage_timer decorator (utils.timer) appears to
    add timing information to the returned tuple -- main() unpacks an extra
    time_taken element; confirm against utils.timer.
    """
    pass_qc = False
    failing = {}

    # Work in a dedicated, freshly created subfolder of outdir.
    true_coverage_folder = os.path.join(outdir, 'trueCoverage', '')
    utils.removeDirectory(true_coverage_folder)
    os.mkdir(true_coverage_folder)

    # Import the rematch_module package sitting next to the ReMatCh script.
    sys.path.append(os.path.join(os.path.dirname(rematch_script), 'modules'))
    import rematch_module

    # Run ReMatCh
    reference_file, gene_list_reference, reference_dict = clean_headers_reference_file(reference, true_coverage_folder,
                                                                                       extra_seq, rematch_module)
    time_taken, run_successfully, data_by_gene, sample_data_general, consensus_files, consensus_sequences = \
        rematch_module.run_rematch_module(sample, fastq, reference_file, threads, true_coverage_folder, extra_seq,
                                          min_cov_presence, min_cov_call, min_frequency_dominant_allele,
                                          min_gene_coverage, debug, num_map_loc, min_gene_identity, 'first', 7, 'none',
                                          reference_dict, 'X', bowtie_algorithm, None, gene_list_reference, True,
                                          clean_run=clean_run_rematch)
    if run_successfully:
        failing = rematch_report_assess_failing(outdir, None, true_coverage_folder, sample_data_general,
                                                true_coverage_config)
    else:
        failing['sample'] = 'Did not run'

    # An empty failing dict means the sample passed every threshold.
    if len(failing) == 0:
        pass_qc = True
        failing['sample'] = False
    else:
        print(failing)

    # Keep intermediate files only in debug mode.
    if not debug:
        utils.removeDirectory(true_coverage_folder)

    return run_successfully, pass_qc, failing, sample_data_general
def arguments_required_length(tuple_length_options, argument_name):
    """
    Build an argparse Action that accepts only a fixed set of nargs counts.

    Parameters
    ----------
    tuple_length_options : tuple
        Allowed numbers of values, e.g. (1, 2).
    argument_name : str
        Name used in the error message, e.g. '--fastq'.
    """
    class RequiredLength(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            if len(values) not in tuple_length_options:
                # parser.error() prints usage and exits.
                parser.error('argument {argument_name} requires one of the following number of arguments:'
                             ' {tuple_length_options}'.format(argument_name=argument_name,
                                                              tuple_length_options=tuple_length_options))
            setattr(args, self.dest, values)
    return RequiredLength
def arguments_check_directory(argument_name):
    """
    Build an argparse Action that validates its value is an existing
    directory and stores its absolute path.

    Parameters
    ----------
    argument_name : str
        Argument name used in the error message, e.g. '--indir'.

    Returns
    -------
    type
        argparse.Action subclass performing the check.
    """
    class ArgumentsCheckDirectory(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            directory = os.path.abspath(values)
            if os.path.isdir(directory):
                setattr(args, self.dest, directory)
            else:
                # parser.error() prints usage and exits.
                parser.error('argument {argument_name}: {directory} is not a directory'.format(
                    argument_name=argument_name, directory=directory))
    return ArgumentsCheckDirectory
def check_fasta_config_exist(indir, species):
    """
    Check if species fasta and config files exist inside indir

    Parameters
    ----------
    indir : str
        Directory path
    species : list
        List with species name, e.g. ['escherichia', 'coli']

    Returns
    -------
    msg : str or None
        Message with the error found or None
    required_files : dict
        Dictionary with the two files required, e.g.
        {'fasta': /path/escherichia_coli.fasta, 'config': /path/escherichia_coli.config}
    """
    files = [f for f in os.listdir(indir) if not f.startswith('.') and os.path.isfile(os.path.join(indir, f))]

    required_files = {}
    for file_found in files:
        root, ext = os.path.splitext(file_found)
        if root.lower() == '_'.join(species).lower():
            ext = ext.lstrip('.')
            if ext in ('fasta', 'config'):
                required_files[ext] = os.path.join(indir, file_found)

    msg = None
    if len(required_files) == 1:
        # Bug fix: dict.keys() is not subscriptable on Python 3; list() works
        # identically on Python 2 and 3.
        msg = 'only found the {ext} file for {species} species (both fasta and config are' \
              ' required)'.format(ext=list(required_files)[0], species=' '.join(species))
    elif len(required_files) == 0:
        msg = 'no files were found for {species} species (a fasta and config files are' \
              ' required)'.format(species=' '.join(species))

    return msg, required_files
def include_rematch_dependencies_path(do_not_use_provided_software=False):
    """
    Locate ReMatCh in PATH, verify its version, and optionally prepend the
    bundled bowtie2/samtools/bcftools binaries to PATH.

    Returns
    -------
    str
        Path of the rematch.py script.

    Exits via sys.exit() when ReMatCh is absent or too old.
    """
    rematch_script = utils.find_rematch()
    if rematch_script is None:
        sys.exit('ReMatCh not found in PATH')

    missing_programs, _ = utils.checkPrograms({'rematch.py': ['--version', '>=', '4.0.1']})
    if len(missing_programs) > 0:
        sys.exit('\n' + 'Errors:' + '\n' + '\n'.join(missing_programs))

    if not do_not_use_provided_software:
        # Prefer the tool versions shipped alongside ReMatCh.
        path_variable = os.environ['PATH']
        script_folder = os.path.dirname(rematch_script)
        bowtie2 = os.path.join(script_folder, 'src', 'bowtie2-2.2.9')
        samtools = os.path.join(script_folder, 'src', 'samtools-1.3.1', 'bin')
        bcftools = os.path.join(script_folder, 'src', 'bcftools-1.3.1', 'bin')
        os.environ['PATH'] = str(':'.join([bowtie2, samtools, bcftools, path_variable]))
        print('PATH={path}'.format(path=os.environ['PATH']))

    return rematch_script
def start_logger(workdir):
    """
    Redirect stdout to a time-stamped Logger writing into `workdir`.

    Returns
    -------
    tuple
        (logfile_path, time_str) where time_str is "%Y%m%d-%H%M%S".
    """
    import time
    time_str = time.strftime("%Y%m%d-%H%M%S")
    sys.stdout = Logger(workdir, time_str)
    return sys.stdout.getLogFile(), time_str
class Logger(object):
    """Tee for stdout: mirrors every write into run.<time_str>.log."""

    def __init__(self, out_directory, time_str):
        # Remember the real stdout so writes can be echoed to it.
        self.terminal = sys.stdout
        self.logfile = os.path.join(out_directory, str('run.' + time_str + '.log'))
        self.log = open(self.logfile, "w")

    def write(self, message):
        """Write to the terminal and persist to the log file immediately."""
        self.terminal.write(message)
        self.log.write(message)
        self.log.flush()

    def flush(self):
        # The log file is flushed on every write; nothing further needed.
        pass

    def getLogFile(self):
        """Return the path of the log file."""
        return self.logfile
def main():
    """
    Command-line entry point: parse options, run the true-coverage check on
    the given fastq files, and write report/failing files to --outdir.
    """
    # The ReMatCh modules used downstream are Python 2 only.
    if sys.version_info[0] > 2:
        sys.exit('Must be using Python 2. Try calling "python2 trueCoverage_rematch.py"')

    parser = argparse.ArgumentParser(prog='python2 trueCoverage_rematch.py',
                                     description="Estimate the true bacterial chromosome coverage and detects"
                                                 " contamination with different strain or species",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version', help='Version information', action='version', version=str('%(prog)s v' + version))

    parser_required = parser.add_argument_group('Required options')
    parser_required.add_argument('-f', '--fastq', nargs='+', action=arguments_required_length((1, 2), '--fastq'),
                                 type=argparse.FileType('r'), metavar=('/path/to/input/file.fq.gz'),
                                 help='Path to single OR paired-end fastq files. If two files are passed, they will be'
                                      ' assumed as being the paired fastq files',
                                 required=True)

    # Either pick a bundled species fasta/config pair...
    parser_true_coverage_files = parser.add_argument_group('Options for trueCoverage_rematch species files')
    parser_true_coverage_files.add_argument('-s', '--species', nargs=2, type=str,
                                            metavar=('Yersinia', 'enterocolitica'), help='Species name',
                                            required=False)
    parser_true_coverage_files.add_argument('-i', '--indir', type=str, action=arguments_check_directory('--indir'),
                                            metavar='/path/to/fasta/config/indir/directory/',
                                            help='Path to the directory where species reference fasta files and config'
                                                 ' files can be found',
                                            required=False)

    # ...or supply explicit fasta and config files.
    parser_external_files = parser.add_argument_group('Options for external fasta and config files')
    parser_external_files.add_argument('-r', '--reference', type=argparse.FileType('r'),
                                       metavar='/path/to/reference_sequence.fasta',
                                       help='Fasta file containing reference sequences. Ideally they should be'
                                            ' housekeeping gene sequences distributed throughout the genome.'
                                            ' Alternativelly, MLST gene sequences might be a good approximation.',
                                       required=False)
    parser_external_files.add_argument('-c', '--config', type=argparse.FileType('r'), metavar='/path/to/config.file',
                                       help='Config file with the settings to run trueCoverage_rematch. Check some'
                                            ' examples in INNUca GitHub ('
                                            'https://github.com/B-UMMI/INNUca/tree/master/modules/trueCoverage_rematch).',
                                       required=False)

    parser_optional = parser.add_argument_group('Facultative options')
    parser_optional.add_argument('-o', '--outdir', type=str, metavar='/path/to/output/directory/',
                                 help='Path to the directory where the results will be stored',
                                 required=False, default='.')
    parser_optional.add_argument('-j', '--threads', type=int, metavar='N', help='Number of threads to use',
                                 required=False, default=1)
    parser_optional.add_argument('--bowtieAlgo', type=str, metavar='"--very-sensitive-local"',
                                 help='Bowtie2 alignment mode to be used via ReMatCh to map the reads and'
                                      ' determine the true coverage. It can be an end-to-end alignment'
                                      ' (unclipped alignment) or local alignment (soft clipped'
                                      ' alignment). Also, the user can choose between fast or sensitive'
                                      ' alignments. Please check Bowtie2 manual for extra information:'
                                      ' http://bowtie-bio.sourceforge.net/bowtie2/index.shtml .'
                                      ' This option should be provided between quotes and starting with'
                                      ' an empty space (like --bowtieAlgo " --very-fast") or using equal'
                                      ' sign (like --bowtieAlgo="--very-fast")',
                                 required=False, default='--very-sensitive-local')
    parser_optional.add_argument('--json', action='store_true', help='Stores the results also in JSON format')

    args = parser.parse_args()

    # The two input modes are mutually exclusive and one must be chosen.
    if (args.species is not None or args.indir is not None) and (args.reference is not None or args.config is not None):
        parser.error('Do not mix options from the two pairs: --species and --indir OR --reference and --config')
    elif args.species is None and args.indir is None and args.reference is None and args.config is None:
        parser.error('At least one of the following option pairs must be specified:'
                     ' --species and --indir OR --reference and --config')

    if args.species is not None:
        error_msg, required_files = check_fasta_config_exist(args.indir, args.species)
        if error_msg is not None:
            parser.error('argument {argument_name}: {error_msg}'.format(argument_name='--indir', error_msg=error_msg))
    else:
        required_files = {'fasta': os.path.abspath(args.reference.name), 'config': os.path.abspath(args.config.name)}

    rematch_script = include_rematch_dependencies_path()

    args.outdir = os.path.abspath(args.outdir)
    if not os.path.isdir(args.outdir):
        os.mkdir(args.outdir)

    # Start logger
    logfile, time_str = start_logger(args.outdir)

    config = parse_config(required_files['config'])

    import tempfile
    rematch_folder = tempfile.mkdtemp(prefix='trueCoverage_rematch_', suffix='_tmp', dir=args.outdir)

    # NOTE(review): run_true_coverage is wrapped by @trueCoverage_timer, which
    # presumably injects time_taken into the returned tuple; confirm against
    # utils.timer.
    run_successfully, pass_qc, time_taken, failing, sample_data_general = \
        run_true_coverage('sample',
                          [os.path.abspath(fastq.name) for fastq in args.fastq],
                          required_files['fasta'],
                          args.threads,
                          rematch_folder,
                          config['length_extra_seq'],
                          config['minimum_depth_presence'],
                          config['minimum_depth_call'],
                          config['minimum_depth_frequency_dominant_allele'],
                          config['minimum_gene_coverage'],
                          False,
                          config['minimum_gene_identity'],
                          config,
                          rematch_script,
                          num_map_loc=1,
                          bowtie_algorithm=args.bowtieAlgo,
                          clean_run_rematch=True)

    import shutil
    if run_successfully:
        # Persist failing reasons (text and, optionally, JSON).
        if len(failing) > 0:
            with open(os.path.join(args.outdir, 'failing.' + time_str + '.txt'), 'wt') as writer:
                for scope, reason in failing.items():
                    writer.write('#{scope}\n'
                                 '{reason}\n'.format(scope=scope, reason=reason))
        if args.json:
            import json
            with open(os.path.join(args.outdir, 'sample_data_general.' + time_str + '.json'), 'wt') as writer:
                json.dump(sample_data_general, writer, separators=(",", ":"))
            if len(failing) > 0:
                with open(os.path.join(args.outdir, 'failing.' + time_str + '.json'), 'wt') as writer:
                    json.dump(failing, writer, separators=(",", ":"))
    else:
        shutil.rmtree(rematch_folder)
        sys.exit('Something went wrong while running ReMatCh')

    shutil.rmtree(rematch_folder)


if __name__ == "__main__":
    main()
| B-UMMI/INNUca | modules/trueCoverage_rematch.py | Python | gpl-3.0 | 22,715 | [
"Bowtie"
] | 022e45ab2dacede497f9c54173ce74cc8b6ddaf2af1ad580b2464c515abcf656 |
##########################################################################
# this script was generated by openmm-builder. to customize it further,
# you can save the file to disk and edit it with your favorite editor.
##########################################################################
from __future__ import print_function
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
from sys import stdout
import argparse
def parse_args(args):
    """
    Parse command-line options for the simulation script.

    Parameters
    ----------
    args : list of str or None
        Argument vector to parse (e.g. sys.argv[1:]); None falls back to
        sys.argv[1:].

    Returns
    -------
    argparse.Namespace
        Parsed options with a `checkpoint` attribute (str or None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--checkpoint",
                        help="path to an OpenMM checkpoint",
                        type=str
                        )
    # Bug fix: the `args` parameter was previously ignored
    # (parser.parse_args() always read sys.argv).
    return parser.parse_args(args)
# Reporter/checkpoint interval and total number of MD steps.
# NOTE(review): parse_args is defined above but never invoked here.
freq = int(1e5)
total = int(2e9)

# Build the system from the input structure with the Amber03 force field and
# OBC implicit solvent (no cutoff, no constraints).
pdb = app.PDBFile('input.pdb')
forcefield = app.ForceField('amber03.xml', 'amber03_obc.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.NoCutoff,
    constraints=None)
# Langevin dynamics at 300 K, 50/ps friction, 1 fs timestep, on CUDA device 0.
integrator = mm.LangevinIntegrator(300*unit.kelvin, 50/unit.picoseconds,
    1.0*unit.femtoseconds)
platform = mm.Platform.getPlatformByName('CUDA')
properties = {'CudaDeviceIndex' : '0'}
simulation = app.Simulation(pdb.topology, system, integrator, platform, properties)
simulation.context.setPositions(pdb.positions)

# Trajectory, state data, and checkpoint reporters fire every `freq` steps.
simulation.reporters.append(app.DCDReporter('trajectory.dcd', freq))
simulation.reporters.append(app.StateDataReporter("sim.csv", freq, step=True,
    potentialEnergy=True, totalEnergy=True, temperature=True, separator='\t'))
simulation.reporters.append(app.CheckpointReporter('ckpt.chk', freq))

print('Running Production...')
# Advance in `freq`-step chunks until the total step count is reached.
total_steps = 0
while total_steps < total:
    simulation.step(freq)
    total_steps += freq
print('Done!')
| LCLS/Protein-Folding-Sims | hp24stab/amber03/CUDA/amber03_implicit_noconstraints.py | Python | mit | 1,698 | [
"OpenMM"
] | e200bc6039ea05f1650a16182b09a631e3cd66091bb8d4a2604fa5c3c6fc3e2a |
import numpy as np
from six import string_types, integer_types
from .util import is_missing
from .descriptor import Descriptor
class Result(object):
    r"""Result type: pairs a molecule with its calculated descriptor values."""

    # __slots__ keeps instances compact; _name_to_value is a lazily built
    # cache (see the `name` property).
    __slots__ = ("mol", "_values", "_descriptors", "_name_to_value")

    def __init__(self, mol, r, d):
        self.mol = mol
        self._values = list(r)
        self._descriptors = list(d)
        self._name_to_value = None

    def __str__(self):
        return "{}({{{}}})".format(
            self.__class__.__name__,
            ", ".join(
                "'{}': {}".format(k, v) for k, v in zip(self._descriptors, self._values)
            ),
        )

    def __repr__(self):
        return "{}({!r},{!r},{!r})".format(
            self.__class__.__name__, self.mol, self._values, self._descriptors
        )

    def fill_missing(self, value=np.nan):
        r"""Replace missing value to "value".

        Parameters:
            value: value that missing value is replaced

        Returns:
            Result
        """
        return self.__class__(
            self.mol,
            [(value if is_missing(v) else v) for v in self.values()],
            self.keys(),
        )

    def drop_missing(self):
        r"""Delete missing value.

        Returns:
            Result
        """
        # Keep descriptors and values in lockstep while filtering.
        newvalues = []
        newdescs = []
        for d, v in self.items():
            if not is_missing(v):
                newvalues.append(v)
                newdescs.append(d)

        return self.__class__(self.mol, newvalues, newdescs)

    def items(self):
        r"""Get items.

        Returns:
            Iterable[(Descriptor, value)]
        """
        return ((k, v) for k, v in zip(self.keys(), self.values()))

    def keys(self):
        r"""Get descriptors instances.

        Returns:
            Iterable[Descriptor]
        """
        return iter(self._descriptors)

    def values(self):
        r"""Get descriptor values.

        Returns:
            Iterable[value]
        """
        return iter(self._values)

    # Iterating a Result iterates its values (dict-like asymmetry is
    # intentional here).
    __iter__ = values

    def __reversed__(self):
        return reversed(self._values)

    def asdict(self, rawkey=False):
        r"""Convert Result to dict.

        Parameters:
            rawkey(bool):
                * True: dict key is Descriptor instance
                * False: dict key is str

        Returns:
            dict
        """
        if rawkey:
            return dict(self.items())
        else:
            return {str(k): v for k, v in self.items()}

    @property
    def ix(self):
        r"""Access descriptor value by index.

        >>> from mordred import Calculator, Lipinski

        >>> from rdkit import Chem

        >>> result = Calculator(Lipinski.Lipinski)(Chem.MolFromSmiles("C1CCCCC1"))
        >>> result.ix[0]
        True
        """
        return GetValueByIndex(self._values)

    @property
    def name(self):
        r"""Access descriptor value by descriptor name or instance.

        >>> from mordred import Calculator, descriptors

        >>> from rdkit import Chem

        >>> result = Calculator(descriptors)(Chem.MolFromSmiles("C1CCCCC1"))
        >>> result.name["C2SP3"]
        6
        """
        # Build the str(descriptor) -> value mapping once, on first access.
        if self._name_to_value is None:
            self._name_to_value = {
                str(d): v for d, v in zip(self._descriptors, self._values)
            }

        return GetValueByName(self._name_to_value)

    def __getitem__(self, key):
        # Dispatch: integers/slices index by position, strings/Descriptor
        # instances index by name.
        if isinstance(key, (integer_types, slice)):
            return self.ix[key]
        elif isinstance(key, (string_types, Descriptor)):
            return self.name[key]
        else:
            raise TypeError(
                "Result indices must be "
                "integers, slices, strings or Descriptor instance, "
                "not {}".format(key.__class__.__name__)
            )

    def __len__(self):
        return len(self._descriptors)
class GetValueByIndex(object):
    """Positional view over a Result's value list (int and slice access)."""

    __slots__ = ("_values",)

    def __init__(self, value_list):
        self._values = value_list

    def __getitem__(self, key):
        return self._values[key]
class GetValueByName(object):
    """Name-keyed view over a Result; keys are stringified before lookup."""

    __slots__ = ("_name_to_value",)

    def __init__(self, mapping):
        self._name_to_value = mapping

    def __getitem__(self, key):
        # str() allows lookup by Descriptor instance as well as by name.
        return self._name_to_value[str(key)]
| mordred-descriptor/mordred | mordred/_base/result.py | Python | bsd-3-clause | 4,323 | [
"RDKit"
] | e62d7598cc90628f77a763755a9909315f790d063490d9f94a8c7e13870cfdbd |
from __future__ import print_function
import os
import shutil
import itertools
import tempfile
import subprocess
from distutils.spawn import find_executable
import numpy as np
import mdtraj as md
from mdtraj.testing import get_fn, eq, skipif
# Path to the reference `mkdssp` binary, or None when it is not installed.
HAVE_DSSP = find_executable('mkdssp')
DSSP_MSG = "This test requires mkdssp to be installed, from http://swift.cmbi.ru.nl/gv/dssp/"

# Scratch directory created by setup() and removed by teardown().
tmpdir = None
def setup():
    # Module-level test fixture: create the scratch directory used by
    # call_dssp().
    global tmpdir
    tmpdir = tempfile.mkdtemp()
def teardown():
    # Module-level test fixture: remove the scratch directory.
    shutil.rmtree(tmpdir)
def call_dssp(traj, frame=0):
    """Run the external ``mkdssp`` binary on one frame of a trajectory.

    Returns a numpy array of one-character DSSP codes (column 17 of the
    DSSP output), skipping chain-break records (``!`` in column 14).
    """
    inp = os.path.join(tmpdir, 'temp.pdb')
    out = os.path.join(tmpdir, 'temp.pdb.dssp')
    traj[frame].save(inp)
    cmd = ['mkdssp', '-i', inp, '-o', out]
    subprocess.check_output(' '.join(cmd), shell=True)

    KEY_LINE = ' # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O O-->H-N N-H-->O O-->H-N TCO KAPPA ALPHA PHI PSI X-CA Y-CA Z-CA'
    with open(out) as f:
        # exaust the first entries
        max(itertools.takewhile(lambda l: not l.startswith(KEY_LINE), f))
        return np.array([line[16] for line in f if line[13] != '!'])
def assert_(a, b):
    """Assert elementwise equality of a and b with a readable diff on failure.

    On mismatch, prints either a length discrepancy or a side-by-side
    listing (mismatched entries marked with '<-') before re-raising.
    """
    try:
        assert np.all(a == b)
    except AssertionError:
        if len(a) != len(b):
            print('Not the same length: %d vs %s' % (len(a), len(b)))
            raise
        for i, (aa, bb) in enumerate(zip(a, b)):
            if aa == bb:
                print("%3d: '%s' '%s'" % (i, aa, bb))
            else:
                print("%3d: '%s' '%s' <-" % (i, aa, bb))
        raise
@skipif(not HAVE_DSSP, DSSP_MSG)
def test_1():
    """Compare mdtraj DSSP against reference mkdssp for several PDBs."""
    for fn in ['1bpi.pdb', '1vii.pdb', '4ZUO.pdb', '1am7_protein.pdb']:
        t = md.load_pdb(get_fn(fn))
        t = t.atom_slice(t.top.select_atom_indices('minimal'))
        # Bind t as a default argument: a plain closure late-binds, so every
        # yielded test would otherwise run against the last trajectory only.
        f = lambda t=t: assert_(call_dssp(t),
                                md.compute_dssp(t, simplified=False)[0])
        f.description = 'test_1: %s' % fn
        yield f
@skipif(not HAVE_DSSP, DSSP_MSG)
def test_2():
    """Compare DSSP for every frame of a multi-frame trajectory."""
    t = md.load(get_fn('2EQQ.pdb'))
    for i in range(len(t)):
        # Bind i as a default argument: a plain closure late-binds, so every
        # yielded test would otherwise check only the final frame.
        yield lambda i=i: assert_(call_dssp(t[i]),
                                  md.compute_dssp(t[i], simplified=False)[0])
@skipif(not HAVE_DSSP, DSSP_MSG)
def test_3():
    """Compare DSSP against mkdssp for structures fetched from the PDB."""
    # 1COY gives a small error, due to a broken chain.
    pdbids = ['1GAI', '6gsv', '2AAC']
    for pdbid in pdbids:
        t = md.load_pdb('http://www.rcsb.org/pdb/files/%s.pdb' % pdbid)
        t = t.atom_slice(t.top.select_atom_indices('minimal'))
        # Bind t as a default argument to avoid the late-binding closure bug.
        f = lambda t=t: assert_(call_dssp(t),
                                md.compute_dssp(t, simplified=False)[0])
        # Was 'test_1: %s' (copy-paste error); label with the right test name.
        f.description = 'test_3: %s' % pdbid
        yield f
def test_4():
    """Simplified and full DSSP assignments must agree in shape for 1am7."""
    t = md.load_pdb(get_fn('1am7_protein.pdb'))
    a = md.compute_dssp(t, simplified=True)
    b = md.compute_dssp(t, simplified=False)
    assert len(a) == len(b)
    assert len(a[0]) == len(b[0])
    assert list(np.unique(a[0])) == ['C', 'E', 'H']
def test_5():
    """Water-only systems get 'NA' assignments in both DSSP modes."""
    t = md.load(get_fn('4waters.pdb'))
    a = md.compute_dssp(t, simplified=True)
    b = md.compute_dssp(t, simplified=False)
    ref = np.array([['NA', 'NA', 'NA', 'NA']])
    np.testing.assert_array_equal(a, ref)
    np.testing.assert_array_equal(b, ref)
#def test_6():
# t = md.load(get_fn('alanine-dipeptide-explicit.pdb'))
# a = md.compute_dssp(t, simplified=True)
# protein_residues = np.array([set(a.name for a in r.atoms).issuperset(('C', 'N', 'O', 'CA')) for r in t.topology.residues])
# assert np.unique(a[:, protein_residues]) == "C"
# assert np.unique(a[:, np.logical_not(protein_residues)]) == 'NA'
def test_7():
t = md.load(get_fn('2EQQ.pdb'))
a = md.compute_dssp(t, simplified=True)
| ctk3b/mdtraj | mdtraj/geometry/tests/test_dssp.py | Python | lgpl-2.1 | 3,549 | [
"MDTraj"
] | 762b68f01ecc2e68fff67aef97cd962e7e030109eb3d0344b5c3c118fba6fef5 |
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon.
# Copyright 2005 by Regents of the University of California. All rights reserved
# (Major rewrite for conformance to corebio. Gavin Crooks)
#
# This code is derived from the Biopython distribution and is governed by it's
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""ASTRAL: Compendium for Sequence and Structure Analysis.
The ASTRAL compendium provides databases and tools useful for analyzing protein structures and their sequences. It is partially derived from, and augments the SCOP: Structural Classification of Proteins database. Most of the resources depend upon the coordinate files maintained and distributed by the Protein Data Bank.
Ref:
http://astral.berkeley.edu/
* Classes :
- Raf -- A file of ASTRAL RAF (Rapid Access Format) Sequence Maps.
- RafSeqMap -- A sequence map, a RAF record.
- Res -- A single residue mapping from a RAF record.
* Functions :
- parse_domain -- Convert an ASTRAL fasta header string into a Scop domain.
- normalize_letters -- Normalize RAF amino acid codes.
"""
import re
from copy import copy
from corebio.db.scop import Domain, Residues
from corebio.data import extended_three_to_one as to_one_letter_code
from corebio.utils import FileIndex
__all__ = ('astral_evalues', 'astral_percent_identities',
'astral_evalues_filenames', 'normalize_letters', 'parse_domain',
'Raf', 'RafSeqMap', 'Res')
# Percentage identity filtered ASTRAL SCOP genetic domain sequence subset
astral_percent_identities = [10,20,25,30,35,40,50,70,90,95,100]
# E-value filtered ASTRAL SCOP genetic domain sequence subsets, based on PDB SEQRES records.
astral_evalues = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,1e-20, 1e-25, 1e-50]
# A map between evalues and astral filename suffixes.
astral_evalues_filenames = {
10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50' }
def normalize_letters(one_letter_code):
    """Convert RAF one-letter amino acid codes into IUPAC standard codes.

    Letters are uppercased, and "." ("Unknown") is converted to "X".
    """
    if one_letter_code == '.':
        return 'X'
    else:
        return one_letter_code.upper()
# Matches e.g. ">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {E. coli}"
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")


def parse_domain(str):
    """Convert an ASTRAL fasta header string into a SCOP domain.

    An ASTRAL (http://astral.stanford.edu/) header contains a concise
    description of a SCOP domain. A very similar format is used when a
    Domain object is converted into a string. The Domain returned by this
    method contains most of the SCOP information, but it will not be located
    within the SCOP hierarchy (i.e. the parent node will be None). The
    description is composed of the SCOP protein and species descriptions.

    A typical ASTRAL header looks like --
    >d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}

    Raises:
        ValueError: if the string does not match the header format.
    """
    # NOTE: the parameter name shadows the builtin `str`; kept for
    # backward compatibility with existing callers.
    m = _domain_re.match(str)
    if not m:
        raise ValueError("Domain: " + str)

    dom = Domain()
    dom.sid = m.group(1)
    dom.sccs = m.group(2)
    dom.residues = Residues(m.group(3))
    if not dom.residues.pdbid:
        # Fall back to the PDB ID embedded in the SCOP sid (chars 2-5).
        dom.residues.pdbid = dom.sid[1:5]
    dom.description = m.group(4).strip()
    return dom
class Raf(FileIndex):
    """ASTRAL RAF (Rapid Access Format) Sequence Maps.

    The ASTRAL RAF Sequence Maps record the relationship between the PDB SEQRES
    records (representing the sequence of the molecule used in an experiment)
    and the ATOM records (representing the atoms experimentally observed).

    This data is derived from the Protein Data Bank CIF files. Known errors in
    the CIF files are corrected manually, with the original PDB file serving as
    the final arbiter in case of discrepancies.

    Residues are referenced by residue ID. This consists of the PDB residue
    sequence number (up to 4 digits) and an optional PDB insertion code (an
    ascii alphabetic character, a-z, A-Z). e.g. "1", "10A", "1010b", "-1"

    See "ASTRAL RAF Sequence Maps":http://astral.stanford.edu/raf.html

    The RAF file itself is about 50 MB. Each line consists of a sequence map of
    a different protein chain. This index provides rapid, random
    access of RAF records without having to load the entire file into memory.

    This class does not load the entire RAF file into memory. Instead, it
    reads the file once, noting the location and content of each RafSeqMap.
    The index key is a concatenation of the PDB ID and chain ID. e.g
    "2drcA", "155c_". RAF uses an underscore to indicate blank
    chain IDs. Custom maps of subsequences or spanning multiple chains can
    be constructed with the get_seqmap method.
    """

    def __init__(self, raf_file):
        # A key is the first 5 characters of a record line (pdbid + chain);
        # blank lines and '#' comments are skipped.
        def linekey(line):
            if not line or len(line) < 5 or line.isspace() or line[0] == '#':
                return None
            return line[0:5]

        def parser(f):
            return RafSeqMap(f.readline())

        FileIndex.__init__(self, raf_file, linekey, parser)

    def get_seqmap(self, residues):
        """Get the sequence map for a collection of residues.

        residues -- A SCOP style description of a collection of residues from a
                    PDB strucure, (e.g. '(1bba A:10-20,B:)'), as a string or a
                    scop.Residues instance.
        """
        if type(residues) == str:
            residues = Residues(residues)

        pdbid = residues.pdbid
        frags = residues.fragments
        if not frags:
            frags = (('_', '', ''),)  # All residues of unnamed chain

        seqMap = None
        for frag in frags:
            chainid = frag[0]
            if chainid == '' or chainid == '-' or chainid == ' ' or chainid == '_':
                chainid = '_'
            sid = pdbid + chainid
            sm = self[sid]

            # Cut out fragment of interest
            start = 0
            end = len(sm.res)
            if frag[1]:
                start = int(sm.index(frag[1], chainid))
            if frag[2]:
                end = int(sm.index(frag[2], chainid) + 1)

            sm = sm[start:end]
            if seqMap is None:
                seqMap = sm
            else:
                seqMap += sm
        return seqMap
# End Raf
class RafSeqMap(object):
    """ASTRAL RAF (Rapid Access Format) Sequence Maps.

    RafSeqMap is a list like object; you can find the location of particular
    residues with index(), slice this RafSeqMap into fragments, and glue
    fragments back together with extend().

    - pdbid -- The PDB 4 character ID
    - pdb_datestamp -- From the PDB file
    - version -- The RAF format version. e.g. 0.01
    - flags -- RAF flags. (See release notes for more information.)
    - res -- A list of Res objects, one for each residue in this sequence map
    """

    def __init__(self, raf_record=None):
        """Parses a RAF record into a RafSeqMap object."""
        self.pdbid = ''
        self.pdb_datestamp = ''
        self.version = ''
        self.flags = ''
        self.res = []

        if not raf_record:
            return

        header_len = 38
        line = raf_record.rstrip()  # no trailing whitespace
        if len(line) < header_len:
            raise ValueError("Incomplete header: " + line)

        # Fixed-column header fields; see the RAF format documentation.
        self.pdbid = line[0:4]
        chainid = line[4:5]
        self.version = line[6:10]
        # Raf format versions 0.01 and 0.02 are identical for practical purposes
        if self.version != "0.01" and self.version != "0.02":
            raise ValueError("Incompatible RAF version: " + self.version)
        self.pdb_datestamp = line[14:20]
        self.flags = line[21:27]

        # The body is a sequence of fixed-width 7-character residue fields.
        for i in range(header_len, len(line), 7):
            f = line[i:i + 7]
            if len(f) != 7:
                raise ValueError("Corrupt Field: (" + f + ")")
            r = Res()
            r.chainid = chainid
            r.resid = f[0:5].strip()
            r.atom = normalize_letters(f[5:6])
            r.seqres = normalize_letters(f[6:7])
            self.res.append(r)
    # end __init__

    #@staticmethod
    def records(raf_file):
        """Iterates over a Raf file, generating RafSeqMaps """
        for line in raf_file:
            if line[0] == '#':
                continue  # A comment
            if line.isspace():
                continue
            yield RafSeqMap(line)
    records = staticmethod(records)

    def index(self, resid, chainid="_"):
        """Return the list position of the residue with the given IDs.

        Raises:
            KeyError: if no matching residue is found.
        """
        for i in range(0, len(self.res)):
            if self.res[i].resid == resid and self.res[i].chainid == chainid:
                return i
        raise KeyError("No such residue " + chainid + resid)

    def __getslice__(self, i, j):
        # NOTE(review): __getslice__ only exists in Python 2; this module's
        # style (iteritems elsewhere in the package) suggests Python 2.
        s = copy(self)
        s.res = s.res[i:j]
        return s

    def append(self, res):
        """Append another Res object onto the list of residue mappings."""
        self.res.append(res)

    def extend(self, other):
        """Append another RafSeqMap onto the end of self.

        Both RafSeqMaps must have the same PDB ID, PDB datestamp and
        RAF version. The RAF flags are erased if they are inconsistent. This
        may happen when fragments are taken from different chains.
        """
        if not isinstance(other, RafSeqMap):
            raise TypeError("Can only extend a RafSeqMap with a RafSeqMap.")
        if self.pdbid != other.pdbid:
            raise TypeError("Cannot add fragments from different proteins.")
        if self.version != other.version:
            raise TypeError("Incompatible rafs.")
        if self.pdb_datestamp != other.pdb_datestamp:
            raise TypeError("Different pdb dates!")
        if self.flags != other.flags:
            self.flags = ''
        self.res += other.res

    def __iadd__(self, other):
        self.extend(other)
        return self

    def __add__(self, other):
        s = copy(self)
        s.extend(other)
        return s

    def extract_atoms(self, pdb_handle, out_handle):
        """Extract all relevant ATOM and HETATOM records from a PDB file.

        The PDB file is scanned for ATOM and HETATOM records. If the
        chain ID, residue ID (seqNum and iCode), and residue type match
        a residue in this sequence map, then the record is echoed to the
        output handle.

        This is typically used to find the coordinates of a domain, or other
        residue subset.

        pdb_file -- A handle to the relevant PDB file.

        out_file -- All output is written to this stream.

        Raises:
            RuntimeError: if any mapped residue has no matching ATOM/HETATM
                record in the PDB file.
        """
        resSet = {}
        for r in self.res:
            if r.atom == 'X':  # Unknown residue type
                continue
            chainid = r.chainid
            if chainid == '_':
                chainid = ' '
            resid = r.resid
            resSet[(chainid, resid)] = r

        resFound = {}
        for line in pdb_handle:
            if line.startswith("ATOM ") or line.startswith("HETATM"):
                chainid = line[21:22]
                resid = line[22:27].strip()
                key = (chainid, resid)
                if key in resSet:
                    res = resSet[key]
                    atom_aa = res.atom
                    # BUG FIX: was line[17:20].capitilize(), which raises
                    # AttributeError; str has no such method.
                    resName = line[17:20].capitalize()
                    if resName in to_one_letter_code:
                        if to_one_letter_code[resName] == atom_aa:
                            out_handle.write(line)
                            resFound[key] = res

        if len(resSet) != len(resFound):
            raise RuntimeError('I could not find at least one ATOM or '
                               'HETATM record for each and every residue in this sequence map.')
class Res(object):
    """ A single residue mapping from a RAF record.

    - chainid -- A single character chain ID.
    - resid -- The residue ID.
    - atom -- amino acid one-letter code from ATOM records.
    - seqres -- amino acid one-letter code from SEQRES records.
    """

    def __init__(self):
        self.chainid = ''
        self.resid = ''
        self.atom = ''
        self.seqres = ''
| NarlikarLab/DIVERSITY | weblogoMod/corebio/db/astral.py | Python | gpl-3.0 | 12,519 | [
"Biopython"
] | 03e60a9d39d36bed6848d81538892afd396b763db1f2e862873a58cc081366c2 |
"""
Author: Thomas G. Close (tclose@oist.jp)
Copyright: 2012-2014 Thomas G. Close.
License: This file is part of the "NineLine" package, which is released under
the MIT Licence, see LICENSE for details.
"""
from __future__ import absolute_import
try:
from mpi4py import MPI # @UnresolvedImport @UnusedImport
except:
pass
import pyNN.models
from pype9.simulate.neuron.cells import CellMetaClass
from pype9.simulate.common.network.cell_wrapper import (
PyNNCellWrapper as BasePyNNCellWrapper,
PyNNCellWrapperMetaClass as BasePyNNCellWrapperMetaClass)
from ..units import UnitHandler
class PyNNCellWrapper(BasePyNNCellWrapper, pyNN.models.BaseCellType):
    """
    Extends the vanilla Cell to include all the PyNN requirements
    """
class PyNNCellWrapperMetaClass(BasePyNNCellWrapperMetaClass):
    """Constructs (and caches) PyNN cell-type classes for 9ML components."""

    # Cache of previously constructed cell types, keyed by model name, so a
    # component class is only translated/compiled once per session.
    loaded_celltypes = {}

    def __new__(cls, component_class, default_properties,
                initial_state, initial_regime, **kwargs):  # @UnusedVariable @IgnorePep8
        model = CellMetaClass(component_class=component_class,
                              default_properties=default_properties,
                              initial_state=initial_state,
                              standalone=False, **kwargs)
        try:
            celltype = cls.loaded_celltypes[model.name]
        except KeyError:
            dct = {'model': model,
                   'default_properties': default_properties,
                   'initial_state': initial_state,
                   'initial_regime': initial_regime,
                   'extra_parameters': {'_in_array': True}}
            celltype = super(PyNNCellWrapperMetaClass, cls).__new__(
                cls, model.name, (PyNNCellWrapper,), dct)
            # Sanity check: the PyNN wrapper and the generated cell class
            # must expose the same recordable variables.
            recordable_keys = list(model(default_properties,
                                         _in_array=True).recordable.keys())
            assert set(celltype.recordable) == set(recordable_keys), (
                "Mismatch of recordable keys between CellPyNN ('{}') and "
                "Cell class '{}' ('{}')".format(
                    "', '".join(set(celltype.recordable)), model.name,
                    "', '".join(set(recordable_keys))))
            cls.loaded_celltypes[model.name] = celltype
        return celltype
| tclose/PyPe9 | pype9/simulate/neuron/network/cell_wrapper.py | Python | mit | 2,278 | [
"NEURON"
] | 352530795f5972f3df5e62bd6d98f8c138d39af43fe0c02bc286f2d0093306d7 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
The phasediagram package implements the analysis tools to perform phase
stability analyses, including the constructing of phase diagrams, determination
of decomposition products, etc. The package is designed to be fairly modular
and standalone.
"""
__author__ = "Shyue"
__date__ = "Mar 28 2013"
| migueldiascosta/pymatgen | pymatgen/phasediagram/__init__.py | Python | mit | 410 | [
"pymatgen"
] | 60d5b017278ed59c2f604e875e9e957349031fd12b81586ae022cc43a21da222 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Spglib(CMakePackage):
    """C library for finding and handling crystal symmetries."""

    homepage = "https://atztogo.github.io/spglib/"
    url = "https://github.com/atztogo/spglib/archive/v1.10.3.tar.gz"

    version('1.10.3', 'f6ef0554fa528ffa49d8eaee18a2b7b9')
    version('1.10.0', '0ad9330ae8a511d25e2e26cb9bf02808')

    # Fixes installation paths; only needed for releases through 1.10.3.
    patch('fix_cmake_install.patch', when='@:1.10.3')
    # patch by Krishnendu Ghosh
    patch('fix_cpp.patch', when='@:1.10.3')
"CRYSTAL"
] | 541421509f89b1964589c7fd7957c878b7594a39eab7e5e0dd791a2f7c10ab94 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to generate switch and route information from network.yaml."""
import collections
import sys
def AioNodeToTag(aio_node):
  """Returns the tag for an AIO node (e.g. 'aio_nodes.motor_sbo')."""
  return 'aio_nodes.%s' % aio_node.snake_name
def IsAioNode(tag):
  """Returns True iff tag represents an AIO node."""
  return tag.startswith('aio_nodes.')
def TagToAioNode(tag):
  """Returns the AIO node name for an 'aio_nodes.<name>' tag."""
  _, aio_node = tag.split('.')
  return aio_node
def PortToTag(switch, port):
  """Returns the tag for a port (e.g. 'switches.fc_a.3')."""
  return 'switches.%s.%d' % (switch, port)
def IsPort(tag):
  """Returns True iff tag represents a switch port."""
  return tag.startswith('switches.')
def TagToPort(tag):
  """Returns the (switch, port) pair for a 'switches.<name>.<n>' tag."""
  _, switch, port = tag.split('.')
  return switch, int(port)
def IsSwitchLink(a, b):
  """Returns True iff a link connects ports on the same switch."""
  return IsPort(a) and IsPort(b) and TagToPort(a)[0] == TagToPort(b)[0]
def IsSwitchLinkRoutable(switch, in_port, out_port, network_c_mode=False):
  """Returns True iff multicast packets can travel from in_port to out_port.

  Args:
    switch: A switch description dict (one value of the YAML 'switches' map).
    in_port: Ingress port number on the switch.
    out_port: Egress port number on the switch.
    network_c_mode: If True, apply network-C routing rules instead of the
        A/B access-switch rules.
  """
  config = switch.get('config', {})
  # A port never forwards back out of itself.
  if in_port == out_port:
    return False
  # On core switches, there is a set of ports which can't talk to each
  # other, in order to prevent loops via redundant connections.
  if 'isolate' in config:
    isolated = config['isolate']
    if in_port in isolated and out_port in isolated:
      return False
  if 'trunk' in config:
    trunk_ports = config['trunk']['ports']
    # Trunk ports never forward to each other.
    if in_port in trunk_ports and out_port in trunk_ports:
      return False
    # Allow blocking of network C traffic with the network_c_transit trunk
    # setting.
    if network_c_mode and 'network_c_transit' in config['trunk']:
      network_c_transit = config['trunk']['network_c_transit']
      if in_port in trunk_ports and in_port not in network_c_transit:
        return False
  if network_c_mode:
    # Network C rides along with network A traffic.
    network_a = config.get('network_a', [])
    network_c = config.get('network_c', [])
    if ((in_port not in network_a and in_port not in network_c)
        or (out_port not in network_a and out_port not in network_c)):
      return False
  else:
    # On access switches, don't let A-only ports talk to B-only ports.
    # However, ports on both networks can talk to either.
    if 'network_a' in config and 'network_b' in config:
      network_a = config['network_a']
      network_b = config['network_b']
      in_a = in_port in network_a
      in_b = in_port in network_b
      out_a = out_port in network_a
      out_b = out_port in network_b
      in_neither = not in_a and not in_b
      out_neither = not out_a and not out_b
      in_a_only = in_a and not in_b
      in_b_only = in_b and not in_a
      out_a_only = out_a and not out_b
      out_b_only = out_b and not out_a
      if ((in_a_only and out_b_only) or (in_b_only and out_a_only) or
          in_neither or out_neither):
        return False
  return True
class BadConnectionException(Exception):
  """Raised when the network connection graph is inconsistent."""
class BadMessageException(Exception):
  """Raised when a message definition is invalid."""
class BadRouteException(Exception):
  """Raised when no route exists between a sender and a receiver."""
class SwitchMismatchException(Exception):
  """Raised when switch descriptions disagree."""
class PathFinder(object):
"""Finds paths between AIO network nodes."""
def __init__(self, switches, message_types, network_c=False):
self._ValidateConnections(switches)
self._switches = switches
self._network_c = network_c
if not network_c:
destination_sets = self._ComputeDestinationSets(message_types)
else:
destination_sets = self._ComputeNetCDestinationSets(switches)
self._routes = self._ComputeAllRoutes(destination_sets)
def _ValidateConnections(self, switches):
"""Make sure each port is only used once and ends of a connection agree.
Args:
switches: the switches section from the processed YAML file.
Raises:
BadConnectionException: on any network error.
"""
connections = {}
for switch, sv in switches.iteritems():
for port, pv in sv['ports'].iteritems():
tag = PortToTag(switch, port)
if not pv:
sys.stderr.write('WARNING: %s is not connected.\n' % tag)
continue
if pv in connections:
if tag not in connections or connections[pv] is not connections[tag]:
raise BadConnectionException(
'%s thinks it is connected to %s, but %s disagrees!'
% (tag, pv, pv))
record = connections[pv]
else:
record = {
'count': 0,
}
connections[pv] = connections[tag] = record
if IsAioNode(pv): # We won't see another end of this connection.
record['count'] += 1
record['count'] += 1
for tag, record in connections.iteritems():
if record['count'] != 2:
raise BadConnectionException(
'%s is connected the wrong number of times: %s'
% (tag, str(record)))
def _ComputeDestinationSets(self, message_types):
"""Computes all {source: set(destinations)} used by the input messages."""
destination_sets = collections.defaultdict(set)
for m in message_types:
for route in m.routes:
for s in route.senders:
sender_tag = AioNodeToTag(s)
destination_set = destination_sets[sender_tag]
for r in route.receivers:
receiver_tag = AioNodeToTag(r)
if receiver_tag != sender_tag:
destination_set.add(receiver_tag)
return destination_sets
def _ComputeAllRoutes(self, destination_sets):
"""Computes all routes described by destination_sets.
Args:
destination_sets: A map from source to set of destinations.
Returns:
A map from source to a map from destination to a set of paths from source
to that destination. Each path is a list of port/AioNode tags starting
from the source and ending with the destination.
For example:
{ 'aio_nodes.fc_a' : # Routes from fc_a.
{ 'aio_nodes.recorder_wing': [ # routes from fc_a to recorder_wing.
['aio_nodes.fc_a', 'switches.fc_a.0', ..., # The first route.
'aio_nodes.recorder_wing],
['aio_nodes.fc_a', 'switches.fc_a.0', ..., # The second route.
'aio_nodes.recorder_wing],
...],
...
],
'aio_nodes.motor_sbo': [ # routes from fc_a to motor_sbo.
...
}
}
"""
path_sets = {}
for source, destinations in destination_sets.iteritems():
ps = path_sets[source] = {d: [] for d in destinations}
paths = self._Dfs(source, destinations, coalesce=False)
for path in paths:
ps[path[-1]].append(path)
return path_sets
def _ComputeNetCDestinationSets(self, switches):
edges = set()
for switch_name, switch in switches.iteritems():
for port in switch.get('config', {}).get('network_c', []):
edges.add(PortToTag(switch_name, port))
destination_sets = {}
for source in edges:
destination_sets[source] = edges.copy()
destination_sets[source].remove(source)
return destination_sets
def _FindNeighbor(self, tag):
"""Returns the neighboring node, or None if no neighboring node is found."""
if IsAioNode(tag):
for switch, sv in self._switches.iteritems():
for port, pv in sv['ports'].iteritems():
if pv == tag:
return PortToTag(switch, port)
return None
else:
switch, port = TagToPort(tag)
return self._switches[switch]['ports'][port]
def FindAllPaths(self, coalesce=True):
"""Returns all of the paths in this network topology."""
paths = []
for source in self._routes:
for destination in self._routes[source]:
routes = self._routes[source][destination]
if not routes:
raise BadRouteException('No route from %s to %s' %
(source, destination))
paths.extend(routes)
if coalesce:
paths = self._CoalescePaths(paths)
return paths
def FindPaths(self, name, source, destinations, coalesce=True):
r"""Finds the paths taken by a packet.
If coalesce is True, the paths will account for multicast replication. For
example, consider the graph:
B - D1
/ \ /
S -- A x
\ / \
C - D2
Result with coalesce=False:
[[S, A, B, D1], [S, A, B, D2], [S, A, C, D1], [S, A, C, D2]]
Result with coalesce=True:
[[S, A, B, D1], [B, D2], [A, C, D1], [C, D2]]
With coalesce=True, each link appears in the result exactly the number of
times that a packet and its multicast replicas traverse it.
Args:
name: Message name.
source: Source node.
destinations: List of destination nodes.
coalesce: See above.
Returns:
A list of paths taken by a packet, where each path is a list of
port/AioNode tags starting from the source and ending with a destination.
Raises:
BadRouteException: if no route can be found for a destination.
"""
paths = []
for destination in destinations:
if destination == source:
continue
if destination not in self._routes[source]:
continue
routes = self._routes[source][destination]
routes = [r for r in routes if not self._IsRestrictedRoute(name, r)]
if not routes:
raise BadRouteException('No route from %s to %s for message %s.' %
(source, destination, name))
paths.extend(routes)
if coalesce:
paths = self._CoalescePaths(paths)
return paths
def _CoalescePaths(self, paths):
"""Coalesce paths as described under FindPaths."""
paths = sorted(paths)
coalesced = [paths[0]]
for p in range(1, len(paths)):
last = paths[p - 1]
cur = paths[p]
path = []
for i in range(1, len(cur)):
if i > len(last) or last[i] != cur[i]:
path.extend(cur[i-1:])
break
coalesced.append(path)
return coalesced
def _IsRestrictedRoute(self, message, route):
"""Restrict route by message type."""
for i in range(len(route) - 1):
egress = route[i]
ingress = route[i + 1]
if not IsPort(ingress) or not IsPort(egress):
continue
if IsSwitchLink(ingress, egress):
continue
out_switch_name, out_port = TagToPort(egress)
out_switch = self._switches[out_switch_name]
if 'config' in out_switch:
restrict = out_switch['config'].get('restrict', {})
if out_port in restrict and message not in restrict[out_port]:
return True
if 'trunk' in out_switch['config']:
trunk = out_switch['config']['trunk']
if out_port in trunk['ports']:
override = trunk.get('override_message_routes', {})
for override_message, override_ports in override.iteritems():
if message == override_message and out_port not in override_ports:
return True
return False
def _Dfs(self, source, destinations, coalesce=True):
"""Finds the paths taken by a packet."""
# Persistent state for _Dfs.
self._paths = []
self._destinations = destinations
self._coalesce = coalesce
self._DfsHelper(source, [], 0)
return self._paths
def _DfsHelper(self, node, path, base):
"""Recursively visit a node.
Path coalescing is implemented by treating the outgoing paths from a node as
one "parent" path and zero or more "child" paths. Parent paths preserve the
path base, while child traversals set the base to the current node. The
returned paths begin at the base. Because only the parent path preserves the
base, the incoming path component will only be counted once. This gives an
accurate representation of multicast bandwidth utilization.
Args:
node: Tag of the node to visit.
path: Path so far, as an array of tags, not yet including node.
base: Index of the starting node of this traversal in path.
Returns:
True iff a path was found.
"""
# Found a cycle.
if node in path:
return False
# Ignore paths that hit the same switch more than once.
if IsPort(node) and len([n for n in path if IsSwitchLink(node, n)]) > 1:
return False
found_path = False
path.append(node)
if len(path) > 1:
if node in self._destinations:
self._paths.append(path[base:])
path.pop()
return True # Destinations are always network leaves.
elif IsAioNode(node):
path.pop()
return False # AioNodes are always network leaves.
if IsAioNode(node) or (len(path) >= 2 and IsSwitchLink(node, path[-2])):
# The current node is an AIO node or an egress port, so there is a maximum
# of one neighbor to traverse.
neighbor = self._FindNeighbor(node)
if neighbor:
found_path = self._DfsHelper(neighbor, path, base)
else:
# The current node is an ingress port. Traverse all allowed egress ports.
switch_name, in_port = TagToPort(node)
switch = self._switches[switch_name]
ports = switch['ports']
if self._network_c:
for net_c_port in switch.get('config', {}).get('network_c', []):
ports[net_c_port] = 'network_c_device'
for out_port in switch['ports']:
if not IsSwitchLinkRoutable(switch, in_port, out_port,
network_c_mode=self._network_c):
continue
found_path |= self._DfsHelper(PortToTag(switch_name, out_port),
path, base)
if found_path and self._coalesce:
base = len(path) - 1
path.pop()
return found_path
def GetSwitches(self):
return self._switches
def GetAllConnections(self):
"""List every network connection used in any route.
Returns:
A list of tuples of ports, listing each connection twice--once in each
direction.
"""
connections = set()
for destination_set in self._routes.itervalues():
for path_list in destination_set.itervalues():
for path in path_list:
for link in zip(path[:-1], path[1:]):
connections.add(link)
return connections
def GetHops(self, name, source, destinations, unicast=False):
"""List all switch hops between source and destination.
Args:
name: Message name.
source: Source node.
destinations: List of destination nodes.
unicast: Require unicast support on path.
Returns:
A list of paths taken by a packet, where each path is a list of switches
that the packet passes through.
"""
paths = self.FindPaths(name, source, destinations, coalesce=False)
hops = []
hop_set = set()
for path in paths:
unicast_path = True
path_hops = []
for index, hop in enumerate(path[:-1]):
ingress = path[index]
egress = path[index + 1]
if not IsSwitchLink(ingress, egress):
continue
switch_name, port = TagToPort(hop)
switch = self._switches[switch_name]
unicast_path = ('config' in switch
and 'unicast' in switch['config']
and port in switch['config']['unicast'])
path_hops.append(switch_name)
if (not unicast or unicast_path) and tuple(path_hops) not in hop_set:
hop_set.add(tuple(path_hops))
hops.append(path_hops)
return hops
def GetFirstSwitch(self, name, source, destinations, switches):
"""List the first switch within switches that a packet routes through.
Args:
name: Message name.
source: Source node.
destinations: List of destination nodes.
switches: Switches to consider.
Returns:
A list of the first switches that a packet routes through.
"""
paths = self.GetHops(name, source, destinations)
first_switches = []
for path in paths:
for hop in path:
if hop in switches:
first_switches.append(hop)
break
return first_switches
def GetAttachedNodes(self, switch_name):
  """Get a list of nodes attached to the given switch."""
  switch = self._switches.get(switch_name)
  if not switch:
    return []
  return [TagToAioNode(tag) for tag in switch['ports'].values()
          if IsAioNode(tag)]
def MakeForwardingMaps(message_types, path_finder):
  """Creates a forwarding map for each switch.

  The bitmask tells which ports should receive a given message type,
  e.g. 0x31 forwards to ports 0, 4, and 5.

  Args:
    message_types: {message_types: type_data}, corresponding to the
        "message_types" field of the YAML file.
    path_finder: PathFinder.

  Returns:
    A nested dictionary of the form
    {switch_name: {message_type: port_bitmask}}.
  """
  forwarding_maps = collections.defaultdict(
      lambda: collections.defaultdict(lambda: 0))
  for message in message_types:
    if message.inhibit_routing:
      continue
    for route in message.routes:
      if not (route.senders and route.receivers):
        sys.stderr.write('WARNING: Skipping %s; no route found.\n'
                         % message.name)
        continue
      for sender in route.senders:
        paths = path_finder.FindPaths(
            message.name, AioNodeToTag(sender),
            {AioNodeToTag(x) for x in route.receivers})
        for path in paths:
          # Walk consecutive tag pairs; each switch link sets the bit of
          # its egress port for this message type.
          for ingress, egress in zip(path[:-1], path[1:]):
            if IsSwitchLink(ingress, egress):
              switch, out_port = TagToPort(egress)
              forwarding_maps[switch][message.name] |= (1 << out_port)
  return forwarding_maps
def MakeNetworkCForwardingMap(path_finder):
  """Creates a network C forwarding map for each switch.

  The bitmask tells which ports should forward network C traffic.

  Args:
    path_finder: PathFinder.

  Returns:
    A dictionary of the form {switch_name: port_bitmask}.
  """
  forwarding_map = collections.defaultdict(lambda: 0)
  for path in path_finder.FindAllPaths():
    # Every switch link along the path contributes its egress port bit.
    for ingress, egress in zip(path[:-1], path[1:]):
      if IsSwitchLink(ingress, egress):
        switch, out_port = TagToPort(egress)
        forwarding_map[switch] |= (1 << out_port)
  return forwarding_map
class MessageGraph(object):
  """Compute the multicast route graph for a given message type."""

  def __init__(self, path_finder, message):
    self._switches = path_finder.GetSwitches()
    self._message = message
    # Compute graph.
    # _segments maps a segment's in_tag to the set of out_tags reachable
    # from it.
    self._segments = collections.defaultdict(set)
    # _switch_egress maps a switch name to the set of its egress port tags.
    self._switch_egress = collections.defaultdict(set)
    for route in message.routes:
      # Iterate through all senders to build the graph because we use the
      # same forwarding maps for all network configurations.
      for sender in route.senders:
        receiver_tags = {AioNodeToTag(x) for x in route.receivers}
        paths = path_finder.FindPaths(
            message.name, AioNodeToTag(sender), receiver_tags,
            coalesce=False)
        for path in paths:
          for source, dest in zip(path[:-1], path[1:]):
            self._segments[source].add(dest)
            if IsSwitchLink(source, dest):
              # Link switch_port -> internal switch_port.
              continue
            if IsPort(source):
              # Link switch_port -> aio_node or external switch_port.
              switch_name, _ = TagToPort(source)
              self._switch_egress[switch_name].add(source)
            # Otherwise: link aio_node -> switch_port; nothing to record.

  def GetMessage(self):
    """Return the message type this graph was built for."""
    return self._message

  def VisitSenders(self, visitor, senders, *args, **kwargs):
    """Run the visitor over the graph once per sender in senders."""
    for sender in senders:
      self.VisitAioNode(AioNodeToTag(sender), visitor, sender,
                        *args, **kwargs)

  def VisitAioNode(self, in_tag, visitor, *args, **kwargs):
    """Visit the links leaving an AIO node."""
    self.VisitRemoteLinks(in_tag, visitor, *args, **kwargs)

  def VisitLocalSwitchLinks(self, in_tag, visitor, *args, **kwargs):
    """Visit internal switch port to switch port links."""
    switch_name, in_port = TagToPort(in_tag)
    switch = self._switches[switch_name]
    out_tags = [tag for tag in self._switch_egress[switch_name]
                if IsSwitchLinkRoutable(switch, in_port, TagToPort(tag)[1])]
    visitor.HandleLocalSwitchLinks(self, in_tag, out_tags, *args, **kwargs)

  def VisitRemoteLinks(self, in_tag, visitor, *args, **kwargs):
    """Visit external switch port to remote port links."""
    out_tags = [tag for tag in self._segments[in_tag]
                if not IsSwitchLink(in_tag, tag)]
    visitor.HandleRemoteLinks(self, in_tag, out_tags, *args, **kwargs)
class MessageGraphVisitor(object):
  """Visit each node in message graph."""

  def __init__(self):
    # Stack of tags on the current traversal path (root first).
    self._path = []

  def _Push(self, tag):
    # A tag may appear at most once on the stack; a repeat means the
    # route graph contains a cycle.
    if tag in self._path:
      raise BadConnectionException('Loop found at %s.' % self._path)
    self._path.append(tag)

  def _Pop(self):
    self._path.pop()

  def GetPath(self):
    # Return a copy so callers cannot mutate the live traversal stack.
    return list(self._path)

  def HandleLocalSwitchLinks(self, graph, in_tag, out_tags, *args, **kwargs):
    """Follow internal switch port to switch port links."""
    self._Push(in_tag)
    for out_tag in out_tags:
      self.HandleSegment(graph, in_tag, out_tag, *args, **kwargs)
      graph.VisitRemoteLinks(out_tag, self, *args, **kwargs)
    self._Pop()

  def HandleRemoteLinks(self, graph, in_tag, out_tags, *args, **kwargs):
    """Follow external switch port to remote port links."""
    self._Push(in_tag)
    for out_tag in out_tags:
      self.HandleSegment(graph, in_tag, out_tag, *args, **kwargs)
      if IsAioNode(out_tag):
        # Reached an endpoint: notify the subclass hook.
        self.HandleAioNode(graph, out_tag, *args, **kwargs)
      elif IsPort(out_tag):
        # Entered another switch: recurse through its internal links.
        graph.VisitLocalSwitchLinks(out_tag, self, *args, **kwargs)
      else:
        raise BadConnectionException('Unexpected tag %s.' % out_tag)
    self._Pop()

  def HandleSegment(self, graph, in_tag, out_tag, *args, **kwargs):  # pylint: disable=unused-argument
    """Hook called for every traversed segment; subclasses override."""
    pass

  def HandleAioNode(self, graph, in_tag, *args, **kwargs):  # pylint: disable=unused-argument
    """Hook called for every reached AIO node; subclasses override."""
    pass
class SegmentStatisticsVisitor(MessageGraphVisitor):
  """Collect packet statistics by segment and sender."""

  def __init__(self):
    # stats[(in_tag, out_tag)][sender] -> {'packets_per_sec': ..., 'peak': ...}.
    self.stats = collections.defaultdict(
        lambda: collections.defaultdict(  # pylint: disable=g-long-lambda
            lambda: collections.defaultdict(int)))
    super(SegmentStatisticsVisitor, self).__init__()

  def HandleSegment(self, graph, in_tag, out_tag, sender):
    record = self.stats[(in_tag, out_tag)][sender]
    record['packets_per_sec'] += graph.GetMessage().frequency_hz
    record['peak'] += 1
def GetSegmentStats(graph, senders):
  """Gather the bandwidth statistics for each segment.

  Args:
    graph: A MessageGraph object.
    senders: A list of AioNode objects for each sender to consider.

  Returns:
    A dictionary of one-way segment statistics, keyed by the segment tuple
    (in_tag, out_tag). The segment statistics describe the data flowing from
    the in_tag to the out_tag.

    A typical example is:
    {
      <(in_tag, out_tag)>: {
        <sender>: {
          'packets_per_sec': <freq>,
          'peak': <count>
        }
      }
    }
  """
  collector = SegmentStatisticsVisitor()
  graph.VisitSenders(collector, senders)
  return collector.stats
def GetNodeBandwidthStatistics(path_finder, message_types):
  """Gather the bandwidth statistics for each endpoint.

  Args:
    path_finder: PathFinder.
    message_types: A list of MessageTypes.

  Returns:
    A dictionary of AIO node configurations, keyed by AIO node names.
    Each AIO node configuration contains 'send', 'receive', and aggregated
    rates for transmitting and receiving packets.

    A typical example is:
    {
      <aio_node>: {
        'send': {
          <message_type>: <freq>
        },
        'receive': {
          <message_type>: <freq>
        },
        'multicast_packet_rate': {
          'tx': <#_of_transmitted_packets_per_second>,
          'rx': <#_of_received_packets_per_second>,
        }
      }
    }
  """
  Stat = collections.namedtuple(  # pylint: disable=invalid-name
      'Stat', ['send', 'receive', 'multicast_packet_rate'])
  # Stats per node, per message.
  node_stats = collections.defaultdict(
      lambda: Stat(  # pylint: disable=g-long-lambda
          collections.defaultdict(lambda: 0),
          collections.defaultdict(lambda: 0),
          collections.defaultdict(lambda: 0)))
  for m in message_types:
    if not m.frequency_hz:
      continue
    graph = MessageGraph(path_finder, m)
    message_stats = GetSegmentStats(graph, m.all_senders)
    # items() works under both Python 2 and 3; iteritems() is Python 2-only.
    for segment, senders in message_stats.items():
      for stats in senders.values():
        packets_per_sec = stats['packets_per_sec']
        if IsAioNode(segment[0]):
          # AioNode sends message.
          node = segment[0]
          node_stats[node].send[m.name] += packets_per_sec
          node_stats[node].multicast_packet_rate['tx'] += packets_per_sec
        elif IsAioNode(segment[1]):
          # AioNode receives message.
          node = segment[1]
          node_stats[node].receive[m.name] += packets_per_sec
          node_stats[node].multicast_packet_rate['rx'] += packets_per_sec
  return node_stats
class RecipientsVisitor(MessageGraphVisitor):
  """Collect recipients by sender."""

  def __init__(self):
    # recipients[sender] -> set of AIO node tags reached from that sender.
    self.recipients = collections.defaultdict(set)
    # paths[(sender, aio_node)] -> list of traversal paths to that node.
    self.paths = collections.defaultdict(list)
    super(RecipientsVisitor, self).__init__()

  def HandleAioNode(self, graph, in_tag, sender):
    self.recipients[sender].add(in_tag)
    node = TagToAioNode(in_tag)
    self.paths[(sender, node)].append(self.GetPath())
def CheckForUnintendedRecipients(graph):
  """Check for unintended recipients in multicast routing graph.

  Args:
    graph: A MessageGraph object.

  Raises:
    BadConnectionException: If any node receives messages from a sender
      without being listed as a receiver on one of that sender's routes.
  """
  message = graph.GetMessage()
  visitor = RecipientsVisitor()
  graph.VisitSenders(visitor, message.all_senders)
  for sender in message.all_senders:
    # Actual receivers are those who receive messages from 'sender' as a
    # consequence of the multicast routes.
    actual_receivers = set(TagToAioNode(t) for t in visitor.recipients[sender])
    # Expected receivers are those from the union of receivers from all routes
    # with 'sender' in senders.
    expected_receivers = set()
    for route in [r for r in message.routes if sender in r.senders]:
      expected_receivers |= set(n.snake_name for n in route.receivers)
    unexpected_receivers = actual_receivers - expected_receivers
    if unexpected_receivers:
      # print() calls are valid under both Python 2 and Python 3, unlike
      # the Python 2-only print statements used previously.
      print('Unexpected network routes:')
      for r in unexpected_receivers:
        for p in visitor.paths[(sender, r)]:
          print(p)
      raise BadConnectionException(
          'Network routes cross: %s should not receive %s messages from %s.'
          % (list(unexpected_receivers), message.name, sender.snake_name))
def GetAttachedSwitch(node, switches):
  """Return the name of the switch with a port wired to the given node.

  Args:
    node: An AIO node.
    switches: Dict mapping switch names to switch descriptions.

  Returns:
    The name of the first switch found whose ports include the node's tag.

  Raises:
    BadConnectionException: If no switch connects to the node.
  """
  node_tag = AioNodeToTag(node)
  # items() works under both Python 2 and 3; iteritems() is Python 2-only.
  for switch_name, switch in switches.items():
    for tag in switch['ports'].values():
      if node_tag == tag:
        return switch_name
  raise BadConnectionException('Could not find switch for node %s.' % node)
| google/makani | avionics/network/network_util.py | Python | apache-2.0 | 27,972 | [
"VisIt"
] | ed8f079b7545d6aa3a4748b4dab1eca0f6377d55620fee1e06cf705c52dba493 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_user_input_material
---------------------------
Shows user how to input a mineral of his/her choice without using the library and which physical values
need to be input for BurnMan to calculate :math:`V_P, V_\Phi, V_S` and density at depth.
*Specifically uses:*
* :class:`burnman.mineral.Mineral`
*Demonstrates:*
* how to create your own minerals
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
# A note about units: all the material parameters are expected to be in plain SI units.
# This means that the elastic moduli should be in Pascals and NOT Gigapascals,
# and the Debye temperature should be in K not C. Additionally, the reference volume
# should be in m^3/(mol molecule) and not in unit cell volume and 'n' should be
# the number of atoms per molecule. Frequently in the literature the reference volume
# is given in Angstrom^3 per unit cell. To convert this to m^3/(mol of molecule)
# you should multiply by 10^(-30) * N_a / Z, where N_a is Avogadro's number and Z is the number of
# atoms per unit cell. You can look up Z in many places, including
# www.mindat.org
if __name__ == "__main__":

    # input variables ###
    #
    # INPUT for method
    """ choose 'slb2' (finite-strain 2nd order shear modulus,
    stixrude and lithgow-bertelloni, 2005)
    or 'slb3 (finite-strain 3rd order shear modulus,
    stixrude and lithgow-bertelloni, 2005)
    or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus,
    matas et al. 2007)
    or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus,
    matas et al. 2007)
    or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
    (your choice in geotherm will not matter in this case))
    or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
    (your choice in geotherm will not matter in this case)
    or 'vinet' (vinet equation of state, if you choose to ignore temperature
    (your choice in geotherm will not matter in this case)))"""
    method = 'slb3'

    # in form name_of_mineral (burnman.mineral <- creates list with parameters)
    class own_material (burnman.Mineral):

        def __init__(self):
            # All parameters are plain SI units (see the note at the top of
            # this file): moduli in Pa, volume in m^3/mol, temperature in K.
            self.params = {
                'name': 'myownmineral',
                'equation_of_state': method,
                'V_0': 10.844e-6,  # Molar volume [m^3/(mole molecules)]
                # at room pressure/temperature
                'K_0': 135.19e9,  # Reference bulk modulus [Pa]
                # at room pressure/temperature
                'Kprime_0': 6.04,  # pressure derivative of bulk modulus
                'G_0': 175.0e9,  # reference shear modulus
                # at room pressure/temperature
                'Gprime_0': 1.7,  # pressure derivative of shear modulus
                'molar_mass': .055845,  # molar mass in units of [kg/mol]
                'n': 1,  # number of atoms per formula unit
                'Debye_0': 998.85,  # Debye temperature for material.
                # See Stixrude & Lithgow-Bertelloni, 2005 for values
                'grueneisen_0': 1.368,  # Gruneisen parameter for material.
                # See Stixrude & Lithgow-Bertelloni, 2005 for values
                'q_0': 0.917,  # isotropic strain derivative of gruneisen
                # parameter. Values in Stixrude & Lithgow-Bertelloni, 2005
                'eta_s_0': 3.0  # full strain derivative of gruneisen parameter
                # parameter. Values in Stixrude & Lithgow-Bertelloni, 2005
            }
            burnman.Mineral.__init__(self)

    rock = own_material()

    # seismic model for comparison: (see burnman/seismic.py)
    seismic_model = burnman.seismic.PREM()  # pick from .prem() .slow() .fast()
    number_of_points = 20  # set on how many depth slices the computations should be done
    depths = np.linspace(700e3, 2800e3, number_of_points)
    # depths = seismic_model.internal_depth_list(mindepth=700.e3,
    # maxdepth=2800.e3)
    # Sample the reference seismic profile at the chosen depths.
    seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
        ['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)

    temperature = burnman.geotherm.brown_shankland(depths)

    # The next line is not required here, because the method is set
    # automatically by defining 'equation_of_state' in mineral.params. This
    # shows an alternative way to set the method later, or reset the method to
    # a different one.
    rock.set_method(method)

    print("Calculations are done for:")
    rock.debug_print()

    # Compute the model's density and velocities at the seismic pressures
    # along the geotherm.
    mat_rho, mat_vs, mat_vphi = \
        rock.evaluate(['density', 'v_s', 'v_phi'], seis_p, temperature)

    # Chi-factor misfit between the model and the seismic reference.
    [vs_err, vphi_err, rho_err] = \
        burnman.compare_chifactor(
            [mat_vs, mat_vphi, mat_rho], [seis_vs, seis_vphi, seis_rho])

    print(vs_err, vphi_err, rho_err)
| CaymanUnterborn/burnman | examples/example_user_input_material.py | Python | gpl-2.0 | 5,259 | [
"Avogadro"
] | dda3a8469e8c8d122d319ccadb71aeb7f01f7064b7dc6eeb86a763a9b450db9c |
########################################################################
# File : AgentModule.py
# Author : Adria Casajus
########################################################################
"""
Base class for all agent modules
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import threading
import time
import signal
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger, rootPath
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Core.Utilities import Time, MemStat, Network
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
class AgentModule(object):
  """ Base class for all agent modules

  This class is used by the AgentReactor Class to steer the execution of
  DIRAC Agents.

  For this purpose the following methods are used:
  - am_initialize() just after instantiated
  - am_getPollingTime() to set the execution frequency
  - am_getMaxCycles() to determine the number of cycles
  - am_go() for the actual execution of one cycle

  Before each iteration, the following methods are used to determine
  if the new cycle is to be started.
  - am_getModuleParam( 'alive' )
  - am_checkStopAgentFile()
  - am_removeStopAgentFile()

  To start new execution cycle the following methods are used
  - am_getCyclesDone()
  - am_setOption( 'MaxCycles', maxCycles )

  At the same time it provides all Agents with common interface.
  All Agent class must inherit from this base class and must implement
  at least the following method:
  - execute() main method called in the agent cycle

  Additionally they may provide:
  - initialize() for initial settings
  - finalize() the graceful exit
  - beginExecution() before each execution cycle
  - endExecution() at the end of each execution cycle

  The agent can be stopped either by a signal or by creating a 'stop_agent' file
  in the controlDirectory defined in the agent configuration
  """

  def __init__(self, agentName, loadName, baseAgentName=False, properties=None):
    """
    Common __init__ method for all Agents.
    All Agent modules must define:
      __doc__
      __RCSID__
    They are used to populate __codeProperties

    The following Options are used from the Configuration:
      - /LocalSite/InstancePath
      - /DIRAC/Setup
      - Status
      - Enabled
      - PollingTime            default = 120
      - MaxCycles              default = 500
      - WatchdogTime           default = 0 (disabled)
      - ControlDirectory       control/SystemName/AgentName
      - WorkDirectory          work/SystemName/AgentName
      - shifterProxy           ''
      - shifterProxyLocation   WorkDirectory/SystemName/AgentName/.shifterCred

    It defines the following default Options that can be set via Configuration (above):
      - MonitoringEnabled     True
      - Enabled               True if Status == Active
      - PollingTime           120
      - MaxCycles             500
      - ControlDirectory      control/SystemName/AgentName
      - WorkDirectory         work/SystemName/AgentName
      - shifterProxy          False
      - shifterProxyLocation  work/SystemName/AgentName/.shifterCred

    different defaults can be set in the initialize() method of the Agent using am_setOption()

    In order to get a shifter proxy in the environment during the execute()
    the configuration Option 'shifterProxy' must be set, a default may be given
    in the initialize() method.
    """
    # Fix for the mutable-default-argument pitfall: the previous
    # 'properties={}' default was a single dict object shared by every
    # instantiation. Normalizing None to a fresh dict preserves behavior.
    if properties is None:
      properties = {}
    if baseAgentName and agentName == baseAgentName:
      self.log = gLogger
      standaloneModule = True
    else:
      self.log = gLogger.getSubLogger(agentName, child=False)
      standaloneModule = False

    self.__basePath = gConfig.getValue('/LocalSite/InstancePath', rootPath)
    self.__agentModule = None
    self.__codeProperties = {}
    self.__getCodeInfo()

    self.__moduleProperties = {'fullName': agentName,
                               'loadName': loadName,
                               'section': PathFinder.getAgentSection(agentName),
                               'loadSection': PathFinder.getAgentSection(loadName),
                               'standalone': standaloneModule,
                               'cyclesDone': 0,
                               'totalElapsedTime': 0,
                               'setup': gConfig.getValue("/DIRAC/Setup", "Unknown"),
                               'alive': True}
    self.__moduleProperties['system'], self.__moduleProperties['agentName'] = agentName.split("/")
    self.__configDefaults = {}
    self.__configDefaults['MonitoringEnabled'] = True
    self.__configDefaults['Enabled'] = self.am_getOption("Status", "Active").lower() in ('active')
    self.__configDefaults['PollingTime'] = self.am_getOption("PollingTime", 120)
    self.__configDefaults['MaxCycles'] = self.am_getOption("MaxCycles", 500)
    self.__configDefaults['WatchdogTime'] = self.am_getOption("WatchdogTime", 0)
    self.__configDefaults['ControlDirectory'] = os.path.join(self.__basePath,
                                                             'control',
                                                             *agentName.split("/"))
    self.__configDefaults['WorkDirectory'] = os.path.join(self.__basePath,
                                                          'work',
                                                          *agentName.split("/"))
    self.__configDefaults['shifterProxy'] = ''
    self.__configDefaults['shifterProxyLocation'] = os.path.join(self.__configDefaults['WorkDirectory'],
                                                                 '.shifterCred')

    if isinstance(properties, dict):
      for key in properties:
        self.__moduleProperties[key] = properties[key]
      self.__moduleProperties['executors'] = [(self.execute, ())]
      self.__moduleProperties['shifterProxy'] = False

    self.__monitorLastStatsUpdate = -1
    self.monitor = None
    self.__initializeMonitor()
    self.__initialized = False

  def __getCodeInfo(self):
    # Populate __codeProperties from the concrete agent module's
    # __RCSID__ and __doc__ attributes, falling back to 'unset'.
    versionVar = "__RCSID__"
    docVar = "__doc__"
    try:
      self.__agentModule = __import__(self.__class__.__module__,
                                      globals(),
                                      locals(),
                                      versionVar)
    except Exception as excp:
      self.log.exception("Cannot load agent module", lException=excp)
    for prop in ((versionVar, "version"), (docVar, "description")):
      try:
        self.__codeProperties[prop[1]] = getattr(self.__agentModule, prop[0])
      except Exception:
        self.log.error("Missing property", prop[0])
        self.__codeProperties[prop[1]] = 'unset'
    self.__codeProperties['DIRACVersion'] = DIRAC.version
    self.__codeProperties['platform'] = DIRAC.getPlatform()

  def am_initialize(self, *initArgs):
    """ Common initialization for all the agents.

    This is executed every time an agent (re)starts.
    This is called by the AgentReactor, should not be overridden.
    """
    agentName = self.am_getModuleParam('fullName')
    result = self.initialize(*initArgs)
    if not isReturnStructure(result):
      return S_ERROR("initialize must return S_OK/S_ERROR")
    if not result['OK']:
      return S_ERROR("Error while initializing %s: %s" % (agentName, result['Message']))
    mkDir(self.am_getControlDirectory())
    workDirectory = self.am_getWorkDirectory()
    mkDir(workDirectory)
    # Set the work directory in an environment variable available to subprocesses if needed
    os.environ['AGENT_WORKDIRECTORY'] = workDirectory

    self.__moduleProperties['shifterProxy'] = self.am_getOption('shifterProxy')
    if self.am_monitoringEnabled() and not self.activityMonitoring:
      self.monitor.enable()
    if len(self.__moduleProperties['executors']) < 1:
      return S_ERROR("At least one executor method has to be defined")
    if not self.am_Enabled():
      return S_ERROR("Agent is disabled via the configuration")
    self.log.notice("=" * 40)
    self.log.notice("Loaded agent module %s" % self.__moduleProperties['fullName'])
    self.log.notice(" Site: %s" % DIRAC.siteName())
    self.log.notice(" Setup: %s" % gConfig.getValue("/DIRAC/Setup"))
    self.log.notice(" Base Module version: %s " % __RCSID__)
    self.log.notice(" Agent version: %s" % self.__codeProperties['version'])
    self.log.notice(" DIRAC version: %s" % DIRAC.version)
    self.log.notice(" DIRAC platform: %s" % DIRAC.getPlatform())
    pollingTime = int(self.am_getOption('PollingTime'))
    if pollingTime > 3600:
      self.log.notice(" Polling time: %s hours" % (pollingTime / 3600.))
    else:
      self.log.notice(" Polling time: %s seconds" % self.am_getOption('PollingTime'))
    self.log.notice(" Control dir: %s" % self.am_getControlDirectory())
    self.log.notice(" Work dir: %s" % self.am_getWorkDirectory())
    if self.am_getOption('MaxCycles') > 0:
      self.log.notice(" Cycles: %s" % self.am_getMaxCycles())
    else:
      self.log.notice(" Cycles: unlimited")
    if self.am_getWatchdogTime() > 0:
      self.log.notice(" Watchdog interval: %s" % self.am_getWatchdogTime())
    else:
      self.log.notice(" Watchdog interval: disabled ")
    self.log.notice("=" * 40)
    self.__initialized = True
    return S_OK()

  def am_getControlDirectory(self):
    """Return the absolute path of the agent's control directory."""
    return os.path.join(self.__basePath, str(self.am_getOption('ControlDirectory')))

  def am_getStopAgentFile(self):
    """Return the path of the 'stop_agent' flag file."""
    return os.path.join(self.am_getControlDirectory(), 'stop_agent')

  def am_checkStopAgentFile(self):
    """Return True if the 'stop_agent' flag file exists."""
    return os.path.isfile(self.am_getStopAgentFile())

  def am_createStopAgentFile(self):
    """Create the 'stop_agent' flag file (best-effort, errors ignored)."""
    try:
      with open(self.am_getStopAgentFile(), 'w') as fd:
        fd.write('Dirac site agent Stopped at %s' % Time.toString())
    except Exception:
      pass

  def am_removeStopAgentFile(self):
    """Remove the 'stop_agent' flag file (best-effort, errors ignored)."""
    try:
      os.unlink(self.am_getStopAgentFile())
    except Exception:
      pass

  def am_getBasePath(self):
    """Return the instance base path."""
    return self.__basePath

  def am_getWorkDirectory(self):
    """Return the absolute path of the agent's work directory."""
    return os.path.join(self.__basePath, str(self.am_getOption('WorkDirectory')))

  def am_getShifterProxyLocation(self):
    """Return the absolute path of the shifter proxy file."""
    return os.path.join(self.__basePath, str(self.am_getOption('shifterProxyLocation')))

  def am_getOption(self, optionName, defaultValue=None):
    """ Gets an option from the agent's configuration section.
        The section will be a subsection of the /Systems section in the CS.
    """
    if defaultValue is None:
      if optionName in self.__configDefaults:
        defaultValue = self.__configDefaults[optionName]
    if optionName and optionName[0] == "/":
      return gConfig.getValue(optionName, defaultValue)
    # Look the option up in the agent's own section first, then in the
    # section of the module it was loaded from.
    for section in (self.__moduleProperties['section'], self.__moduleProperties['loadSection']):
      result = gConfig.getOption("%s/%s" % (section, optionName), defaultValue)
      if result['OK']:
        return result['Value']
    return defaultValue

  def am_setOption(self, optionName, value):
    """Set a default value for a configuration option."""
    self.__configDefaults[optionName] = value

  def am_getModuleParam(self, optionName):
    """Return a module property by name."""
    return self.__moduleProperties[optionName]

  def am_setModuleParam(self, optionName, value):
    """Set a module property by name."""
    self.__moduleProperties[optionName] = value

  def am_getPollingTime(self):
    """Return the polling time in seconds."""
    return self.am_getOption("PollingTime")

  def am_getMaxCycles(self):
    """Return the maximum number of cycles to execute."""
    return self.am_getOption("MaxCycles")

  def am_getWatchdogTime(self):
    """Return the watchdog interval in seconds (0 means disabled)."""
    return int(self.am_getOption("WatchdogTime"))

  def am_getCyclesDone(self):
    """Return the number of cycles executed so far."""
    return self.am_getModuleParam('cyclesDone')

  def am_Enabled(self):
    """Return True if the agent is enabled in the configuration."""
    return self.am_getOption("Enabled")

  def am_disableMonitoring(self):
    """Disable the activity monitoring for this agent."""
    self.am_setOption('MonitoringEnabled', False)

  def am_monitoringEnabled(self):
    """Return True if monitoring is enabled."""
    return self.am_getOption("MonitoringEnabled")

  def am_stopExecution(self):
    """Flag the agent to stop after the current cycle."""
    self.am_setModuleParam('alive', False)

  def __initializeMonitor(self):
    """
    Initialize the system monitoring.
    """
    # This flag is used to activate ES based monitoring
    # if the "EnableActivityMonitoring" flag in "yes" or "true" in the cfg file.
    self.activityMonitoring = (
        Operations().getValue("EnableActivityMonitoring", False) or
        self.am_getOption("EnableActivityMonitoring", False)
    )
    if self.activityMonitoring:
      # The import needs to be here because of the CS must be initialized before importing
      # this class (see https://github.com/DIRACGrid/DIRAC/issues/4793)
      from DIRAC.MonitoringSystem.Client.MonitoringReporter import MonitoringReporter
      self.activityMonitoringReporter = MonitoringReporter(monitoringType="ComponentMonitoring")
      # With the help of this periodic task we commit the data to ES at an interval of 100 seconds.
      gThreadScheduler.addPeriodicTask(100, self.__activityMonitoringReporting)
    else:
      if self.__moduleProperties['standalone']:
        self.monitor = gMonitor
      else:
        self.monitor = MonitoringClient()
      self.monitor.setComponentType(self.monitor.COMPONENT_AGENT)
      self.monitor.setComponentName(self.__moduleProperties['fullName'])
      self.monitor.initialize()
      self.monitor.registerActivity('CPU', "CPU Usage", 'Framework', "CPU,%", self.monitor.OP_MEAN, 600)
      self.monitor.registerActivity('MEM', "Memory Usage", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600)
      # Component monitor
      for field in ('version', 'DIRACVersion', 'description', 'platform'):
        self.monitor.setComponentExtraParam(field, self.__codeProperties[field])
      self.monitor.setComponentExtraParam('startTime', Time.dateTime())
      self.monitor.setComponentExtraParam('cycles', 0)
      self.monitor.disable()
      self.__monitorLastStatsUpdate = time.time()

  def am_secureCall(self, functor, args=(), name=False):
    """Call functor(*args), converting any exception into S_ERROR."""
    if not name:
      name = str(functor)
    try:
      result = functor(*args)
      if not isReturnStructure(result):
        raise Exception(
            "%s method for %s module has to return S_OK/S_ERROR" %
            (name, self.__moduleProperties['fullName']))
      return result
    except Exception as e:
      self.log.exception("Agent exception while calling method %s" % name, lException=e)
      return S_ERROR("Exception while calling %s method: %s" % (name, str(e)))

  def _setShifterProxy(self):
    # Put the configured shifter proxy into the environment, if any.
    if self.__moduleProperties["shifterProxy"]:
      result = setupShifterProxyInEnv(self.__moduleProperties["shifterProxy"],
                                      self.am_getShifterProxyLocation())
      if not result['OK']:
        self.log.error("Failed to set shifter proxy", result['Message'])
        return result
    return S_OK()

  def am_go(self):
    """Execute one agent cycle, with monitoring and watchdog handling."""
    # Set the shifter proxy if required
    result = self._setShifterProxy()
    if not result['OK']:
      return result
    self.log.notice("-" * 40)
    self.log.notice("Starting cycle for module %s" % self.__moduleProperties['fullName'])
    mD = self.am_getMaxCycles()
    if mD > 0:
      cD = self.__moduleProperties['cyclesDone']
      self.log.notice("Remaining %s of %s cycles" % (mD - cD, mD))
    self.log.notice("-" * 40)
    # use SIGALARM as a watchdog interrupt if enabled
    watchdogInt = self.am_getWatchdogTime()
    if watchdogInt > 0:
      signal.signal(signal.SIGALRM, signal.SIG_DFL)
      signal.alarm(watchdogInt)
    elapsedTime = time.time()
    cpuStats = self._startReportToMonitoring()
    cycleResult = self.__executeModuleCycle()
    if cpuStats:
      self._endReportToMonitoring(*cpuStats)
    # Increment counters
    self.__moduleProperties['cyclesDone'] += 1
    # Show status
    elapsedTime = time.time() - elapsedTime
    self.__moduleProperties['totalElapsedTime'] += elapsedTime
    self.log.notice("-" * 40)
    self.log.notice("Agent module %s run summary" % self.__moduleProperties['fullName'])
    self.log.notice(" Executed %s times previously" % self.__moduleProperties['cyclesDone'])
    self.log.notice(" Cycle took %.2f seconds" % elapsedTime)
    averageElapsedTime = self.__moduleProperties['totalElapsedTime'] / self.__moduleProperties['cyclesDone']
    self.log.notice(" Average execution time: %.2f seconds" % (averageElapsedTime))
    elapsedPollingRate = averageElapsedTime * 100 / self.am_getOption('PollingTime')
    self.log.notice(" Polling time: %s seconds" % self.am_getOption('PollingTime'))
    self.log.notice(" Average execution/polling time: %.2f%%" % elapsedPollingRate)
    if cycleResult['OK']:
      self.log.notice(" Cycle was successful")
      if self.activityMonitoring:
        # Here we record the data about the cycle duration along with some basic details about the
        # component and right now it isn't committed to the ES backend.
        self.activityMonitoringReporter.addRecord({
            'timestamp': int(Time.toEpoch()),
            'host': Network.getFQDN(),
            'componentType': "agent",
            'component': "_".join(self.__moduleProperties['fullName'].split("/")),
            'cycleDuration': elapsedTime,
            'cycles': 1
        })
    else:
      self.log.warn(" Cycle had an error:", cycleResult['Message'])
    self.log.notice("-" * 40)
    # Update number of cycles
    if not self.activityMonitoring:
      self.monitor.setComponentExtraParam('cycles', self.__moduleProperties['cyclesDone'])
    # cycle finished successfully, cancel watchdog
    if watchdogInt > 0:
      signal.alarm(0)
    return cycleResult

  def _startReportToMonitoring(self):
    # Returns (wallTime, cpuTime) to be passed to _endReportToMonitoring,
    # or False when no report should be made this cycle.
    try:
      if not self.activityMonitoring:
        now = time.time()
        stats = os.times()
        cpuTime = stats[0] + stats[2]
        if now - self.__monitorLastStatsUpdate < 10:
          return (now, cpuTime)
        # Send CPU consumption mark
        self.__monitorLastStatsUpdate = now
        # Send Memory consumption mark
        membytes = MemStat.VmB('VmRSS:')
        if membytes:
          mem = membytes / (1024. * 1024.)
          # NOTE(review): this reports through the global gMonitor rather
          # than self.monitor -- confirm that is intended for non-standalone
          # agents.
          gMonitor.addMark('MEM', mem)
        return(now, cpuTime)
      else:
        return False
    except Exception:
      return False

  def _endReportToMonitoring(self, initialWallTime, initialCPUTime):
    # Report the CPU usage percentage over the just-finished cycle.
    wallTime = time.time() - initialWallTime
    stats = os.times()
    cpuTime = stats[0] + stats[2] - initialCPUTime
    percentage = 0
    if wallTime:
      percentage = cpuTime / wallTime * 100.
    if percentage > 0:
      gMonitor.addMark('CPU', percentage)

  def __executeModuleCycle(self):
    # Execute the beginExecution function
    result = self.am_secureCall(self.beginExecution, name="beginExecution")
    if not result['OK']:
      return result
    # Launch executor functions
    executors = self.__moduleProperties['executors']
    if len(executors) == 1:
      result = self.am_secureCall(executors[0][0], executors[0][1])
      if not result['OK']:
        return result
    else:
      exeThreads = [threading.Thread(target=executor[0], args=executor[1]) for executor in executors]
      for thread in exeThreads:
        # Use the daemon attribute; Thread.setDaemon() is deprecated.
        thread.daemon = True
        thread.start()
      for thread in exeThreads:
        thread.join()
    # Execute the endExecution function
    return self.am_secureCall(self.endExecution, name="endExecution")

  def initialize(self, *args, **kwargs):
    """ Agents should override this method for specific initialization.
        Executed at every agent (re)start.
    """
    return S_OK()

  def beginExecution(self):
    """Hook executed before each cycle; agents may override."""
    return S_OK()

  def endExecution(self):
    """Hook executed after each cycle; agents may override."""
    return S_OK()

  def finalize(self):
    """Hook executed at agent shutdown; agents may override."""
    return S_OK()

  def execute(self):
    """Main cycle method; concrete agents must override."""
    return S_ERROR("Execute method has to be overwritten by agent module")

  def __activityMonitoringReporting(self):
    """ This method is called by the ThreadScheduler as a periodic task in order to commit the collected data which
    is done by the MonitoringReporter and is send to the 'ComponentMonitoring' type.

    :return: True / False
    """
    result = self.activityMonitoringReporter.commit()
    return result['OK']
| yujikato/DIRAC | src/DIRAC/Core/Base/AgentModule.py | Python | gpl-3.0 | 20,512 | [
"DIRAC"
] | 157cb4fa27397f18f3332802b6fdc821f49d7c9702dca417cce7561344b53772 |
from spectrum import *
from numpy.testing import assert_almost_equal
import pytest
def test_class_Window():
    """Exercise the Window class API and its error handling.

    FIX: the original ``try: ...; assert False / except: assert True`` pattern
    could never fail, because the bare ``except`` also swallowed the
    ``AssertionError`` raised by ``assert False``. Rewritten with
    try/except/else so a missing exception actually fails the test.
    """
    w = Window(65, name='hann')
    # Touch the lazily-computed properties / plotting helpers.
    w.enbw
    w.mean_square
    w.frequencies
    w.plot_time_freq()
    w.compute_response(NFFT=32)
    # An unknown window name must be rejected by the constructor.
    try:
        Window(64, name='wrong')
    except Exception:
        pass
    else:
        raise AssertionError("Window should reject an unknown window name")
    w.info()
    print(w)
    # recompute response
    w = Window(65, name='hann')
    w.response
    w.plot_frequencies(maxdB=120, mindB=-100)
    try:
        Window(65, name="dummy")
    except Exception:
        pass
    else:
        raise AssertionError("Window should reject an unknown window name")
#unittest of create_window
def test_create_window_error():
    """create_window must reject an invalid first argument.

    FIX: the original ``assert False`` inside the ``try`` was swallowed by the
    bare ``except``, so the test could never fail; use try/except/else instead.
    """
    try:
        create_window('dummy')
    except Exception:
        pass
    else:
        raise AssertionError("create_window('dummy') should raise")
#test that create_window(N, name) works for all valid names
# NOTE(review): zip() truncates to the 3 sizes given, so only the first three
# window names are actually exercised, despite the comment above saying "all
# valid names" -- presumably itertools.product(names, sizes) was intended;
# TODO confirm before changing.
@pytest.mark.parametrize('test_window_name,length',
                         [(name, size) for name, size in zip(window_names.keys(), [1,51,52])])
def test_create_window(test_window_name, length):
    # Smoke test: the (length, name) combination must not raise.
    create_window(length, name=test_window_name)
#test that create_window(N, name) is indeed equivalent to the direct call window_name
@pytest.mark.parametrize("name,param",
    [('blackman', {'alpha':2}),
     ('kaiser', {'beta':8.6}),
     ('gaussian', {'alpha':2.5}),
     ('chebwin', {'attenuation': 50}),
     ('flattop', {'mode':'symmetric'}),
     ('tukey', {'r': 0.5}),
     ('poisson', {'alpha': 2}),
     ('poisson_hanning', {'alpha': 2}),
     ('cauchy', {'alpha': 3})])
def test_check_window_switch(name, param):
    """create_window(N, name, **kw) must match the dedicated window_<name>."""
    # Both names come from the spectrum star-import, so a plain globals()
    # lookup resolves the same function eval() did.
    direct = globals()['window_' + name](64, **param)
    dispatched = create_window(64, name, **param)
    for left, right in zip(direct, dispatched):
        assert left == right
def test_create_window_others():
    """Unexpected keyword arguments must raise ValueError.

    FIXES over the original:
    - the ``assert False`` / bare ``except`` pattern could never fail;
    - the *valid* kaiser call lived inside the same ``try`` as the invalid
      one, so a failure of the valid call was silently treated as a pass.
    """
    try:
        create_window(11, "hamming", dummy=1)
    except ValueError:
        pass
    else:
        raise AssertionError("unexpected keyword should raise ValueError")
    # The valid call must succeed; keep it outside the try so its failure
    # is reported, not masked.
    create_window(11, "kaiser", beta=0.5)
    try:
        create_window(11, "kaiser", dummy=1)
    except ValueError:
        pass
    else:
        raise AssertionError("unexpected keyword should raise ValueError")
def test_bartlett():
    """Check window_bartlett against reference values (odd and even lengths)."""
    ref7 = array([0., 0.33333333, 0.66666667, 1., 0.66666667, 0.33333333, 0.])
    ref8 = array([0., 0.28571429, 0.57142857, 0.85714286, 0.85714286, 0.57142857, 0.28571429, 0.])
    # assert_almost_equal compares whole arrays element-wise (decimal=7).
    assert_almost_equal(window_bartlett(8), ref8)
    assert_almost_equal(window_bartlett(7), ref7)
def test_kaiser():
    """Check window_kaiser values and its alternative computation methods."""
    ref8 = array([0.00133251, 0.09113651, 0.45964377, 0.92046158, 0.92046158, 0.45964377, 0.09113651, 0.00133251])
    ref7 = array([0.00133251, 0.13040195, 0.63041193, 1., 0.63041193, 0.13040195, 0.00133251])
    assert_almost_equal(window_kaiser(8), ref8)
    assert_almost_equal(window_kaiser(7), ref7)
    # Exercise the alternative back-ends and the degenerate length-1 window.
    window_kaiser(8, method='other')
    window_kaiser(8, method='numpy')
    assert window_kaiser(1, method='numpy') == np.array([1])
def test_blackman():
    """Check window_blackman against reference values (default and alpha=0.16)."""
    ref8 = array([-1.38777878e-17, 9.04534244e-02, 4.59182958e-01, 9.20363618e-01, 9.20363618e-01, 4.59182958e-01, 9.04534244e-02, -1.38777878e-17])
    ref7 = array([-1.38777878e-17, 1.30000000e-01, 6.30000000e-01, 1.00000000e+00, 6.30000000e-01, 1.30000000e-01, -1.38777878e-17])
    assert_almost_equal(window_blackman(8), ref8)
    assert_almost_equal(window_blackman(7), ref7)
    # Explicit alpha=0.16 must reproduce the same reference values.
    assert_almost_equal(window_blackman(7, alpha=0.16), ref7)
    assert window_blackman(1, alpha=0.16) == np.array([1])
def test_hann():
    """Check window_hann against reference values (odd and even lengths)."""
    ref7 = array([0., 0.25, 0.75, 1., 0.75, 0.25, 0.])
    ref8 = array([0., 0.1882551, 0.61126047, 0.95048443, 0.95048443, 0.61126047, 0.1882551, 0.])
    assert_almost_equal(window_hann(8), ref8)
    assert_almost_equal(window_hann(7), ref7)
def test_hammming():
    """Check window_hamming against reference values.

    NOTE: the test name carries a historical "hammming" typo; kept unchanged
    so pytest collection is not altered.
    """
    ref8 = array([0.08, 0.25319469, 0.64235963, 0.95444568, 0.95444568, 0.64235963, 0.25319469, 0.08])
    ref7 = array([0.08, 0.31, 0.77, 1., 0.77, 0.31, 0.08])
    assert_almost_equal(window_hamming(8), ref8)
    assert_almost_equal(window_hamming(7), ref7)
def test_chebwin():
    """Check window_chebwin against reference values.

    FIX: the 8-point check called ``window_checbwin`` (typo -> NameError).
    NOTE(review): this definition is shadowed by a later ``test_chebwin`` in
    this module, so pytest never collected it and the NameError went unnoticed.
    """
    ref7 = array([0.1116911, 0.41962999, 0.81377359, 1., 0.81377359, 0.41962999, 0.1116911])
    ref8 = array([0.09455132, 0.34937508, 0.71822375, 1., 1., 0.71822375, 0.34937508, 0.09455132])
    assert_almost_equal(window_chebwin(8), ref8)  # was: window_checbwin
    assert_almost_equal(window_chebwin(7), ref7)
def test_gaussian():
    """Check window_gaussian against reference values (odd and even lengths)."""
    ref8 = array([0.09139376, 0.29502266, 0.64438872, 0.9523448, 0.9523448, 0.64438872, 0.29502266, 0.09139376])
    ref7 = array([0.1006689, 0.36044779, 0.77483743, 1., 0.77483743, 0.36044779, 0.1006689])
    assert_almost_equal(window_gaussian(8), ref8)
    assert_almost_equal(window_gaussian(7), ref7)
def test_cauchy():
    # Smoke test: the call must not raise.
    window_cauchy(64)
def test_cosine():
    # Smoke test plus the degenerate length-1 window.
    window_cosine(64)
    assert window_cosine(1) == np.array([1.])
def test_riemann():
    # Smoke test: the call must not raise.
    window_riemann(64)
def test_lanczos():
    # Smoke test plus the degenerate length-1 window.
    window_lanczos(64)
    assert window_lanczos(1) == np.array([1.])
def test_poisson():
    # Smoke test: the call must not raise.
    window_poisson(64)
    #assert window_poisson(1) == np.array([1.])
def test_poisson_hanning():
    # Smoke test: the call must not raise.
    window_poisson_hanning(64)
def test_bartlett_hann():
    """Check window_bartlett_hann against reference values."""
    ref7 = array([0., 0.27, 0.73, 1., 0.73, 0.27, 0.])
    ref8 = array([0., 0.2116453, 0.60170081, 0.92808246, 0.92808246, 0.60170081, 0.2116453, 0.])
    assert_almost_equal(window_bartlett_hann(8), ref8)
    assert_almost_equal(window_bartlett_hann(7), ref7)
    # Degenerate length-1 window.
    assert window_bartlett_hann(1) == np.array([1.])
def test_window_visu():
    # Smoke test for the plotting helper.
    window_visu(64, 'hamming')
def test_enbw():
    # The equivalent noise bandwidth of a rectangular window is exactly 1.
    N = 64
    w = create_window(N, 'rectangle')
    assert enbw(w) == 1.
def test_window_parzen():
    """Check window_parzen against reference values (odd and even lengths)."""
    ref7 = array([0.0058309, 0.1574344, 0.65014577, 1., 0.65014577, 0.1574344, 0.0058309])
    ref8 = array([0.00390625, 0.10546875, 0.47265625, 0.91796875, 0.91796875, 0.47265625, 0.10546875, 0.00390625])
    assert_almost_equal(window_parzen(8), ref8)
    assert_almost_equal(window_parzen(7), ref7)
def test_bohman():
    """Check window_bohman against reference values (odd and even lengths)."""
    ref8 = array([3.89804309e-17, 7.07247468e-02, 4.37484012e-01, 9.10368513e-01, 9.10368513e-01, 4.37484012e-01, 7.07247468e-02, 3.89804309e-17])
    ref7 = array([3.89804309e-17, 1.08997781e-01, 6.08997781e-01, 1.00000000e+00, 6.08997781e-01, 1.08997781e-01, 3.89804309e-17])
    assert_almost_equal(window_bohman(8), ref8)
    assert_almost_equal(window_bohman(7), ref7)
def test_chebwin():
    """Check window_chebwin against reference values.

    NOTE: this definition shadows an earlier test_chebwin in this module.
    """
    ref8 = array([0.09455132, 0.34937508, 0.71822375, 1., 1., 0.71822375, 0.34937508, 0.09455132])
    ref7 = array([0.1116911, 0.41962999, 0.81377359, 1., 0.81377359, 0.41962999, 0.1116911])
    assert_almost_equal(window_chebwin(8), ref8)
    assert_almost_equal(window_chebwin(7), ref7)
def test_nuttall():
    # Smoke test plus the degenerate length-1 window.
    window_nuttall(64)
    assert window_nuttall(1) == np.array([1.])
def test_blackman_nuttall():
    """Check window_blackman_nuttall against reference values."""
    ref7 = array([3.62800000e-04, 6.13345000e-02, 5.29229800e-01, 1.00000000e+00, 5.29229800e-01, 6.13345000e-02, 3.62800000e-04])
    ref8 = array([3.62800000e-04, 3.77757690e-02, 3.42727620e-01, 8.91851861e-01, 8.91851861e-01, 3.42727620e-01, 3.77757690e-02, 3.62800000e-04])
    assert_almost_equal(window_blackman_nuttall(8), ref8)
    assert_almost_equal(window_blackman_nuttall(7), ref7)
def test_blackman_harris():
    """Check window_blackman_harris against reference values."""
    ref7 = array([6.00000000e-05, 5.56450000e-02, 5.20575000e-01, 1.00000000e+00, 5.20575000e-01, 5.56450000e-02, 6.00000000e-05])
    ref8 = array([6.00000000e-05, 3.33917235e-02, 3.32833504e-01, 8.89369772e-01, 8.89369772e-01, 3.32833504e-01, 3.33917235e-02, 6.00000000e-05])
    assert_almost_equal(window_blackman_harris(8), ref8)
    assert_almost_equal(window_blackman_harris(7), ref7)
def test_flattop():
    """Check the octave-precision periodic flattop against reference values."""
    ref8 = array([9.05133399e-04, -2.64123651e-02, -5.55579501e-02, 4.43549557e-01, 1.00000000e+00, 4.43549557e-01, -5.55579501e-02, -2.64123651e-02])
    assert_almost_equal(window_flattop(8, 'periodic', precision='octave'), ref8)
    # Degenerate length-1 windows (default and periodic modes).
    assert window_flattop(1) == np.array([1.])
    window_flattop(1, "periodic")
def test_tukey():
    """Check window_tukey against reference values and boundary taper ratios."""
    ref8 = array([0., 0.61126047, 1., 1., 1., 1., 0.61126047, 0.])
    ref7 = array([0., 0.75, 1., 1., 1., 0.75, 0.])
    assert_almost_equal(window_tukey(8), ref8)
    assert_almost_equal(window_tukey(7), ref7)
    # Boundary taper ratios must not raise.
    window_tukey(64, r=0)
    window_tukey(64, r=1)
    assert window_tukey(1, r=1) == np.array([1.])
def test_window_rietz():
    # Smoke test for window_riesz. NOTE(review): the test name says "rietz"
    # but it exercises window_riesz -- presumably a typo in the test name;
    # kept unchanged so pytest collection is not altered.
    window_riesz(64)
| cokelaer/spectrum | test/test_window.py | Python | bsd-3-clause | 9,764 | [
"Gaussian"
] | 6c55017c3ba17228953863383416033a038cfc2c3304d1e3b3065ed7df18cdbe |
"""
This class brings together a L{solve.Solver} to choose a set of implmentations, a
L{fetch.Fetcher} to download additional components, and the user's configuration
settings.
@since: 0.53
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os
from zeroinstall.injector import arch, model
from zeroinstall.injector.model import network_offline
from zeroinstall.support import tasks
class Driver(object):
	"""Chooses a set of implementations based on a policy.
	Typical use:
	 1. Create a Driver object, giving it the requirements about the program to be run.
	 2. Call L{solve_with_downloads}. If more information is needed, a L{fetch.Fetcher} will be used to download it.
	 3. When all downloads are complete, the L{solver} contains the chosen versions.
	 4. Use L{get_uncached_implementations} to find where to get these versions and download them
	    using L{download_uncached_implementations}.
	@ivar target_arch: target architecture for binaries (deprecated)
	@type target_arch: L{arch.Architecture}
	@ivar solver: solver used to choose a set of implementations
	@type solver: L{solve.Solver}
	@ivar watchers: callbacks to invoke after solving
	"""
	__slots__ = ['watchers', 'requirements', 'config', 'target_arch', 'solver']
	def __init__(self, config, requirements):
		"""
		@param config: The configuration settings to use
		@type config: L{config.Config}
		@param requirements: Details about the program we want to run
		@type requirements: L{requirements.Requirements}
		@since: 0.53
		"""
		self.watchers = []
		assert config
		self.config = config
		assert requirements
		self.requirements = requirements
		self.target_arch = arch.get_architecture(requirements.os, requirements.cpu)
		# Imported lazily here rather than at module level
		# (NOTE(review): presumably to avoid a circular import -- confirm).
		from zeroinstall.injector.solver import DefaultSolver
		self.solver = DefaultSolver(self.config)
		logger.debug(_("Supported systems: '%s'"), arch.os_ranks)
		logger.debug(_("Supported processors: '%s'"), arch.machine_ranks)
		# Restrict the main interface to the user-requested version range.
		if requirements.before or requirements.not_before:
			self.solver.extra_restrictions[config.iface_cache.get_interface(requirements.interface_uri)] = [
					model.VersionRangeRestriction(model.parse_version(requirements.before),
								      model.parse_version(requirements.not_before))]
	def get_uncached_implementations(self):
		"""List all chosen implementations which aren't yet available locally.
		@rtype: [(L{model.Interface}, L{model.Implementation})]"""
		iface_cache = self.config.iface_cache
		stores = self.config.stores
		uncached = []
		# Walk the solver's selections and keep those not in the local stores.
		for uri, selection in self.solver.selections.selections.items():
			impl = selection.impl
			assert impl, self.solver.selections
			if not impl.is_available(stores):
				uncached.append((iface_cache.get_interface(uri), impl))
		return uncached
	@tasks.async
	def solve_with_downloads(self, force = False, update_local = False):
		"""Run the solver, then download any feeds that are missing or
		that need to be updated. Each time a new feed is imported into
		the cache, the solver is run again, possibly adding new downloads.
		@param force: whether to download even if we're already ready to run.
		@param update_local: fetch PackageKit feeds even if we're ready to run."""
		downloads_finished = set()		# Successful or otherwise
		downloads_in_progress = {}		# URL -> Download
		# There are three cases:
		# 1. We want to run immediately if possible. If not, download all the information we can.
		#    (force = False, update_local = False)
		# 2. We're in no hurry, but don't want to use the network unnecessarily.
		#    We should still update local information (from PackageKit).
		#    (force = False, update_local = True)
		# 3. The user explicitly asked us to refresh everything.
		#    (force = True)
		try_quick_exit = not (force or update_local)
		while True:
			self.solver.solve_for(self.requirements)
			# Notify registered watchers after every solve pass.
			for w in self.watchers: w()
			if try_quick_exit and self.solver.ready:
				break
			try_quick_exit = False
			# If we can't solve yet, we have to download everything we can.
			if not self.solver.ready:
				force = True
			for f in self.solver.feeds_used:
				if f in downloads_finished or f in downloads_in_progress:
					continue
				if os.path.isabs(f):
					# Local feed file: re-read from disk when forced;
					# the IdleBlocker just re-triggers the solve loop.
					if force:
						self.config.iface_cache.get_feed(f, force = True)
						downloads_in_progress[f] = tasks.IdleBlocker('Refresh local feed')
					continue
				elif f.startswith('distribution:'):
					# Distribution (e.g. PackageKit) feeds are cheap to
					# refresh, so do it even in update_local mode.
					if force or update_local:
						downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
				elif force and self.config.network_use != network_offline:
					downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
					# Once we've starting downloading some things,
					# we might as well get them all.
					force = True
			if not downloads_in_progress:
				if self.config.network_use == network_offline:
					logger.info(_("Can't choose versions and in off-line mode, so aborting"))
				break
			# Wait for at least one download to finish
			blockers = downloads_in_progress.values()
			yield blockers
			tasks.check(blockers, self.config.handler.report_error)
			# Move completed downloads to the finished set and invalidate
			# any dependent "distribution:" feed so it gets refetched.
			for f in list(downloads_in_progress.keys()):
				if f in downloads_in_progress and downloads_in_progress[f].happened:
					del downloads_in_progress[f]
					downloads_finished.add(f)
					# Need to refetch any "distribution" feed that
					# depends on this one
					distro_feed_url = 'distribution:' + f
					if distro_feed_url in downloads_finished:
						downloads_finished.remove(distro_feed_url)
					if distro_feed_url in downloads_in_progress:
						del downloads_in_progress[distro_feed_url]
	@tasks.async
	def solve_and_download_impls(self, refresh = False, select_only = False):
		"""Run L{solve_with_downloads} and then get the selected implementations too.
		@raise SafeException: if we couldn't select a set of implementations
		@since: 0.40"""
		refreshed = self.solve_with_downloads(refresh)
		if refreshed:
			yield refreshed
			tasks.check(refreshed)
		if not self.solver.ready:
			raise self.solver.get_failure_reason()
		if not select_only:
			downloaded = self.download_uncached_implementations()
			if downloaded:
				yield downloaded
				tasks.check(downloaded)
	def need_download(self):
		"""Decide whether we need to download anything (but don't do it!)
		@return: true if we MUST download something (feeds or implementations)
		@rtype: bool"""
		self.solver.solve_for(self.requirements)
		for w in self.watchers: w()
		if not self.solver.ready:
			return True		# Maybe a newer version will work?
		if self.get_uncached_implementations():
			return True
		return False
	def download_uncached_implementations(self):
		"""Download all implementations chosen by the solver that are missing from the cache."""
		assert self.solver.ready, "Solver is not ready!\n%s" % self.solver.selections
		stores = self.config.stores
		return self.config.fetcher.download_impls([impl for impl in self.solver.selections.values() if not impl.is_available(stores)],
							  stores)
| timdiels/0install | zeroinstall/injector/driver.py | Python | lgpl-2.1 | 7,057 | [
"VisIt"
] | bf40e8156b9dc95f533d8747e42d2bc9f06545b42fc06814cf3f607055be875d |
#!/usr/bin/env python
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Main Madpack installation executable.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import sys
import getpass
import re
import os
import glob
import traceback
import subprocess
import datetime
import tempfile
import shutil
import unittest
from upgrade_util import ChangeHandler
from upgrade_util import ViewDependency
from upgrade_util import TableDependency
from upgrade_util import ScriptCleaner
from itertools import izip_longest
# Required Python version
py_min_ver = [2, 6]
# Check python version
if sys.version_info[:2] < py_min_ver:
    print("ERROR: python version too old (%s). You need %s or greater." %
          ('.'.join(str(i) for i in sys.version_info[:3]), '.'.join(str(i) for i in py_min_ver)))
    exit(1)
# Find MADlib root directory. This file is installed to
# $MADLIB_ROOT/madpack/madpack.py, so to get $MADLIB_ROOT we need to go
# two levels up in the directory hierarchy. We use (a) os.path.realpath and
# (b) __file__ (instead of sys.argv[0]) because madpack.py could be called
# (a) through a symbolic link and (b) not as the main module.
maddir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/..")  # MADlib root dir
sys.path.append(maddir + "/madpack")
# Import MADlib python modules (project-local; found via the path added above)
import argparse
import configyml
# Some read-only variables
this = os.path.basename(sys.argv[0])  # name of this script
# Default directories
maddir_conf = maddir + "/config"  # Config dir
maddir_lib = maddir + "/lib/libmadlib.so"  # C/C++ libraries
# Read the config files
ports = configyml.get_ports(maddir_conf)  # object made of Ports.yml
rev = configyml.get_version(maddir_conf)  # MADlib OS-level version
portid_list = []
for port in ports:
    portid_list.append(port)
SUPPORTED_PORTS = ('postgres', 'greenplum', 'hawq')
# Global variables -- filled in at runtime by the argument/connection setup
# code and read by most helper functions below.
portid = None  # Target port ID (eg: pg90, gp40)
dbconn = None  # DB Connection object
dbver = None  # DB version
con_args = {}  # DB connection arguments
verbose = None  # Verbose flag
keeplogs = None
tmpdir = None
is_hawq2 = False
def _make_dir(dir):
"""
# Create a temp dir
# @param dir temp directory path
"""
if not os.path.isdir(dir):
try:
os.makedirs(dir)
except:
print "ERROR: can not create directory: %s. Check permissions." % dir
exit(1)
# ------------------------------------------------------------------------------
def _error(msg, stop):
    """
    Error message wrapper
    @param msg error message
    @param stop program exit flag; when True the process exits with status 2
    """
    # Print to stdout
    print this + ' : ERROR : ' + msg
    # stack trace is not printed
    if stop:
        exit(2)
# ------------------------------------------------------------------------------
def _info(msg, verbose=True):
    """
    Info message wrapper (verbose)
    @param msg info message
    @param verbose prints only if True
    """
    # Print to stdout
    if verbose:
        print this + ' : INFO : ' + msg
# ------------------------------------------------------------------------------
# NOTE(review): the default `con_args=con_args` binds the module-level dict at
# import time; this works only because con_args is mutated in place elsewhere.
# Confirm before refactoring the default.
def run_query(sql, show_error, con_args=con_args):
    """
    Run a SQL command through the `psql` command-line utility and return the
    result rows as a list of column-name -> value dicts.

    @param sql query text to execute (single statement, no multi-line text output)
    @param show_error when True, print the SQL error message on failure
    @param con_args DB connection arguments (host, database, user, password)
    @raise EnvironmentError if the failure looks password-related
    @raise Exception on any other query failure
    """
    # Define sqlcmd
    sqlcmd = 'psql'
    delimiter = ' <$madlib_delimiter$> '
    # Test the DB cmd line utility
    std, err = subprocess.Popen(['which', sqlcmd], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()
    if std == '':
        _error("Command not found: %s" % sqlcmd, True)
    # Run the query; host is given as host:port and split into the two flags.
    runcmd = [sqlcmd,
              '-h', con_args['host'].split(':')[0],
              '-p', con_args['host'].split(':')[1],
              '-d', con_args['database'],
              '-U', con_args['user'],
              '-F', delimiter,
              '--no-password',
              '--no-psqlrc',
              '--no-align',
              '-c', sql]
    # NOTE: runenv aliases os.environ, so these assignments mutate the
    # current process environment, not a copy.
    runenv = os.environ
    if 'password' in con_args:
        runenv["PGPASSWORD"] = con_args['password']
    runenv["PGOPTIONS"] = '-c search_path=public -c client_min_messages=error'
    std, err = subprocess.Popen(runcmd, env=runenv, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()
    if err:
        if show_error:
            _error("SQL command failed: \nSQL: %s \n%s" % (sql, err), False)
        if 'password' in err:
            raise EnvironmentError
        else:
            raise Exception
    # Convert the delimited output into a dictionary
    results = []  # list of rows
    i = 0
    for line in std.splitlines():
        if i == 0:
            # First line carries the column names.
            cols = [name for name in line.split(delimiter)]
        else:
            row = {}  # dict of col_name:col_value pairs
            c = 0
            for val in line.split(delimiter):
                row[cols[c]] = val
                c += 1
            results.insert(i, row)
        i += 1
    # Drop the last line: "(X rows)"
    try:
        results.pop()
    except:
        pass
    return results
# ------------------------------------------------------------------------------
def _internal_run_query(sql, show_error):
    """
    Runs a SQL query on the target platform DB
    using the default command-line utility.
    Very limited:
    - no text output with "new line" characters allowed
    @param sql query text to execute
    @param show_error displays the SQL error msg
    """
    # Thin wrapper binding the module-level connection arguments.
    return run_query(sql, show_error, con_args)
# ------------------------------------------------------------------------------
def _get_relative_maddir(maddir, port):
""" Return a relative path version of maddir
GPDB and HAWQ installations have a symlink outside of GPHOME that
links to the current GPHOME. After a DB upgrade, this symlink is updated to
the new GPHOME.
'maddir_lib', which uses the absolute path of GPHOME, is hardcoded into each
madlib function definition. Replacing the GPHOME path with the equivalent
relative path makes it simpler to perform DB upgrades without breaking MADlib.
"""
if port not in ('greenplum', 'hawq'):
# do nothing for postgres
return maddir
# e.g. maddir_lib = $GPHOME/madlib/Versions/1.9/lib/libmadlib.so
# 'madlib' is supposed to be in this path, which is the default folder
# used by GPPKG to install madlib
try:
abs_gphome, tail = maddir.split('madlib/')
except ValueError:
return maddir
link_name = 'greenplum-db' if port == 'greenplum' else 'hawq'
# Check outside $GPHOME if there is a symlink to this absolute path
# os.pardir is equivalent to ..
# os.path.normpath removes the extraneous .. from that path
rel_gphome = os.path.normpath(os.path.join(abs_gphome, os.pardir, link_name))
if os.path.islink(rel_gphome) and os.path.realpath(rel_gphome) == os.path.realpath(abs_gphome):
# if the relative link exists and is pointing to current location
return os.path.join(rel_gphome, 'madlib', tail)
else:
return maddir
# ------------------------------------------------------------------------------
def _run_sql_file(schema, maddir_mod_py, module, sqlfile,
                  tmpfile, logfile, pre_sql, upgrade=False,
                  sc=None):
    """
    Run SQL file: expand it with m4, optionally clean it for upgrade, then
    execute it through psql, logging output to logfile.
    @param schema name of the target schema
    @param maddir_mod_py name of the module dir with Python code
    @param module name of the module
    @param sqlfile name of the file to parse
    @param tmpfile name of the temp file to run
    @param logfile name of the log file (stdout)
    @param pre_sql optional SQL to run before executing the file
    @param upgrade are we upgrading as part of this sql run
    @param sc object of ScriptCleaner
    @return psql exit status code
    @raise ValueError if sqlfile does not exist
    @raise Exception on m4, log-file, or psql execution failure
    """
    # Check if the SQL file exists
    if not os.path.isfile(sqlfile):
        _error("Missing module SQL file (%s)" % sqlfile, False)
        raise ValueError("Missing module SQL file (%s)" % sqlfile)
    # Prepare the file using M4
    try:
        f = open(tmpfile, 'w')
        # Add the before SQL
        if pre_sql:
            f.writelines([pre_sql, '\n\n'])
            f.flush()
        # Find the madpack dir (platform specific or generic)
        if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/madpack"):
            maddir_madpack = maddir + "/ports/" + portid + "/" + dbver + "/madpack"
        else:
            maddir_madpack = maddir + "/madpack"
        maddir_ext_py = maddir + "/lib/python"
        # m4 macro definitions inject schema/paths into the SQL template.
        m4args = ['m4',
                  '-P',
                  '-DMADLIB_SCHEMA=' + schema,
                  '-DPLPYTHON_LIBDIR=' + maddir_mod_py,
                  '-DEXT_PYTHON_LIBDIR=' + maddir_ext_py,
                  '-DMODULE_PATHNAME=' + maddir_lib,
                  '-DMODULE_NAME=' + module,
                  '-I' + maddir_madpack,
                  sqlfile]
        _info("> ... parsing: " + " ".join(m4args), verbose)
        subprocess.call(m4args, stdout=f)
        f.close()
    except:
        _error("Failed executing m4 on %s" % sqlfile, False)
        raise Exception
    # Only update function definition
    sub_module = ''
    if upgrade:
        # get filename from complete path without the extension
        sub_module = os.path.splitext(os.path.basename(sqlfile))[0]
        _info(sub_module, False)
        # New modules are installed as-is; existing ones get cleaned so that
        # only changed definitions are re-applied.
        if sub_module not in sc.get_change_handler().newmodule:
            sql = open(tmpfile).read()
            sql = sc.cleanup(sql)
            open(tmpfile, 'w').write(sql)
    # Run the SQL using DB command-line utility
    if portid in ('greenplum', 'postgres', 'hawq'):
        sqlcmd = 'psql'
        # Test the DB cmd line utility
        std, err = subprocess.Popen(['which', sqlcmd], stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE).communicate()
        if not std:
            _error("Command not found: %s" % sqlcmd, True)
        # -a echoes input; ON_ERROR_STOP makes psql abort on the first error.
        runcmd = [sqlcmd, '-a',
                  '-v', 'ON_ERROR_STOP=1',
                  '-h', con_args['host'].split(':')[0],
                  '-p', con_args['host'].split(':')[1],
                  '-d', con_args['database'],
                  '-U', con_args['user'],
                  '--no-password',
                  '-f', tmpfile]
        # NOTE: runenv aliases os.environ, so these assignments mutate the
        # current process environment.
        runenv = os.environ
        if 'password' in con_args:
            runenv["PGPASSWORD"] = con_args['password']
        runenv["PGOPTIONS"] = '-c client_min_messages=notice'
    # Open log file
    try:
        log = open(logfile, 'w')
    except:
        _error("Cannot create log file: %s" % logfile, False)
        raise Exception
    # Run the SQL
    try:
        _info("> ... executing " + tmpfile, verbose)
        retval = subprocess.call(runcmd, env=runenv, stdout=log, stderr=log)
    except:
        _error("Failed executing %s" % tmpfile, False)
        raise Exception
    finally:
        log.close()
    return retval
# ------------------------------------------------------------------------------
def _get_madlib_dbrev(schema):
    """
    Read MADlib version from database
    @param schema MADlib schema name
    @return version string from the newest migrationhistory row, or None when
            the migrationhistory table is absent or empty
    """
    try:
        # The migrationhistory table only exists once MADlib was installed.
        row = _internal_run_query("SELECT count(*) AS cnt FROM pg_tables " +
                                  "WHERE schemaname='" + schema + "' AND " +
                                  "tablename='migrationhistory'", True)
        if int(row[0]['cnt']) > 0:
            row = _internal_run_query("""SELECT version FROM %s.migrationhistory
                ORDER BY applied DESC LIMIT 1""" % schema, True)
            if row:
                return row[0]['version']
    except:
        _error("Failed reading MADlib db version", True)
    return None
# ------------------------------------------------------------------------------
def _get_dbver():
    """ Read version number from database (of form X.Y)
    @return version string matched from pg_catalog.version(), or None
    """
    try:
        versionStr = _internal_run_query("SELECT pg_catalog.version()", True)[0]['version']
        if portid == 'postgres':
            match = re.search("PostgreSQL[a-zA-Z\s]*(\d+\.\d+)", versionStr)
        elif portid == 'greenplum':
            # for Greenplum the 3rd digit is necessary to differentiate
            # 4.3.5+ from versions < 4.3.5
            match = re.search("Greenplum[a-zA-Z\s]*(\d+\.\d+\.\d+)", versionStr)
        elif portid == 'hawq':
            match = re.search("HAWQ[a-zA-Z\s]*(\d+\.\d+)", versionStr)
        return None if match is None else match.group(1)
    except:
        _error("Failed reading database version", True)
# ------------------------------------------------------------------------------
def _check_db_port(portid):
    """
    Make sure we are connected to the expected DB platform.

    @param portid expected DB port id ('postgres', 'greenplum' or 'hawq')
    @return True when version() reports the expected platform, else False
    """
    try:
        row = _internal_run_query("SELECT version() AS version", True)
    except Exception:
        # FIX: narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are not swallowed; _error(..., True) terminates the process.
        _error("Cannot validate DB platform type", True)
    if row and row[0]['version'].lower().find(portid) >= 0:
        # Greenplum's version() also contains "PostgreSQL", and HAWQ's also
        # contains "Greenplum" -- exclude the more specific platforms.
        if portid == 'postgres':
            if row[0]['version'].lower().find('greenplum') < 0:
                return True
        elif portid == 'greenplum':
            if row[0]['version'].lower().find('hawq') < 0:
                return True
        elif portid == 'hawq':
            return True
    return False
# ------------------------------------------------------------------------------
def _is_rev_gte(left, right):
""" Return if left >= right
Args:
@param left: list. Revision numbers in a list form (as returned by
_get_rev_num).
@param right: list. Revision numbers in a list form (as returned by
_get_rev_num).
Returns:
Boolean
If left and right are all numeric then regular list comparison occurs.
If either one contains a string, then comparison occurs till both have int.
First list to have a string is considered smaller
(including if the other does not have an element in corresponding index)
Examples:
[1, 9, 0] >= [1, 9, 0]
[1, 9, 1] >= [1, 9, 0]
[1, 9, 1] >= [1, 9]
[1, 10] >= [1, 9, 1]
[1, 9, 0] >= [1, 9, 0, 'dev']
[1, 9, 1] >= [1, 9, 0, 'dev']
[1, 9, 0] >= [1, 9, 'dev']
[1, 9, 'rc'] >= [1, 9, 'dev']
[1, 9, 'rc', 0] >= [1, 9, 'dev', 1]
[1, 9, 'rc', '1'] >= [1, 9, 'rc', '1']
"""
def all_numeric(l):
return not l or all(isinstance(i, int) for i in l)
if all_numeric(left) and all_numeric(right):
return left >= right
else:
for i, (l_e, r_e) in enumerate(izip_longest(left, right)):
if isinstance(l_e, int) and isinstance(r_e, int):
if l_e == r_e:
continue
else:
return l_e > r_e
elif isinstance(l_e, int) or isinstance(r_e, int):
# [1, 9, 0] > [1, 9, 'dev']
# [1, 9, 0] > [1, 9]
return isinstance(l_e, int)
else:
# both are not int
if r_e is None:
# [1, 9, 'dev'] < [1, 9]
return False
else:
return l_e is None or left[i:] >= right[i:]
return True
# ----------------------------------------------------------------------
def _get_rev_num(rev):
"""
Convert version string into number for comparison
@param rev version text
It is expected to follow Semantic Versioning (semver.org)
Valid inputs:
1.9.0, 1.10.0, 2.5.0
1.0.0-alpha, 1.0.0-alpha.1, 1.0.0-0.3.7, 1.0.0-x.7.z.92
1.0.0+20130313144700, 1.0.0-beta+exp.sha.5114f85
"""
try:
rev_parts = re.split('[-+_]', rev)
# get numeric part of the version string
num = [int(i) for i in rev_parts[0].split('.')]
num += [0] * (3 - len(num)) # normalize num to be of length 3
# get identifier part of the version string
if len(rev_parts) > 1:
num.extend(map(str, rev_parts[1:]))
if not num:
num = [0]
return num
except:
# invalid revision
return [0]
# ------------------------------------------------------------------------------
def _print_revs(rev, dbrev, con_args, schema):
    """
    Print version information
    @param rev OS-level MADlib version
    @param dbrev DB-level MADlib version
    @param con_args database connection arguments
    @param schema MADlib schema name
    """
    _info("MADlib tools version    = %s (%s)" % (str(rev), sys.argv[0]), True)
    if con_args:
        try:
            _info("MADlib database version = %s (host=%s, db=%s, schema=%s)"
                  % (dbrev, con_args['host'], con_args['database'], schema), True)
        except:
            # Fall back when any connection argument is missing.
            _info("MADlib database version = [Unknown] (host=%s, db=%s, schema=%s)"
                  % (dbrev, con_args['host'], con_args['database'], schema), True)
    return
# ------------------------------------------------------------------------------
def _plpy_check(py_min_ver):
    """
    Check pl/python existence and version
    @param py_min_ver min Python version to run MADlib
    @raise Exception when PL/Python cannot be created or is too old
    """
    _info("Testing PL/Python environment...", True)
    # Check PL/Python existence
    rv = _internal_run_query("SELECT count(*) AS CNT FROM pg_language "
                             "WHERE lanname = 'plpythonu'", True)
    if int(rv[0]['cnt']) > 0:
        _info("> PL/Python already installed", verbose)
    else:
        _info("> PL/Python not installed", verbose)
        _info("> Creating language PL/Python...", True)
        try:
            _internal_run_query("CREATE LANGUAGE plpythonu;", True)
        except:
            _error('Cannot create language plpythonu. Stopping installation...', False)
            raise Exception
    # Check PL/Python version by defining a temporary helper function in the DB.
    _internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
    _internal_run_query("""
        CREATE OR REPLACE FUNCTION plpy_version_for_madlib()
        RETURNS TEXT AS
        $$
            import sys
            # return '.'.join(str(item) for item in sys.version_info[:3])
            return str(sys.version_info[:3]).replace(',','.').replace(' ','').replace(')','').replace('(','')
        $$
        LANGUAGE plpythonu;
    """, True)
    rv = _internal_run_query("SELECT plpy_version_for_madlib() AS ver;", True)
    python = rv[0]['ver']
    py_cur_ver = [int(i) for i in python.split('.')]
    if py_cur_ver >= py_min_ver:
        _info("> PL/Python version: %s" % python, verbose)
    else:
        _error("PL/Python version too old: %s. You need %s or greater"
               % (python, '.'.join(str(i) for i in py_min_ver)), False)
        raise Exception
    # Clean up the temporary helper function.
    _internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
    _info("> PL/Python environment OK (version: %s)" % python, True)
# ------------------------------------------------------------------------------
def _db_install(schema, dbrev, testcase):
    """
    Install MADlib.

    Handles three cases depending on the state of the target schema:
      #1 schema exists and contains MADlib objects (rename-and-replace, or
         in-place overwrite on HAWQ < 2.0),
      #2 schema exists but has no MADlib objects,
      #3 schema does not exist (or is not writable by the current user).

    @param schema MADlib schema name
    @param dbrev DB-level MADlib version (None if MADlib is not installed)
    @param testcase command-line args for a subset of modules
    """
    _info("Installing MADlib into %s schema..." % schema.upper(), True)

    # Backup name for an existing installation, e.g. "madlib_v19"
    temp_schema = schema + '_v' + ''.join(map(str, _get_rev_num(dbrev)))
    # Check the status of MADlib objects in database
    madlib_exists = False if dbrev is None else True

    # Test if schema is writable by creating and dropping a scratch table
    try:
        _internal_run_query("CREATE TABLE %s.__madlib_test_table (A INT);" % schema, False)
        _internal_run_query("DROP TABLE %s.__madlib_test_table;" % schema, False)
        schema_writable = True
    except:
        schema_writable = False

    # CASE #1: Target schema exists with MADlib objects:
    if schema_writable and madlib_exists:
        # work-around before UDT is available in HAWQ
        if portid == 'hawq':
            _info("***************************************************************************", True)
            _info("* Schema MADLIB already exists", True)
            _info("* For HAWQ, MADlib objects will be overwritten to the 'MADLIB' schema", True)
            _info("* It may drop any database objects (tables, views, etc.) that depend on 'MADLIB' SCHEMA!!!!!!!!!!!!!", True)
            _info("***************************************************************************", True)
            _info("Would you like to continue? [Y/N]", True)
            go = raw_input('>>> ').upper()
            while go not in ('Y', 'YES', 'N', 'NO'):
                go = raw_input('Yes or No >>> ').upper()
            if go in ('N', 'NO'):
                _info('Installation stopped.', True)
                return
            # Rolling back in HAWQ will drop catalog functions. For exception, we
            # simply push the exception to the caller to terminate the install
            _db_create_objects(schema, None, testcase=testcase, hawq_debug=True)
        else:
            _info("***************************************************************************", True)
            _info("* Schema %s already exists" % schema.upper(), True)
            _info("* Installer will rename it to %s" % temp_schema.upper(), True)
            _info("***************************************************************************", True)
            _info("Would you like to continue? [Y/N]", True)
            go = raw_input('>>> ').upper()
            while go not in ('Y', 'YES', 'N', 'NO'):
                go = raw_input('Yes or No >>> ').upper()
            if go in ('N', 'NO'):
                _info('Installation stopped.', True)
                return

            # Rename MADlib schema (keeps the old objects as a backup)
            _db_rename_schema(schema, temp_schema)

            # Create MADlib schema
            # NOTE: _db_rollback always raises, so a failure here terminates
            # the install after restoring the backup schema.
            try:
                _db_create_schema(schema)
            except:
                _db_rollback(schema, temp_schema)

            # Create MADlib objects
            try:
                _db_create_objects(schema, temp_schema, testcase=testcase)
            except:
                _db_rollback(schema, temp_schema)

    # CASE #2: Target schema exists w/o MADlib objects:
    # For HAWQ, after the DB initialization, there is no
    # madlib.migrationhistory table, thus madlib_exists is False
    elif schema_writable and not madlib_exists:
        # Create MADlib objects
        try:
            _db_create_objects(schema, None, testcase=testcase)
        except:
            _error("Building database objects failed. "
                   "Before retrying: drop %s schema OR install MADlib into "
                   "a different schema." % schema.upper(), True)

    #
    # CASE #3: Target schema does not exist:
    #
    elif not schema_writable:
        if portid == 'hawq' and not is_hawq2:
            # Rolling back in HAWQ will drop catalog functions. For exception, we
            # simply push the exception to the caller to terminate the install
            raise Exception("MADLIB schema is required for HAWQ")

        _info("> Schema %s does not exist" % schema.upper(), verbose)

        # Create MADlib schema
        try:
            _db_create_schema(schema)
        except:
            _db_rollback(schema, None)

        # Create MADlib objects
        try:
            _db_create_objects(schema, None, testcase=testcase)
        except:
            _db_rollback(schema, None)

    _info("MADlib %s installed successfully in %s schema." % (str(rev), schema.upper()), True)
# ------------------------------------------------------------------------------
def _db_upgrade(schema, dbrev):
    """
    Upgrade MADlib.

    Scans user tables and views for dependencies on MADlib objects that the
    upgrade would change (types, operator classes, functions, aggregates,
    operators); aborts with instructions if any are found, otherwise drops
    the changed objects and re-creates them at the new version.

    @param schema MADlib schema name
    @param dbrev DB-level MADlib version
    """
    # Nothing to do if the DB already has the packaged version or newer
    if _is_rev_gte(_get_rev_num(dbrev), _get_rev_num(rev)):
        _info("Current MADlib version already up to date.", True)
        return

    # Direct upgrade is only supported from v1.9+; older versions must
    # step through v1.9.1 first
    if _is_rev_gte([1,8],_get_rev_num(dbrev)):
        _error("""
            MADlib versions prior to v1.9 are not supported for upgrade.
            Please try upgrading to v1.9.1 and then upgrade to this version.
            """, True)
        return

    _info("Upgrading MADlib into %s schema..." % schema.upper(), True)
    _info("\tDetecting dependencies...", True)

    _info("\tLoading change list...", True)
    # ChangeHandler knows which UDTs/UDFs/UDAs/operators change in this upgrade
    ch = ChangeHandler(schema, portid, con_args, maddir, dbrev, is_hawq2)

    _info("\tDetecting table dependencies...", True)
    td = TableDependency(schema, portid, con_args)

    _info("\tDetecting view dependencies...", True)
    vd = ViewDependency(schema, portid, con_args)

    abort = False
    if td.has_dependency():
        _info("*" * 50, True)
        _info("\tFollowing user tables/indexes are dependent on MADlib objects:", True)
        _info(td.get_dependency_str(), True)
        _info("*" * 50, True)

        # Tables depending on a user-defined type that this upgrade changes
        cd_udt = [udt for udt in td.get_depended_udt() if udt in ch.udt]
        if len(cd_udt) > 0:
            _error("""
                User has objects dependent on following updated MADlib types!
                        {0}
                These objects need to be dropped before upgrading.
                """.format('\n\t\t\t'.join(cd_udt)), False)

            # we add special handling for 'linregr_result'
            if 'linregr_result' in cd_udt:
                _info("""Dependency on 'linregr_result' could be due to objects
                        created from the output of the aggregate 'linregr'.
                        Please refer to the Linear Regression documentation
                        <http://madlib.apache.org/docs/latest/group__grp__linreg.html#warning>
                        for the recommended solution.
                        """, False)
            abort = True

        # Tables depending on a user-defined operator class that changes
        c_udoc = ch.get_udoc_oids()
        d_udoc = td.get_depended_udoc_oids()
        cd_udoc = [udoc for udoc in d_udoc if udoc in c_udoc]
        if len(cd_udoc) > 0:
            _error("""
                User has objects dependent on the following updated MADlib operator classes!
                        oid={0}
                These objects need to be dropped before upgrading.
                """.format('\n\t\t\t'.join(cd_udoc)), False)
            abort = True

    if vd.has_dependency():
        _info("*" * 50, True)
        _info("\tFollowing user views are dependent on MADlib objects:", True)
        _info(vd.get_dependency_graph_str(), True)
        _info("*" * 50, True)

        # Views depending on user-defined functions that change
        c_udf = ch.get_udf_signature()
        d_udf = vd.get_depended_func_signature('UDF')
        cd_udf = [udf for udf in d_udf if udf in c_udf]
        if len(cd_udf) > 0:
            _error("""
                User has objects dependent on following updated MADlib functions!
                    {0}
                These objects will fail to work with the updated functions and
                need to be dropped before starting upgrade again.
                """.format('\n\t\t\t\t\t'.join(cd_udf)), False)
            abort = True

        # Views depending on user-defined aggregates that change
        c_uda = ch.get_uda_signature()
        d_uda = vd.get_depended_func_signature('UDA')
        cd_uda = [uda for uda in d_uda if uda in c_uda]
        if len(cd_uda) > 0:
            _error("""
                User has objects dependent on following updated MADlib aggregates!
                    {0}
                These objects will fail to work with the new aggregates and
                need to be dropped before starting upgrade again.
                """.format('\n\t\t\t\t\t'.join(cd_uda)), False)
            abort = True

        # Views depending on user-defined operators that change
        c_udo = ch.get_udo_oids()
        d_udo = vd.get_depended_opr_oids()
        cd_udo = [udo for udo in d_udo if udo in c_udo]
        if len(cd_udo) > 0:
            _error("""
                User has objects dependent on following updated MADlib operators!
                    oid={0}
                These objects will fail to work with the new operators and
                need to be dropped before starting upgrade again.
                """.format('\n\t\t\t\t\t'.join(cd_udo)), False)
            abort = True

    if abort:
        _error("""------- Upgrade aborted. -------
                Backup and drop all objects that depend on MADlib before trying upgrade again.
                Use madpack reinstall to automatically drop these objects only if appropriate.""", True)
    else:
        _info("No dependency problem found, continuing to upgrade ...", True)

    _info("\tReading existing UDAs/UDTs...", False)
    sc = ScriptCleaner(schema, portid, con_args, ch)
    _info("Script Cleaner initialized ...", False)

    # Drop everything the upgrade replaces, then re-create at the new version
    ch.drop_changed_uda()
    ch.drop_changed_udoc()
    ch.drop_changed_udo()
    ch.drop_changed_udc()
    ch.drop_changed_udf()
    ch.drop_changed_udt()  # assume dependent udf for udt does not change
    ch.drop_traininginfo_4dt()  # used types: oid, text, integer, float
    _db_create_objects(schema, None, True, sc)

    _info("MADlib %s upgraded successfully in %s schema." % (str(rev), schema.upper()), True)
# ------------------------------------------------------------------------------
def _db_rename_schema(from_schema, to_schema):
    """
    Rename a database schema.

    @param from_schema name of the schema to rename
    @param to_schema new name for the schema
    @raise Exception when the ALTER SCHEMA statement fails
    """
    _info("> Renaming schema %s to %s" % (from_schema.upper(), to_schema.upper()), True)
    rename_sql = "ALTER SCHEMA %s RENAME TO %s;" % (from_schema, to_schema)
    try:
        _internal_run_query(rename_sql, True)
    except:
        # Report the failure, then push the exception up to abort the install
        _error('Cannot rename schema. Stopping installation...', False)
        raise Exception
# ------------------------------------------------------------------------------
def _db_create_schema(schema):
    """
    Create the target schema.

    @param schema name of the schema to create

    On failure this only logs a message and returns normally; the caller is
    expected to invoke _db_rollback afterwards.

    (Fixed docstring: it previously documented from_schema/to_schema
    parameters copied from _db_rename_schema.)
    """
    _info("> Creating %s schema" % schema.upper(), True)
    try:
        _internal_run_query("CREATE SCHEMA %s;" % schema, True)
    except:
        # Swallow the error deliberately: rollback happens in the caller
        _info('Cannot create new schema. Rolling back installation...', True)
# ------------------------------------------------------------------------------
def _db_create_objects(schema, old_schema, upgrade=False, sc=None, testcase="",
                       hawq_debug=False):
    """
    Create MADlib DB objects in the schema.

    @param schema Name of the target schema
    @param old_schema Name of the schema whose MigrationHistory rows should
           be copied over (None when there is no previous installation)
    @param upgrade True when run as part of an upgrade (keeps the existing
           MigrationHistory table and runs SQL files in upgrade mode)
    @param sc ScriptCleaner object
    @param testcase Command-line args for modules to install
    @param hawq_debug True when overwriting objects in place on HAWQ < 2.0
           (skips MigrationHistory re-creation)
    @raise Exception on any failure creating tables or executing SQL files
    """
    if not upgrade and not hawq_debug:
        # Create MigrationHistory table
        try:
            _info("> Creating %s.MigrationHistory table" % schema.upper(), True)
            _internal_run_query("DROP TABLE IF EXISTS %s.migrationhistory;" % schema, True)
            sql = """CREATE TABLE %s.migrationhistory
                   (id serial, version varchar(255),
                    applied timestamp default current_timestamp);""" % schema
            _internal_run_query(sql, True)
        except:
            # Fixed typo: message previously read "Cannot crate ..."
            _error("Cannot create MigrationHistory table", False)
            raise Exception

        # Copy MigrationHistory table for record keeping purposes
        if old_schema:
            try:
                _info("> Saving data from %s.MigrationHistory table" % old_schema.upper(), True)
                sql = """INSERT INTO %s.migrationhistory (version, applied)
                       SELECT version, applied FROM %s.migrationhistory
                       ORDER BY id;""" % (schema, old_schema)
                _internal_run_query(sql, True)
            except:
                _error("Cannot copy MigrationHistory table", False)
                raise Exception

    # Stamp the DB installation
    try:
        _info("> Writing version info in MigrationHistory table", True)
        _internal_run_query("INSERT INTO %s.migrationhistory(version) "
                            "VALUES('%s')" % (schema, str(rev)), True)
    except:
        _error("Cannot insert data into %s.migrationhistory table" % schema, False)
        raise Exception

    # Run migration SQLs
    if upgrade:
        _info("> Creating/Updating objects for modules:", True)
    else:
        _info("> Creating objects for modules:", True)

    caseset = (set([test.strip() for test in testcase.split(',')])
               if testcase != "" else set())

    # modset maps module name -> list of algorithms to restrict to
    # (an empty list means "run the whole module")
    modset = {}
    for case in caseset:
        if case.find('/') > -1:
            [mod, algo] = case.split('/')
            if mod not in modset:
                modset[mod] = []
            if algo not in modset[mod]:
                modset[mod].append(algo)
        else:
            modset[case] = []

    # Loop through all modules/modules
    # portspecs is a global variable
    for moduleinfo in portspecs['modules']:
        # Get the module name
        module = moduleinfo['name']

        # Skip if doesn't meet specified modules
        if modset is not None and len(modset) > 0 and module not in modset:
            continue

        _info("> - %s" % module, True)

        # Find the Python module dir (platform specific or generic)
        if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/modules/" + module):
            maddir_mod_py = maddir + "/ports/" + portid + "/" + dbver + "/modules"
        else:
            maddir_mod_py = maddir + "/modules"

        # Find the SQL module dir (platform specific or generic)
        if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
            maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
        elif os.path.isdir(maddir + "/modules/" + module):
            maddir_mod_sql = maddir + "/modules"
        else:
            # This was a platform-specific module, for which no default exists.
            # We can just skip this module.
            continue

        # Make a temp dir for log files
        cur_tmpdir = tmpdir + "/" + module
        _make_dir(cur_tmpdir)

        # Loop through all SQL files for this module
        mask = maddir_mod_sql + '/' + module + '/*.sql_in'
        sql_files = glob.glob(mask)
        if not sql_files:
            _error("No files found in: %s" % mask, True)

        # Execute all SQL files for the module
        for sqlfile in sql_files:
            algoname = os.path.basename(sqlfile).split('.')[0]
            # BUGFIX: ('svec') is a plain string, so `in` performed a
            # substring check (e.g. algoname 'v' matched); a one-element
            # tuple makes it an exact-membership test.
            if portid == 'hawq' and not is_hawq2 and algoname in ('svec',):
                continue

            # run only algo specified
            if module in modset and len(modset[module]) > 0 \
                    and algoname not in modset[module]:
                continue

            # Set file names
            tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
            logfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.log'
            retval = _run_sql_file(schema, maddir_mod_py, module, sqlfile,
                                   tmpfile, logfile, None, upgrade,
                                   sc)

            # Check the exit status
            if retval != 0:
                # BUGFIX: message was garbled as "TEST CASE RESULTed executing %s"
                _error("Failed executing %s" % tmpfile, False)
                _error("Check the log at %s" % logfile, False)
                raise Exception
# ------------------------------------------------------------------------------
def _db_rollback(drop_schema, keep_schema):
    """
    Rollback installation.

    Drops the partially-installed schema and, if a backup schema exists,
    renames it back into place.

    @param drop_schema name of the schema to drop
    @param keep_schema name of the schema to rename and keep (None when
                       there is no backup schema to restore)
    @raise Exception always -- signals the caller that installation failed,
           even when the rollback itself succeeded
    """
    _info("Rolling back the installation...", True)

    if not drop_schema:
        _error('No schema name to drop. Stopping rollback...', True)

    # Drop the current schema
    _info("> Dropping schema %s" % drop_schema.upper(), verbose)
    try:
        _internal_run_query("DROP SCHEMA %s CASCADE;" % (drop_schema), True)
    except:
        _error("Cannot drop schema %s. Stopping rollback..." % drop_schema.upper(), True)

    # Rename old to current schema
    if keep_schema:
        _db_rename_schema(keep_schema, drop_schema)

    _info("Rollback finished successfully.", True)
    # Unconditional: the caller invoked us from an except block and must abort
    raise Exception
# ------------------------------------------------------------------------------
def unescape(string):
    r"""
    Unescape separation characters in connection strings, i.e., remove first
    backslash from "\/", "\@", "\:", and "\\".

    @param string escaped connection-string fragment, or None
    @returns the unescaped string, or None if the input was None
    """
    if string is None:
        return None
    else:
        # BUGFIX: the replacement must be a raw string -- '\g<char>' relies
        # on '\g' being passed through as an invalid escape, which is an
        # error/DeprecationWarning on modern Python.
        return re.sub(r'\\(?P<char>[/@:\\])', r'\g<char>', string)
# ------------------------------------------------------------------------------
def parseConnectionStr(connectionStr):
    r"""
    @brief Parse connection strings of the form
    <tt>[username[/password]@][hostname][:port][/database]</tt>

    Separation characters (/@:) and the backslash (\) need to be escaped.
    @returns A tuple (username, password, hostname, port, database). Field not
    specified will be None.
    """
    # A single token character: anything except a separator, or an
    # escaped separator / escaped backslash.
    tok = r'([^/@:\\]|\\/|\\@|\\:|\\\\)'
    pattern = (r'((?P<user>' + tok + r'+)'
               r'(/(?P<password>' + tok + r'*))?@)?'
               r'(?P<host>' + tok + r'+)?'
               r'(:(?P<port>[0-9]+))?'
               r'(/(?P<database>' + tok + r'+))?')
    match = re.search(pattern, connectionStr)
    # The port is numeric and needs no unescaping; all other fields may
    # contain escaped separator characters.
    return (unescape(match.group('user')),
            unescape(match.group('password')),
            unescape(match.group('host')),
            match.group('port'),
            unescape(match.group('database')))
# ------------------------------------------------------------------------------
def parse_arguments():
    """
    Parse the madpack command-line arguments.

    @returns argparse.Namespace with attributes: command, connstr, schema,
             platform, verbose, keeplogs, tmpdir, testcase
    """
    parser = argparse.ArgumentParser(
        prog="madpack",
        description='MADlib package manager (' + str(rev) + ')',
        argument_default=False,
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="""Example:

  $ madpack install -s madlib -p greenplum -c gpadmin@mdw:5432/testdb

  This will install MADlib objects into a Greenplum database called TESTDB
  running on server MDW:5432. Installer will try to login as GPADMIN
  and will prompt for password. The target schema will be MADLIB.
""")

    help_msg = """One of the following options:
                  install : run sql scripts to load into DB
                  upgrade : run sql scripts to upgrade
                  uninstall : run sql scripts to uninstall from DB
                  reinstall : performs uninstall and install
                  version : compare and print MADlib version (binaries vs database objects)
                  install-check : test all installed modules

                  (uninstall is currently unavailable for the HAWQ port)"""

    # 'update' is accepted as an alias for 'upgrade' (see main)
    choice_list = ['install', 'update', 'upgrade', 'uninstall',
                   'reinstall', 'version', 'install-check']

    parser.add_argument('command', metavar='COMMAND', nargs=1,
                        choices=choice_list, help=help_msg)

    # BUGFIX: corrected "PostgerSQL" typo in the help text below.
    parser.add_argument(
        '-c', '--conn', metavar='CONNSTR', nargs=1, dest='connstr', default=None,
        help="""Connection string of the following syntax:
                  [user[/password]@][host][:port][/database]
                If not provided default values will be derived for PostgreSQL and Greenplum:
                  - user: PGUSER or USER env variable or OS username
                  - pass: PGPASSWORD env variable or runtime prompt
                  - host: PGHOST env variable or 'localhost'
                  - port: PGPORT env variable or '5432'
                  - db: PGDATABASE env variable or OS username""")

    parser.add_argument('-s', '--schema', nargs=1, dest='schema',
                        metavar='SCHEMA', default='madlib',
                        help="Target schema for the database objects.")

    parser.add_argument('-p', '--platform', nargs=1, dest='platform',
                        metavar='PLATFORM', choices=portid_list,
                        help="Target database platform, current choices: " + str(portid_list))

    parser.add_argument('-v', '--verbose', dest='verbose',
                        action="store_true", help="Verbose mode.")

    parser.add_argument('-l', '--keeplogs', dest='keeplogs', default=False,
                        action="store_true", help="Do not remove installation log files.")

    parser.add_argument('-d', '--tmpdir', dest='tmpdir', default='/tmp/',
                        help="Temporary directory location for installation log files.")

    parser.add_argument('-t', '--testcase', dest='testcase', default="",
                        help="Module names to test, comma separated. Effective only for install-check.")

    # Get the arguments
    return parser.parse_args()
def main(argv):
    """
    madpack entry point: parse arguments, set up global state (connection,
    port, directories), then dispatch on COMMAND (version / uninstall /
    reinstall / install / upgrade / install-check).

    NOTE: this function mutates many module-level globals (verbose, keeplogs,
    tmpdir, portid, con_args, dbver, is_hawq2, maddir, maddir_conf,
    maddir_lib, portspecs).
    """
    args = parse_arguments()

    global verbose
    verbose = args.verbose
    _info("Arguments: " + str(args), verbose)
    global keeplogs
    keeplogs = args.keeplogs

    # All log files for this run live under a fresh temp directory
    global tmpdir
    try:
        tmpdir = tempfile.mkdtemp('', 'madlib.', args.tmpdir)
    except OSError, e:
        tmpdir = e.filename
        _error("cannot create temporary directory: '%s'." % tmpdir, True)

    # Parse SCHEMA
    # (args.schema is a 1-element list when given on the command line,
    # but a plain string when the argparse default 'madlib' is used)
    if len(args.schema[0]) > 1:
        schema = args.schema[0].lower()
    else:
        schema = args.schema.lower()

    # Parse DB Platform (== PortID) and compare with Ports.yml
    global portid
    if args.platform:
        try:
            # Get the DB platform name == DB port id
            portid = args.platform[0].lower()
            ports[portid]
        except:
            portid = None
            _error("Can not find specs for port %s" % (args.platform[0]), True)
    else:
        portid = None

    # Parse CONNSTR (only if PLATFORM and DBAPI2 are defined)
    if portid:
        connStr = "" if args.connstr is None else args.connstr[0]
        (c_user, c_pass, c_host, c_port, c_db) = parseConnectionStr(connStr)

        # Find the default values for PG and GP
        if portid in SUPPORTED_PORTS:
            if c_user is None:
                c_user = os.environ.get('PGUSER', getpass.getuser())
            if c_pass is None:
                c_pass = os.environ.get('PGPASSWORD', None)
            if c_host is None:
                c_host = os.environ.get('PGHOST', 'localhost')
            if c_port is None:
                c_port = os.environ.get('PGPORT', '5432')
            if c_db is None:
                c_db = os.environ.get('PGDATABASE', c_user)

        # Set connection variables
        global con_args
        con_args['host'] = c_host + ':' + c_port
        con_args['database'] = c_db
        con_args['user'] = c_user
        if c_pass is not None:
            con_args['password'] = c_pass

        # Try connecting to the database
        _info("Testing database connection...", verbose)
        try:
            # check for password only if required
            _internal_run_query("SELECT 1", False)
        except EnvironmentError:
            # Connection refused for lack of a password: prompt and retry once
            con_args['password'] = getpass.getpass("Password for user %s: " % c_user)
            _internal_run_query("SELECT 1", False)
        except:
            _error('Failed to connect to database', True)

        # Get DB version
        global dbver
        dbver = _get_dbver()
        global is_hawq2
        if portid == "hawq" and _is_rev_gte(_get_rev_num(dbver), _get_rev_num('2.0')):
            is_hawq2 = True
        else:
            is_hawq2 = False

        # HAWQ < 2.0 has hard-coded schema name 'madlib'
        if portid == 'hawq' and not is_hawq2 and schema.lower() != 'madlib':
            _error("*** Installation is currently restricted only to 'madlib' schema ***", True)

        # update maddir to use a relative path if available
        global maddir
        maddir = _get_relative_maddir(maddir, portid)

        # Get MADlib version in DB
        dbrev = _get_madlib_dbrev(schema)

        # Directories under ports/<portid>/ whose names start with a digit
        # are the DB versions this MADlib build supports
        portdir = os.path.join(maddir, "ports", portid)
        supportedVersions = [dirItem for dirItem in os.listdir(portdir)
                             if os.path.isdir(os.path.join(portdir, dirItem)) and
                             re.match("^\d+", dirItem)]
        if dbver is None:
            # Could not detect the server version: fall back to the newest
            # supported one (lexicographic max of split version components)
            dbver = ".".join(
                map(str, max([versionStr.split('.')
                              for versionStr in supportedVersions])))
            _info("Could not parse version string reported by {DBMS}. Will "
                  "default to newest supported version of {DBMS} "
                  "({version}).".format(DBMS=ports[portid]['name'],
                                        version=dbver), True)
        else:
            _info("Detected %s version %s." % (ports[portid]['name'], dbver),
                  True)
            if portid == "hawq":
                # HAWQ (starting 2.0) and GPDB (starting 5.0) uses semantic versioning,
                # which implies all HAWQ 2.x or GPDB 5.x versions will have binary
                # compatibility. Hence, we can keep single folder for all 2.X / 5.X.
                if (_is_rev_gte(_get_rev_num(dbver), _get_rev_num('2.0')) and
                        not _is_rev_gte(_get_rev_num(dbver), _get_rev_num('3.0'))):
                    is_hawq2 = True
                    dbver = '2'
            elif portid == 'greenplum':
                # similar to HAWQ above, collapse all 5.X versions
                if (_is_rev_gte(_get_rev_num(dbver), _get_rev_num('5.0')) and
                        not _is_rev_gte(_get_rev_num(dbver), _get_rev_num('6.0'))):
                    dbver = '5'
                # Due to the ABI incompatibility between 4.3.4 and 4.3.5,
                # MADlib treats 4.3.5+ as DB version 4.3ORCA which is different
                # from 4.3. The name is suffixed with ORCA since optimizer (ORCA) is
                # 'on' by default in 4.3.5
                elif _is_rev_gte(_get_rev_num(dbver), _get_rev_num('4.3.4')):
                    dbver = '4.3ORCA'
                else:
                    # only need the first two digits for <= 4.3.4
                    dbver = '.'.join(dbver.split('.')[:2])
            if not os.path.isdir(os.path.join(portdir, dbver)):
                _error("This version is not among the %s versions for which "
                       "MADlib support files have been installed (%s)." %
                       (ports[portid]['name'], ", ".join(supportedVersions)), True)

        # Validate that db platform is correct
        if not _check_db_port(portid):
            _error("Invalid database platform specified.", True)

        # Adjust MADlib directories for this port (if they exist)
        global maddir_conf
        if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/config"):
            maddir_conf = maddir + "/ports/" + portid + "/" + dbver + "/config"
        else:
            maddir_conf = maddir + "/config"

        global maddir_lib
        if os.path.isfile(maddir + "/ports/" + portid + "/" + dbver +
                          "/lib/libmadlib.so"):
            maddir_lib = maddir + "/ports/" + portid + "/" + dbver + \
                "/lib/libmadlib.so"
        else:
            maddir_lib = maddir + "/lib/libmadlib.so"

        # Get the list of modules for this port
        global portspecs
        portspecs = configyml.get_modules(maddir_conf)
    else:
        con_args = None
        dbrev = None

    # Parse COMMAND argument and compare with Ports.yml
    # Debugging...
    # print "OS rev: " + str(rev) + " > " + str(_get_rev_num(rev))
    # print "DB rev: " + str(dbrev) + " > " + str(_get_rev_num(dbrev))

    # Make sure we have the necessary parameters to continue
    if args.command[0] != 'version':
        if not portid:
            _error("Missing -p/--platform parameter.", True)
        if not con_args:
            _error("Unknown problem with database connection string: %s" % con_args, True)

    # COMMAND: version
    if args.command[0] == 'version':
        _print_revs(rev, dbrev, con_args, schema)

    # COMMAND: uninstall/reinstall
    if args.command[0] in ('uninstall',) and (portid == 'hawq' and not is_hawq2):
        _error("madpack uninstall is currently not available for HAWQ", True)

    if args.command[0] in ('uninstall', 'reinstall') and (portid != 'hawq' or is_hawq2):
        if _get_rev_num(dbrev) == [0]:
            _info("Nothing to uninstall. No version found in schema %s." % schema.upper(), True)
            return

        # Find any potential data to lose
        affected_objects = _internal_run_query("""
            SELECT
                n1.nspname AS schema,
                relname AS relation,
                attname AS column,
                typname AS type
            FROM
                pg_attribute a,
                pg_class c,
                pg_type t,
                pg_namespace n,
                pg_namespace n1
            WHERE
                n.nspname = '%s'
                AND t.typnamespace = n.oid
                AND a.atttypid = t.oid
                AND c.oid = a.attrelid
                AND c.relnamespace = n1.oid
                AND c.relkind = 'r'
            ORDER BY
                n1.nspname, relname, attname, typname""" % schema.lower(), True)

        _info("*** Uninstalling MADlib ***", True)
        _info("***********************************************************************************", True)
        _info("* Schema %s and all database objects depending on it will be dropped!" % schema.upper(), True)
        if affected_objects:
            _info("* If you continue the following data will be lost (schema : table.column : type):", True)
            for ao in affected_objects:
                _info('* - ' + ao['schema'] + ' : ' + ao['relation'] + '.' +
                      ao['column'] + ' : ' + ao['type'], True)
        _info("***********************************************************************************", True)
        _info("Would you like to continue? [Y/N]", True)
        go = raw_input('>>> ').upper()
        while go != 'Y' and go != 'N':
            go = raw_input('Yes or No >>> ').upper()

        # 2) Do the uninstall/drop
        if go == 'N':
            _info('No problem. Nothing dropped.', True)
            return
        elif go == 'Y':
            _info("> dropping schema %s" % schema.upper(), verbose)
            try:
                _internal_run_query("DROP SCHEMA %s CASCADE;" % (schema), True)
            except:
                _error("Cannot drop schema %s." % schema.upper(), True)
            _info('Schema %s (and all dependent objects) has been dropped.' % schema.upper(), True)
            _info('MADlib uninstalled successfully.', True)
        else:
            return

    # COMMAND: install/reinstall
    if args.command[0] in ('install', 'reinstall'):
        # Refresh MADlib version in DB, None for GP/PG
        if args.command[0] == 'reinstall':
            print "Setting MADlib database version to be None for reinstall"
            dbrev = None

        _info("*** Installing MADlib ***", True)

        # 1) Compare OS and DB versions.
        # noop if OS <= DB.
        _print_revs(rev, dbrev, con_args, schema)
        if _is_rev_gte(_get_rev_num(dbrev), _get_rev_num(rev)):
            _info("Current MADlib version already up to date.", True)
            return
        # proceed to create objects if nothing installed in DB or for HAWQ < 2.0
        elif dbrev is None or (portid == 'hawq' and not is_hawq2):
            pass
        # error and refer to upgrade if OS > DB
        else:
            _error("""Aborting installation: existing MADlib version detected in {0} schema
                    To upgrade the {0} schema to MADlib v{1} please run the following command:
                    madpack upgrade -s {0} -p {2} [-c ...]
                    """.format(schema, rev, portid), True)

        # 2) Run installation
        try:
            _plpy_check(py_min_ver)
            _db_install(schema, dbrev, args.testcase)
        except:
            _error("MADlib installation failed.", True)

    # COMMAND: upgrade
    if args.command[0] in ('upgrade', 'update'):
        _info("*** Upgrading MADlib ***", True)
        dbrev = _get_madlib_dbrev(schema)

        # 1) Check DB version. If None, nothing to upgrade.
        if not dbrev:
            _info("MADlib is not installed in {schema} schema and there "
                  "is nothing to upgrade. Please use install "
                  "instead.".format(schema=schema.upper()),
                  True)
            return

        # 2) Compare OS and DB versions. Continue if OS > DB.
        _print_revs(rev, dbrev, con_args, schema)
        if _is_rev_gte(_get_rev_num(dbrev), _get_rev_num(rev)):
            _info("Current MADlib version is already up-to-date.", True)
            return

        if float('.'.join(dbrev.split('.')[0:2])) < 1.0:
            _info("The version gap is too large, upgrade is supported only for "
                  "packages greater than or equal to v1.0.", True)
            return

        # 3) Run upgrade
        try:
            _plpy_check(py_min_ver)
            _db_upgrade(schema, dbrev)
        except Exception as e:
            # Uncomment the following lines when debugging
            print "Exception: " + str(e)
            print sys.exc_info()
            traceback.print_tb(sys.exc_info()[2])
            _error("MADlib upgrade failed.", True)

    # COMMAND: install-check
    if args.command[0] == 'install-check':
        # 1) Compare OS and DB versions. Continue if OS = DB.
        if _get_rev_num(dbrev) != _get_rev_num(rev):
            _print_revs(rev, dbrev, con_args, schema)
            _info("Versions do not match. Install-check stopped.", True)
            return

        # Create install-check user
        test_user = ('madlib_' +
                     rev.replace('.', '').replace('-', '_') +
                     '_installcheck')
        try:
            _internal_run_query("DROP USER IF EXISTS %s;" % (test_user), False)
        except:
            # DROP USER fails while the user still owns objects: drop those
            # first, then retry
            _internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), True)
            _internal_run_query("DROP USER IF EXISTS %s;" % (test_user), True)
        _internal_run_query("CREATE USER %s;" % (test_user), True)
        _internal_run_query("GRANT USAGE ON SCHEMA %s TO %s;" % (schema, test_user), True)

        # 2) Run test SQLs
        _info("> Running test scripts for:", verbose)

        caseset = (set([test.strip() for test in args.testcase.split(',')])
                   if args.testcase != "" else set())

        # modset: module name -> list of algorithms to restrict to
        # (empty list == run all tests of the module)
        modset = {}
        for case in caseset:
            if case.find('/') > -1:
                [mod, algo] = case.split('/')
                if mod not in modset:
                    modset[mod] = []
                if algo not in modset[mod]:
                    modset[mod].append(algo)
            else:
                modset[case] = []

        # Loop through all modules
        for moduleinfo in portspecs['modules']:
            # Get module name
            module = moduleinfo['name']

            # Skip if doesn't meet specified modules
            if modset is not None and len(modset) > 0 and module not in modset:
                continue
            # JIRA: MADLIB-1078 fix
            # Skip pmml during install-check (when run without the -t option).
            # We can still run install-check on pmml with '-t' option.
            if not modset and module in ['pmml']:
                continue
            _info("> - %s" % module, verbose)

            # Make a temp dir for this module (if doesn't exist)
            cur_tmpdir = tmpdir + '/' + module + '/test'  # tmpdir is a global variable
            _make_dir(cur_tmpdir)

            # Find the Python module dir (platform specific or generic)
            if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/modules/" + module):
                maddir_mod_py = maddir + "/ports/" + portid + "/" + dbver + "/modules"
            else:
                maddir_mod_py = maddir + "/modules"

            # Find the SQL module dir (platform specific or generic)
            if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
                maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
            else:
                maddir_mod_sql = maddir + "/modules"

            # Prepare test schema
            test_schema = "madlib_installcheck_%s" % (module)
            _internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE; CREATE SCHEMA %s;" %
                                (test_schema, test_schema), True)
            _internal_run_query("GRANT ALL ON SCHEMA %s TO %s;" %
                                (test_schema, test_user), True)

            # Switch to test user and prepare the search_path
            pre_sql = '-- Switch to test user:\n' \
                      'SET ROLE %s;\n' \
                      '-- Set SEARCH_PATH for install-check:\n' \
                      'SET search_path=%s,%s;\n' \
                      % (test_user, test_schema, schema)

            # Loop through all test SQL files for this module
            sql_files = maddir_mod_sql + '/' + module + '/test/*.sql_in'
            for sqlfile in sorted(glob.glob(sql_files), reverse=True):
                # work-around for HAWQ
                algoname = os.path.basename(sqlfile).split('.')[0]
                # run only algo specified
                if module in modset and len(modset[module]) > 0 \
                        and algoname not in modset[module]:
                    continue

                # Set file names
                tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
                logfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.log'

                # If there is no problem with the SQL file
                milliseconds = 0

                # Run the SQL
                run_start = datetime.datetime.now()
                retval = _run_sql_file(schema, maddir_mod_py, module,
                                       sqlfile, tmpfile, logfile, pre_sql)
                # Runtime evaluation
                run_end = datetime.datetime.now()
                milliseconds = round((run_end - run_start).seconds * 1000 +
                                     (run_end - run_start).microseconds / 1000)

                # Check the exit status
                if retval != 0:
                    result = 'FAIL'
                    keeplogs = True
                # Since every single statement in the test file gets logged,
                # an empty log file indicates an empty or a failed test
                elif os.path.isfile(logfile) and os.path.getsize(logfile) > 0:
                    result = 'PASS'
                # Otherwise
                else:
                    result = 'ERROR'

                # Output result
                print "TEST CASE RESULT|Module: " + module + \
                    "|" + os.path.basename(sqlfile) + "|" + result + \
                    "|Time: %d milliseconds" % (milliseconds)

                if result == 'FAIL':
                    _error("Failed executing %s" % tmpfile, False)
                    _error("Check the log at %s" % logfile, False)
            # Cleanup test schema for the module
            _internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE;" % (test_schema), True)

        # Drop install-check user
        _internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), True)
        _internal_run_query("DROP USER %s;" % (test_user), True)
# -----------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------
class RevTest(unittest.TestCase):
    """Unit tests for the version helpers _get_rev_num and _is_rev_gte."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_get_rev_num(self):
        # not using assertGreaterEqual to keep Python 2.6 compatibility
        def parsed_gte(newer, older):
            return _get_rev_num(newer) >= _get_rev_num(older)

        self.assertTrue(parsed_gte('4.3.10', '4.3.5'))
        self.assertTrue(parsed_gte('1.9.10-dev', '1.9.9'))
        self.assertNotEqual(_get_rev_num('1.9.10-dev'), _get_rev_num('1.9.10'))
        self.assertEqual(_get_rev_num('1.9.10'), [1, 9, 10])
        self.assertEqual(_get_rev_num('1.0.0+20130313144700'),
                         [1, 0, 0, '20130313144700'])
        self.assertNotEqual(_get_rev_num('1.0.0+20130313144700'),
                            _get_rev_num('1.0.0-beta+exp.sha.5114f85'))

    def test_is_rev_gte(self):
        # Expected (semver) ordering:
        # 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
        # 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0
        def gte(newer, older):
            return _is_rev_gte(_get_rev_num(newer), _get_rev_num(older))

        # pairs of already-parsed revision lists expected to compare >=
        for lhs, rhs in [([], []),
                         ([1, 9], [1, None]),
                         ([1, 9, 'rc', 1], [1, 9, 'dev', 0])]:
            self.assertTrue(_is_rev_gte(lhs, rhs))

        # version-string pairs expected to compare >=
        for newer, older in [('4.3.10', '4.3.5'),
                             ('1.9.0', '1.9.0'),
                             ('1.9.1', '1.9.0'),
                             ('1.9.1', '1.9'),
                             ('1.9.0', '1.9.0-dev'),
                             ('1.9.1', '1.9-dev'),
                             ('1.9.0-dev', '1.9.0-dev')]:
            self.assertTrue(gte(newer, older))

        # parsed revision lists expected to compare strictly lower
        for lhs, rhs in [([1, None], [1, 9]),
                         ([1, 9, 'dev', 1], [1, 9, 'rc', 0]),
                         ([1, 9, 'alpha'], [1, 9, 'alpha', 0]),
                         ([1, 9, 'alpha', 1], [1, 9, 'alpha', 'beta']),
                         ([1, 9, 'alpha.1'], [1, 9, 'alpha.beta']),
                         ([1, 9, 'beta', 2], [1, 9, 'beta', 4]),
                         ([1, 9, 'beta', '1'], [1, 9, 'rc', '0']),
                         ([1, 9, 'rc', 1], [1, 9, 0]),
                         ([1, 9, '0.2'], [1, 9, '0.3']),
                         ([1, 9, 'build2'], [1, 9, 'build3'])]:
            self.assertFalse(_is_rev_gte(lhs, rhs))

        # version-string pairs expected to compare strictly lower
        self.assertFalse(gte('1.9.1', '1.10'))
        self.assertFalse(gte('1.0.0+20130313144700',
                             '1.0.0-beta+exp.sha.5114f85'))
# ------------------------------------------------------------------------------
# Start Here
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Flip to True to run the unit tests above instead of madpack itself.
    RUN_TESTS = False
    if RUN_TESTS:
        unittest.main()
    else:
        # Run main
        main(sys.argv[1:])
        # Optional log files cleanup
        # keeplogs and tmpdir are global variables
        # (presumably assigned as a side effect of main(); confirm before
        # refactoring - they are not defined in this block)
        if not keeplogs:
            shutil.rmtree(tmpdir)
        else:
            print "INFO: Log files saved in " + tmpdir
| cooper-sloan/incubator-madlib | src/madpack/madpack.py | Python | apache-2.0 | 62,959 | [
"ORCA"
] | 5d8f1d08e4f632aee3624fac4a0f32cc675e8072a770a55666d4965a6a516692 |
import os
def read_babel_set(filename, atom):
    """Map *filename* to its mol2 counterpart and forward it, together
    with the *atom* list, to run_antechamber.

    NOTE(review): the babel conversion itself is commented out, so the
    '<filename>.mol2' file is assumed to already exist on disk.
    """
    # Conversion step, currently disabled:
    # os.system('babel -i cml %s -o mol2 %s.mol2' % (filename, filename))
    mol2_name = '%s.mol2' % filename
    run_antechamber(mol2_name, atom)
def run_antechamber(filename, atom):
    """Read per-atom partial charges from a mol2 file and store them on
    the given atom objects.

    Parameters
    ----------
    filename : str
        Path to a Tripos mol2 file. The partial charge is taken from
        the ninth whitespace-separated column of each record in the
        ``@<TRIPOS>ATOM`` section.
    atom : sequence
        Objects to annotate; each gets an ``opls_partial`` attribute
        set to the charge string for the corresponding ATOM record
        (matched by position).

    NOTE(review): the actual antechamber invocation is commented out,
    so as written this only parses an existing file; the charges are
    kept as strings, exactly as they appear in the file.
    """
    # os.system('module load amber')
    # os.system('antechamber -i %s -fi mol2 -o %s -fo mol2 -c bcc' % (filename, filename))
    # Use a context manager so the file handle is always closed
    # (the original left it open).
    with open(filename) as bfile:
        blist = bfile.readlines()
    partials = []
    for i, line in enumerate(blist):
        if line == "@<TRIPOS>ATOM\n":
            # Collect the 9th column of every record until the next
            # section header ends the ATOM block.
            for record in blist[i + 1:]:
                if record == "@<TRIPOS>BOND\n":
                    break
                partials.append(record.split()[8])
    # Positional mapping: atom k gets the charge of ATOM record k.
    for i in range(len(atom)):
        atom[i].opls_partial = partials[i]
| sipjca/cmlparser_py | babel.py | Python | apache-2.0 | 842 | [
"Amber"
] | b5f6df2fdc2cf99b28c2c0f6906f968f0fb464ad28f2669afd61edd6aca9b97b |
"""
Provides Accounting functionality to DIRAC
It includes 2 different Services:
* DataStore: where new records are inserted
* ReportGenerator: that produce reports using the inserted records
and the associated Clients:
* DataStoreClient
* ReportsClient
DIRAC Accounting uses a number of predefined Types that must include:
* Accounting keys (text) to classify the records
* Accounting fields (numeric) to included the accounted data
* bucket definition to set the granularity of the reports
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__package__ = "DIRAC.AccountingSystem"
| ic-hep/DIRAC | src/DIRAC/AccountingSystem/__init__.py | Python | gpl-3.0 | 649 | [
"DIRAC"
] | 81d279eab842a38958f2102c5cdc92cc04d7bbda32d8934c74c7cb0fd7a62f15 |
#
# Copyright (C) 2009, 2015, 2016, 2017, 2018, 2019, 2020
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import warnings
from configparser import ConfigParser
import numpy
from sherpa.utils import NoNewAttributesAfterInit, igamc
from sherpa.utils.err import FitErr, StatErr
from sherpa.data import DataSimulFit
from sherpa.models import SimulFitModel
from . import _statfcts
from sherpa import get_config
__all__ = ('Stat', 'Cash', 'CStat', 'LeastSq',
'Chi2Gehrels', 'Chi2ConstVar', 'Chi2DataVar', 'Chi2ModVar',
'Chi2XspecVar', 'Chi2',
'UserStat', 'WStat')
# Read the statistics section of the Sherpa configuration file to decide
# how model truncation is applied (see the Cash/CStat docstrings below).
config = ConfigParser()
config.read(get_config())
# truncation_flag indicates whether or not model truncation
# should be performed. If true, use the truncation_value from
# the config file.
truncation_flag = config.get('statistics', 'truncate',
                             fallback='True').upper()
truncation_value = float(config.get('statistics', 'trunc_value',
                                    fallback=1.0e-25))
# NOTE(review): `bool(truncation_flag) is False` is only True for an
# empty string, so the first clause effectively treats an empty setting
# as "disabled". Also, when truncation is disabled the value is still
# (re)set to the default 1.0e-25 rather than to None/raising - confirm
# this matches the behavior described in the Cash/CStat docstrings
# (which say an error is raised when `truncate` is False).
if (bool(truncation_flag) is False or truncation_flag == "FALSE" or
        truncation_flag == "NONE" or truncation_flag == "0"):
    truncation_value = 1.0e-25
class Stat(NoNewAttributesAfterInit):
    """The base class for calculating a statistic given data and model."""
    # Used by calc_stat
    #
    # Subclasses point this at one of the compiled _statfcts routines;
    # _validate_inputs raises NotImplementedError when it is left unset.
    _calc = None
    # This should be overridden by derived classes and set to True if the rstat and qvalue
    # figures can be calculated for that class of statistics.
    _can_calculate_rstat = None
    def __init__(self, name):
        # name: short identifier for the statistic (e.g. 'chi2', 'cash').
        self.name = name
        NoNewAttributesAfterInit.__init__(self)
    def __repr__(self):
        # Prefer the class docstring as the representation when present.
        if self.__doc__ is not None:
            return self.__doc__
        return ("<%s statistic instance '%s'>" %
                (type(self).__name__, self.name))
    @staticmethod
    def _bundle_inputs(data, model):
        """Convert input into SimulFit instances.
        Convert the inputs into `sherpa.data.DataSimulFit` and
        `sherpa.models.model.SimulFitModel`
        instances.
        Parameters
        ----------
        data : `sherpa.data.Data` or `sherpa.data.DataSimulFit`
            The data set, or sets, to use.
        model : `sherpa.models.model.Model` or `sherpa.models.model.SimulFitModel`
            The model expression, or expressions. If a
            `~sherpa.models.model.SimulFitModel`
            is given then it must match the number of data sets in the
            data parameter.
        Returns
        -------
        data : `sherpa.data.DataSimulFit`
            If the input was a `~sherpa.data.DataSimulFit` object
            then this is just the input value.
        model : `sherpa.models.model.SimulFitModel`
            If the input was a `~sherpa.models.model.SimulFitModel`
            object then this is just the input value.
        """
        # Wrap single objects in one-element simultaneous-fit containers
        # so the rest of the code can assume the simulfit interface.
        if not isinstance(data, DataSimulFit):
            data = DataSimulFit('simulfit data', (data,))
        if not isinstance(model, SimulFitModel):
            model = SimulFitModel('simulfit model', (model,))
        return data, model
    @staticmethod
    def _check_has_bins(data):
        """Raise an error if there are no noticed bins in the dataset.
        Parameters
        ----------
        data : `sherpa.data.DataSimulFit`
            The data sets to use.
        Raises
        ------
        FitErr
        Notes
        -----
        It is unclear whether this should error out if any particular
        dataset has no noticed bins, or only if all the datasets have
        no noticed bins (the latter approach is taken).
        """
        for dset in data.datasets:
            # Assume that the error column does not need to be
            # calculated for this check, so staterrfunc can be
            # None.
            dep, _, _ = dset.to_fit(staterrfunc=None)
            if numpy.iterable(dep) and len(dep) > 0:
                return
        raise FitErr('nobins')
    @staticmethod
    def _check_sizes_match(data, model):
        """Raise an error if number of datasets and models do not match.
        Parameters
        ----------
        data : `sherpa.data.DataSimulFit`
            The data sets to use.
        model : `sherpa.models.model.SimulFitModel`
            The model expressions for each data set. It must match
            the data parameter (the models are in the same order
            as the data objects).
        Raises
        ------
        StatErr
        """
        ndata = len(data.datasets)
        nmdl = len(model.parts)
        if ndata != nmdl:
            raise StatErr('mismatch',
                          'number of data sets', ndata,
                          'model expressions', nmdl)
    def _validate_inputs(self, data, model):
        """Ensure that the inputs are correct for the statistic.
        The default behavior is to check that the data contains
        at least one bin and that the number of datasets matches
        the number of models. It also converts single values to
        simultaneous objects, if necessary.
        Parameters
        ----------
        data : `sherpa.data.Data` or `sherpa.data.DataSimulFit`
            The data set, or sets, to use.
        model : `sherpa.models.model.Model` or `sherpa.models.model.SimulFitModel`
            The model expressions for each data set. It must match
            the data parameter (the models are in the same order
            as the data objects).
        Returns
        -------
        data : `sherpa.data.DataSimulFit`
            If the input was a `~sherpa.data.DataSimulFit` object
            then this is just the input value.
        model : `sherpa.models.model.SimulFitModel`
            If the input was a `~sherpa.models.model.SimulFitModel`
            object then this is just the input value.
        """
        if self._calc is None:
            # This is a programmer error rather than a user error,
            # so use NotImplementedError rather than
            # StatErr('nostat', self.name, '_calc')
            #
            raise NotImplementedError("_calc method has not been set")
        data, model = self._bundle_inputs(data, model)
        self._check_has_bins(data)
        self._check_sizes_match(data, model)
        return data, model
    def _get_fit_model_data(self, data, model):
        """Return the filtered (dep, staterror, syserror) tuple and the
        model evaluated on the same grid, after validating the inputs."""
        data, model = self._validate_inputs(data, model)
        fitdata = data.to_fit(staterrfunc=self.calc_staterror)
        modeldata = data.eval_model_to_fit(model)
        return fitdata, modeldata
    # TODO:
    # - should this accept sherpa.data.Data input instead of
    #   "raw" data (i.e. to match calc_stat)
    # - should this be moved out of the base Stat class since
    #   isn't relevant for likelihood statistics?
    #
    def calc_staterror(self, data):
        """Return the statistic error values for the data.
        Parameters
        ----------
        data : scalar or 1D array of numbers
            The data values.
        Returns
        -------
        staterror : scalar or array of numbers
            The errors for the input data values (matches the data
            argument).
        """
        raise NotImplementedError
    # TODO: add *args, **kwargs?
    def calc_stat(self, data, model):
        """Return the statistic value for the data and model.
        Parameters
        ----------
        data : `sherpa.data.Data` or `sherpa.data.DataSimulFit`
            The data set, or sets, to use.
        model : `sherpa.models.model.Model` or `sherpa.models.model.SimulFitModel`
            The model expression, or expressions. If a
            `sherpa.models.model.SimulFitModel`
            is given then it must match the number of data sets in the
            data parameter.
        Returns
        -------
        statval : number
            The value of the statistic.
        fvec : array of numbers
            The per-bin "statistic" value.
        """
        raise NotImplementedError
    def goodness_of_fit(self, statval, dof):
        """Return the reduced statistic and q value.
        The reduced statisitc is conceptually simple, as it is just
        statistic / degrees-of-freedom, but it is not meaningful for
        all statistics, and it is only valid if there are any degrees
        of freedom.
        Parameters
        ----------
        statval : float
            The statistic value. It is assumed to be finite.
        dof : int
            The number of degrees of freedom, which may be 0 or negative.
        Returns
        -------
        rstat : float or NaN or None
            The reduced statistic. If the statistic does not support
            a goodness of fit then the return value is `None`. If it does
            then NaN is returned if either the number of degrees of freedom is
            0 (or less), or the statistic value is less than 0.
        qval : float or NaN or None
            The q value. If the statistic does not support
            a goodness of fit then the return values are `None`. If it does
            then NaN is returned if either the number of degrees of freedom is
            0 (or less), or the statistic value is less than 0.
        """
        if not self._can_calculate_rstat:
            return None, None
        if dof > 0 and statval >= 0.0:
            # q value is the upper incomplete gamma integral of the
            # statistic, per the chi-square survival function.
            qval = igamc(dof / 2.0, statval / 2.0)
            rstat = statval / dof
            return rstat, qval
        # rstat/qval are undefined for dof <= 0 or a negative statistic.
        return numpy.nan, numpy.nan
class Likelihood(Stat):
    """Base class for likelihood-based fit statistics."""

    def __init__(self, name='likelihood'):
        super().__init__(name)

    @staticmethod
    def calc_staterror(data):
        """Return an array of ones matching *data*.

        Likelihood statistics carry no per-bin errors; ones are
        returned so optimization methods that divide by the error do
        not hit a division by zero.
        """
        return numpy.ones_like(data)

    def _check_background_subtraction(self, data):
        """Raise FitErr if any dataset has been background subtracted.

        Parameters
        ----------
        data : a DataSimulFit instance
            The data sets to use.

        Raises
        ------
        FitErr
        """
        if any(getattr(dobj, 'subtracted', False) for dobj in data.datasets):
            # Historically a FitErr rather than a StatErr; kept for
            # backwards compatibility with existing callers.
            raise FitErr('statnotforbackgsub', self.name)

    def _validate_inputs(self, data, model):
        """Validate as Stat does, then reject subtracted backgrounds."""
        checked_data, checked_model = Stat._validate_inputs(self, data, model)
        self._check_background_subtraction(checked_data)
        return checked_data, checked_model

    def calc_stat(self, data, model):
        """Return the statistic value and per-bin contributions."""
        (dep, _, _), mvals = self._get_fit_model_data(data, model)
        # Likelihood statistics take no error column; pass None and the
        # module-level truncation value through to the compiled routine.
        return self._calc(dep, mvals, None, truncation_value)
# DOC-TODO: where is the truncate/trunc_value stored for objects
# AHA: it appears to be taken straight from the config
# file rather than associated with the Stat class
# DOC-TODO: where to talk about the .sherpa.rc config file?
class Cash(Likelihood):
    """Poisson Log-likelihood function.
    Counts are sampled from the Poisson distribution, and so the best
    way to assess the quality of model fits is to use the product of
    individual Poisson probabilities computed in each bin i, or the
    likelihood L:
    L = (product)_i [ M(i)^D(i)/D(i)! ] * exp[-M(i)]
    where M(i) = S(i) + B(i) is the sum of source and background model
    amplitudes, and D(i) is the number of observed counts, in bin i.
    The Cash statistic [1]_ is derived by (1) taking the logarithm of
    the likelihood function, (2) changing its sign, (3) dropping the
    factorial term (which remains constant during fits to the same
    dataset), and (4) multiplying by two:
    C = 2 * (sum)_i [ M(i) - D(i) log M(i) ]
    The factor of two exists so that the change in cash statistic from
    one model fit to the next, (Delta)C, is distributed approximately
    as (Delta)chi-square when the number of counts in each bin is
    high.  One can then in principle use (Delta)C instead of
    (Delta)chi-square in certain model comparison tests. However,
    unlike chi-square, the cash statistic may be used regardless of
    the number of counts in each bin.
    The magnitude of the Cash statistic depends upon the number of
    bins included in the fit and the values of the data
    themselves.  Hence one cannot analytically assign a
    goodness-of-fit measure to a given value of the Cash statistic.
    Such a measure can, in principle, be computed by performing
    Monte Carlo simulations. One would repeatedly sample new
    datasets from the best-fit model, fit them, and note where the
    observed Cash statistic lies within the derived distribution
    of Cash statistics. Alternatively, the `cstat` statistic can
    be used.
    Notes
    -----
    The background should not be subtracted from the data when this
    statistic is used.  It should be modeled simultaneously with the
    source.
    The Cash statistic function evaluates the logarithm of each data
    point. If the number of counts is zero or negative, it's not
    possible to take the log of that number. The behavior in this case
    is controlled by the `truncate` and `trunc_value` settings in the
    .sherpa.rc file:
    - if `truncate` is `True` (the default value), then
      `log(trunc_value)` is used whenever the data value is <= 0. The
      default is `trunc_value=1.0e-25`.
    - when `truncate` is `False` an error is raised.
    References
    ----------
    .. [1] "Parameter estimation in astronomy through application of
           the likelihood ratio", Cash, W. 1979, ApJ 228, 939
           http://adsabs.harvard.edu/abs/1979ApJ...228..939C
    """
    # Compiled Cash implementation, invoked by Likelihood.calc_stat.
    _calc = _statfcts.calc_cash_stat
    def __init__(self, name='cash'):
        Likelihood.__init__(self, name)
class CStat(Likelihood):
    """Poisson Log-likelihood function (XSPEC style).
    This is equivalent to the XSPEC implementation of the
    Cash statistic [1]_ except that it requires a model to be fit
    to the background. To handle the background in the same manner
    as XSPEC, use the WStat statistic.
    Counts are sampled from the Poisson distribution, and so the best
    way to assess the quality of model fits is to use the product of
    individual Poisson probabilities computed in each bin i, or the
    likelihood L:
    L = (product)_i [ M(i)^D(i)/D(i)! ] * exp[-M(i)]
    where M(i) = S(i) + B(i) is the sum of source and background model
    amplitudes, and D(i) is the number of observed counts, in bin i.
    The cstat statistic is derived by (1) taking the logarithm of the
    likelihood function, (2) changing its sign, (3) dropping the
    factorial term (which remains constant during fits to the same
    dataset), (4) adding an extra data-dependent term (this is what
    makes it different to `Cash`, and (5) multiplying by two:
    C = 2 * (sum)_i [ M(i) - D(i) + D(i)*[log D(i) - log M(i)] ]
    The factor of two exists so that the change in the cstat statistic
    from one model fit to the next, (Delta)C, is distributed
    approximately as (Delta)chi-square when the number of counts in
    each bin is high.  One can then in principle use (Delta)C instead
    of (Delta)chi-square in certain model comparison tests. However,
    unlike chi-square, the cstat statistic may be used regardless of
    the number of counts in each bin.
    The inclusion of the data term in the expression means that,
    unlike the Cash statistic, one can assign an approximate
    goodness-of-fit measure to a given value of the cstat statistic,
    i.e. the observed statistic, divided by the number of degrees of
    freedom, should be of order 1 for good fits.
    Notes
    -----
    The background should not be subtracted from the data when this
    statistic is used.  It should be modeled simultaneously with the
    source.
    The cstat statistic function evaluates the logarithm of each data
    point. If the number of counts is zero or negative, it's not
    possible to take the log of that number. The behavior in this case
    is controlled by the `truncate` and `trunc_value` settings in the
    .sherpa.rc file:
    - if `truncate` is `True` (the default value), then
      `log(trunc_value)` is used whenever the data value is <= 0. The
      default is `trunc_value=1.0e-25`.
    - when `truncate` is `False` an error is raised.
    References
    ----------
    .. [1] The description of the Cash statistic (`cstat`) in
           https://heasarc.gsfc.nasa.gov/xanadu/xspec/manual/XSappendixStatistics.html
    """
    # Compiled cstat implementation, invoked by Likelihood.calc_stat.
    _calc = _statfcts.calc_cstat_stat
    # The extra data term in cstat makes the reduced statistic and
    # q value meaningful (unlike plain Cash).
    _can_calculate_rstat = True
    def __init__(self, name='cstat'):
        Likelihood.__init__(self, name)
class Chi2(Stat):
    """A Gaussian Log-likelihood function.
    It is assumed that the counts are sampled from the Gaussian
    (Normal) distribution and so the best way to assess the quality of
    model fit is to use the product of individual Gaussian probabilities
    computed in each bin i, or the likelihood:
    L = (prod)_i 1/(sigma^2 sqrt(2 pi)) exp[(N(i) - M(i))^2/2 sigma(i)^2]
    where M(i) = S(i) + B(i) is the sum of source and background model
    amplitudes, and N(i) is the total number of observed counts in bin i.
    The chi-square statistic is:
    chi^2 = (sum)_i [ [ N(i,S) - B(i,x,pB) - S(i,x,pS) ]^2 / sigma(i)^2 ]
    where N(i,S) is the total number of observed counts in bin i of
    the on-source region; B(i,x,pB) is the number of predicted
    background model counts in bin i of the on-source region (zero for
    background-subtracted data), rescaled from bin i of the off-source
    region, and computed as a function of the model argument x(i)
    (e.g., energy or time) and set of background model parameter
    values pB; S(i,x,pS) is the number of predicted source model
    counts in bin i, as a function of the model argument x(i) and set
    of source model parameter values pS; and sigma(i) is the error in
    bin i.
    Note that there are several weightings of this statistics depending
    on calculation of sigma(i). N(i,S) contains the background counts and
    in a case of background subtraction the number of contributing
    background counts needs to be estimated from the background, so an
    off-source region. In such case, N(i,B) is the total number of observed
    counts in bin i of the off-source region; A(B) is the off-source "area",
    which could be the size of the region from which the background is extracted, or
    the length of a background time segment, or a product of the two,
    etc.; and A(S) is the on-source "area". These terms may be defined
    for a particular type of data: for example, PHA data sets A(B) to
    `BACKSCAL * EXPOSURE` from the background data set and A(S) to
    `BACKSCAL * EXPOSURE` from the source data set.
    There are different ways of defining the sigma(i) terms,
    supported by the sub-classes.
    Notes
    -----
    It is assumed that there is a one-to-one mapping between a given
    background region bin and a given source region bin.  For
    instance, in the analysis of PHA data, it is assumed that the
    input background counts spectrum is binned in exactly the same way
    as the input source counts spectrum, and any filter applied to the
    source spectrum automatically applied to the background spectrum.
    This means that the user cannot, for example, specify arbitrary
    background and source regions in two dimensions and get correct
    results. This limitation *only* applies to backgrounds included
    included as part of the data set - e.g. as with PHA files - and
    can be avoided by treating the background as a separate data set.
    """
    # Compiled chi-square implementation, fed by calc_stat below.
    _calc = _statfcts.calc_chi2_stat
    _can_calculate_rstat = True
    def __init__(self, name='chi2'):
        Stat.__init__(self, name)
    @staticmethod
    def calc_staterror(data):
        """Raise StatErr: plain chi2 requires the data set to supply
        its own statistical errors. Use a variant such as Chi2Gehrels
        or Chi2DataVar to have them estimated from the data."""
        raise StatErr('chi2noerr')
    def calc_stat(self, data, model):
        fitdata, modeldata = self._get_fit_model_data(data, model)
        # Positional order matters and must match the compiled
        # _statfcts signature: dep, model, staterror, syserror,
        # weights, truncation value.
        return self._calc(fitdata[0], modeldata,
                          fitdata[1], fitdata[2],
                          None,  # TODO: weights
                          truncation_value)
    def calc_chisqr(self, data, model):
        """Return the chi-square value for each bin.
        Parameters
        ----------
        data : `sherpa.data.Data` or `sherpa.data.DataSimulFit`
            The data set, or sets, to use.
        model : `sherpa.models.model.Model` or `sherpa.models.model.SimulFitModel`
            The model expression, or expressions. If a
            `sherpa.models.model.SimulFitModel`
            is given then it must match the number of data sets in the
            data parameter.
        Returns
        -------
        chisqr : array of numbers
            The per-bin chi-square values.
        """
        # The per-bin vector returned by calc_stat is the signed
        # residual / error, so its square is the chi-square term.
        _, fvec = self.calc_stat(data, model)
        return fvec * fvec
class LeastSq(Chi2):
    """Least-squares statistic.

    Equivalent to a chi-square statistic in which the per-bin error
    sigma(i) is fixed at 1 for every point.
    """

    _calc = _statfcts.calc_lsq_stat
    # There is no meaningful reduced statistic / q value for a pure
    # least-squares fit.
    _can_calculate_rstat = False

    def __init__(self, name='leastsq'):
        super().__init__(name)

    @staticmethod
    def calc_staterror(data):
        # Errors are identically one, matching the definition above.
        return numpy.ones_like(data)
class Chi2Gehrels(Chi2):
    """Chi Squared with Gehrels variance.
    The variance is estimated from the number of counts in each bin,
    but unlike `Chi2DataVar`, the Gaussian approximation is not
    used. This makes it more-suitable for use with low-count data.
    The standard deviation for each bin is calculated using the
    approximation from [1]_:
    sigma(i,S) = 1 + sqrt(N(i,s) + 0.75)
    where the higher-order terms have been dropped. This is accurate
    to approximately one percent. For data where the background has
    not been subtracted then the error term is:
    sigma(i) = sigma(i,S)
    whereas with background subtraction,
    sigma(i)^2 = sigma(i,S)^2 + [A(S)/A(B)]^2 sigma(i,B)^2
    A(B) is the off-source "area", which could be
    the size of the region from which the background is extracted, or
    the length of a background time segment, or a product of the two,
    etc.; and A(S) is the on-source "area". These terms may be defined
    for a particular type of data: for example, PHA data sets A(B) to
    `BACKSCAL * EXPOSURE` from the background data set and A(S) to
    `BACKSCAL * EXPOSURE` from the source data set.
    See Also
    --------
    Chi2DataVar, Chi2ModVar, Chi2XspecVar
    Notes
    -----
    The accuracy of the error term when the background has been
    subtracted has not been determined. A preferable approach to
    background subtraction is to model the background as well as the
    source signal.
    References
    ----------
    .. [1] "Confidence limits for small numbers of events in
           astrophysical data", Gehrels, N. 1986, ApJ, vol 303,
           p. 336-346.
           http://adsabs.harvard.edu/abs/1986ApJ...303..336G
    """
    def __init__(self, name='chi2gehrels'):
        Chi2.__init__(self, name)
    @staticmethod
    def calc_staterror(data):
        # The Gehrels approximation is evaluated in the compiled helper.
        return _statfcts.calc_chi2gehrels_errors(data)
class Chi2ConstVar(Chi2):
    """Chi Squared with constant variance.

    Every bin is assigned the same variance: the mean number of counts
    over the fitted data,

        sigma(i)^2 = (1/K) * (sum)_(j=1)^K N(j,S) + [A(S)/A(B)]^2 N(j,B)

    where K is the number of on-source (and off-source) bins included
    in the fit. The background term appears only when a background
    estimate has been subtracted from the data.
    """

    def __init__(self, name='chi2constvar'):
        super().__init__(name)

    @staticmethod
    def calc_staterror(data):
        # Delegate the mean-variance calculation to the compiled helper.
        return _statfcts.calc_chi2constvar_errors(data)
class Chi2DataVar(Chi2):
    """Chi Squared with data variance.
    The variance in each bin is estimated from the data value in that
    bin.
    If the number of counts in each bin is large, then the shape of
    the Poisson distribution from which the counts are sampled tends
    asymptotically towards that of a Gaussian distribution, with
    variance
    sigma(i)^2 = N(i,S) + [A(S)/A(B)]^2 N(i,B)
    where N is the number of on-source (and off-source) bins included
    in the fit. The background term appears only if an estimate of the
    background has been subtracted from the data.
    A(B) is the off-source "area", which could be
    the size of the region from which the background is extracted, or
    the length of a background time segment, or a product of the two,
    etc.; and A(S) is the on-source "area". These terms may be defined
    for a particular type of data: for example, PHA data sets A(B) to
    `BACKSCAL * EXPOSURE` from the background data set and A(S) to
    `BACKSCAL * EXPOSURE` from the source data set.
    See Also
    --------
    Chi2Gehrels, Chi2ModVar, Chi2XspecVar
    """
    def __init__(self, name='chi2datavar'):
        Chi2.__init__(self, name)
    @staticmethod
    def calc_staterror(data):
        # Variance comes straight from the observed counts (compiled helper).
        return _statfcts.calc_chi2datavar_errors(data)
class Chi2ModVar(Chi2):
    """Chi Squared with model amplitude variance.
    The variance in each bin is estimated from the *model* value in
    that bin. This contrasts with other Chi-squared statics - such
    as `Chi2DataVar`, Chi2XspecVar`, and `Chi2Gehrels` - which use
    the data values. The variance is
    sigma(i)^2 = S(i) + [A(S)/A(B)]^2 B(i,off)
    where B(i,off) is the background model amplitude in bin i of the
    off-source region.
    A(B) is the off-source "area", which could be
    the size of the region from which the background is extracted, or
    the length of a background time segment, or a product of the two,
    etc.; and A(S) is the on-source "area". These terms may be defined
    for a particular type of data: for example, PHA data sets A(B) to
    `BACKSCAL * EXPOSURE` from the background data set and A(S) to
    `BACKSCAL * EXPOSURE` from the source data set.
    See Also
    --------
    Chi2DataVar, Chi2Gehrels, Chi2XspecVar
    Notes
    -----
    The background should not be subtracted from the data when this
    statistic is used, as it underestimates the variance when fitting
    background-subtracted data.
    """
    # Dedicated compiled routine: the variance is derived from the
    # model values, so the generic chi2 entry point is not used.
    _calc = _statfcts.calc_chi2modvar_stat
    def __init__(self, name='chi2modvar'):
        Chi2.__init__(self, name)
    # Statistical errors are not used
    @staticmethod
    def calc_staterror(data):
        # Zeros: the error column is ignored by calc_chi2modvar_stat.
        return numpy.zeros_like(data)
class Chi2XspecVar(Chi2):
    """Chi Squared with data variance (XSPEC style).

    Like `Chi2DataVar`, the variance of each bin is estimated from the
    observed value in that bin, except that any bin with fewer than
    one count has its variance set to 1.

    See Also
    --------
    Chi2DataVar, Chi2Gehrels, Chi2ModVar
    """

    def __init__(self, name='chi2xspecvar'):
        super().__init__(name)

    @staticmethod
    def calc_staterror(data):
        # The low-count clamp to 1 is applied by the compiled helper.
        return _statfcts.calc_chi2xspecvar_errors(data)
class UserStat(Stat):
    """Support simple user-supplied statistic calculations.

    Parameters
    ----------
    statfunc : callable or None, optional
        Called as ``statfunc(data, model, staterror=..., syserror=...,
        weight=...)``; expected to return the ``(statval, fvec)`` pair.
    errfunc : callable or None, optional
        Called with the data array to compute the statistical errors.
    name : str, optional
        The name of the statistic.

    Notes
    -----
    This class is used by the `sherpa.ui.load_user_stat`
    to provide a user-definable statistic calculation as
    a function. For more complicated cases it is suggested that
    users should write their own class instead of using
    this one.
    """
    def __init__(self, statfunc=None, errfunc=None, name='userstat'):
        # The *funcset flags guard every call, so the no-op lambdas
        # below are placeholders that are never actually invoked.
        self._statfuncset = False
        self.statfunc = (lambda x: None)
        self._staterrfuncset = False
        self.errfunc = (lambda x: None)
        if statfunc is not None:
            self.statfunc = statfunc
            self._calc = statfunc
            self._statfuncset = True
        if errfunc is not None:
            self.errfunc = errfunc
            self._staterrfuncset = True
        Stat.__init__(self, name)
    def __getstate__(self):
        state = self.__dict__.copy()
        # Function pointers to methods of the class
        # (of type 'instancemethod') are NOT picklable;
        # remove them and restore placeholders in __setstate__.
        # Note that __init__ also stores the user statfunc in the
        # instance attribute _calc, so it must be dropped as well -
        # leaving it in would keep the instance unpicklable.
        # pop() (rather than del) tolerates partially-built state.
        state.pop('statfunc', None)
        state.pop('errfunc', None)
        state.pop('_calc', None)
        return state
    def __setstate__(self, state):
        # Populate the function pointers we deleted at pickle time with
        # no-ops; callers must use set_statfunc/set_errfunc again after
        # unpickling (the *funcset flags are restored from state).
        self.__dict__['statfunc'] = (lambda x: None)
        self.__dict__['errfunc'] = (lambda x: None)
        self.__dict__.update(state)
    def set_statfunc(self, func):
        """Set the statistic-calculation callback."""
        self.statfunc = func
        self._statfuncset = True
    def set_errfunc(self, func):
        """Set the statistical-error callback."""
        self.errfunc = func
        self._staterrfuncset = True
    def calc_staterror(self, data):
        """Return the user-supplied errors; raise if none was given."""
        if not self._staterrfuncset:
            raise StatErr('nostat', self.name, 'calc_staterror()')
        return self.errfunc(data)
    def calc_stat(self, data, model):
        """Evaluate the user-supplied statistic on the fitted data."""
        if not self._statfuncset:
            raise StatErr('nostat', self.name, 'calc_stat()')
        fitdata, modeldata = self._get_fit_model_data(data, model)
        return self.statfunc(fitdata[0],
                             modeldata,
                             staterror=fitdata[1],
                             syserror=fitdata[2],
                             weight=None)  # TODO weights
class WStat(Likelihood):
"""Poisson Log-likelihood function including background (XSPEC style).
This is equivalent to the XSPEC implementation of the
W statistic for CStat [1]_, and includes the background data in
the fit statistic. If a model is being fit to the background then
the CStat statistic should be used.
The following description is taken from [1]_.
Suppose that each bin in the background spectrum is given its own
parameter so that the background model is b_i = f_i. A standard fit
for all these parameters would be impractical; however there is an
analytical solution for the best-fit f_i in terms of the other
variables which can be derived by using the fact that the derivative
of the likelihood (L) will be zero at the best fit. Solving for the
f_i and substituting gives the profile likelihood::
W = 2 sum_(i=1)^N t_s m_i + (t_s + t_b) f_i -
S_i ln(t_s m_i + t_s f_i) - B_i ln(t_b f_i) -
S_i (1- ln(S_i)) - B_i (1 - ln(B_i))
where::
f_i = (S_i + B_i - (t_s + t_b) m_i + d_i) / (2 (t_s + t_b))
d_i = sqrt([(t_s + t_b) m_i - S_i - B_i]^2 +
4(t_s + t_b) B_i m_i)
If any bin has S_i and/or B_i zero then its contribution to W (W_i)
is calculated as a special case. So, if S_i is zero then::
W_i = t_s m_i - B_i ln(t_b / (t_s + t_b))
If B_i is zero then there are two special cases. If
m_i < S_i / (t_s + t_b) then::
W_i = - t_b m_i - S_i ln(t_s / (t_s + t_b))
otherwise::
W_i = t_s m_i + S_i (ln(S_i) - ln(t_s m_i) - 1)
In practice, it works well for many cases but for weak sources can
generate an obviously wrong best fit. It is not clear why this happens
although binning to ensure that every bin contains at least one count
often seems to fix the problem. In the limit of large numbers of counts
per spectrum bin a second-order Taylor expansion shows that W tends to::
sum_(i=1)^N ( [S_i - t_s m_i - t_s f_i]^2 / (t_s (m_i + f_i)) +
[B_i - t_b f_i]^2 / (t_b f_i) )
which is distributed as chi^2 with N - M degrees of freedom, where
the model m_i has M parameters (include the normalization).
References
----------
.. [1] The description of the W statistic (`wstat`) in
https://heasarc.gsfc.nasa.gov/xanadu/xspec/manual/XSappendixStatistics.html
"""
_calc = _statfcts.calc_wstat_stat
_can_calculate_rstat = True
def __init__(self, name='wstat'):
    """Create the statistic, registering it under *name* (default 'wstat')."""
    Likelihood.__init__(self, name)
def calc_stat(self, data, model):
    """Evaluate the W statistic for the given data and model.

    The datasets must provide background components (PHA-style data);
    the per-dataset counts, exposures, AREASCAL and BACKSCAL values are
    flattened into single arrays and handed to the compiled wstat
    routine (``self._calc``).

    Raises
    ------
    StatErr
        If any dataset has no associated background component.
    """
    data, model = self._validate_inputs(data, model)

    # Need access to backscal values and background data filtered
    # and grouped in the same manner as the data. There is no
    # easy access to this via the Data API (in part because the
    # Data class has no knowledge of grouping or backscale values).
    #
    # An alternative approach would be to just calculate the
    # statistic for each dataset individually and then
    # sum the statistics for the return value, but the
    # original code used this approach.
    #
    data_src = []
    data_model = data.eval_model_to_fit(model)
    data_bkg = []
    nelems = []
    exp_src = []
    exp_bkg = []
    backscales = []

    # Why are we looping over model.parts as it isn't used?
    # Is it just a way to restrict to only use those
    # datasets for which we have a model?
    #
    for dset, mexpr in zip(data.datasets, model.parts):
        # Filtered (and possibly grouped) source counts for this dataset.
        y = dset.to_fit(staterrfunc=None)[0]
        data_src.append(y)
        nelems.append(y.size)

        # Only PHA-like datasets carry background_ids; anything else
        # cannot be fit with wstat.
        try:
            bids = dset.background_ids
        except AttributeError:
            raise StatErr('usecstat') from None

        nbkg = len(bids)
        if nbkg == 0:
            raise StatErr('usecstat')
        elif nbkg > 1:
            # TODO: improve warning
            warnings.warn("Only using first background component for data set {}".format(dset.name))

        bid = bids[0]
        bset = dset.get_background(bid)

        # TODO: the following should be reviewed to see what
        # happens if optional information is missing (e.g. if
        # BACKSCAL is not set we should default to all 1's,
        # but does this code handle it?)
        #
        # Background counts, filtered/grouped the same way as the data.
        data_bkg.append(dset.apply_filter(bset.get_dep(False),
                                          groupfunc=numpy.sum))

        # The assumption is that the source and background datasets
        # have the same number of channels (before any grouping or
        # filtering is applied).
        #
        # Since the backscal values can be a scalar or array, it is
        # easiest just to convert everything to an array.
        #
        dummy = numpy.ones(dset.get_dep(False).size)

        # Combine the BACKSCAL values (use the default _middle
        # scheme as this is used elsewhere when combining
        # BACKSCAL values; perhaps there should be an API call
        # for this?).
        #
        src_backscal = dset.apply_filter(dset.backscal * dummy,
                                         groupfunc=dset._middle)
        bkg_backscal = dset.apply_filter(bset.backscal * dummy,
                                         groupfunc=dset._middle)

        backscales.append(bkg_backscal / src_backscal)

        # The AREASCAL values are applied to the exposure
        # times, since this is how XSPEC handles this (at
        # least that's my understanding of a conversation with
        # Keith Arnaud, for XSPEC ~ version 12.9). This requires
        # turning an exposure into an array if there's no
        # AREASCAL value.
        #
        # For now we follow the same approach as the BACKSCAL
        # values if the data is grouped.
        #
        if dset.areascal is None:
            ascal = dummy[:dset.get_dep(True).size]
        else:
            ascal = dset.apply_filter(dset.areascal * dummy,
                                      groupfunc=dset._middle)
        exp_src.append(dset.exposure * ascal)

        if bset.areascal is None:
            ascal = dummy[:dset.get_dep(True).size]
        else:
            ascal = dset.apply_filter(bset.areascal * dummy,
                                      groupfunc=dset._middle)
        exp_bkg.append(bset.exposure * ascal)

    # Flatten the per-dataset pieces into single arrays for the
    # compiled statistic routine.
    data_src = numpy.concatenate(data_src)
    exp_src = numpy.concatenate(exp_src)
    exp_bkg = numpy.concatenate(exp_bkg)
    data_bkg = numpy.concatenate(data_bkg)
    backscales = numpy.concatenate(backscales)

    return self._calc(data_src, data_model, nelems,
                      exp_src, exp_bkg,
                      data_bkg, backscales,
                      truncation_value)
| anetasie/sherpa | sherpa/stats/__init__.py | Python | gpl-3.0 | 37,702 | [
"Gaussian"
] | f33c1ef5cbb8be1f8270f4c6d644b54d5ef8244118227e4b2b771adfa577cba9 |
# $Id$
#
# Copyright (c) 2007, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, July 2007
_version = "0.13.0"
_usage="""
CreateDb [optional arguments] <filename>
NOTES:
- the property names for the database are the union of those for
all molecules.
- missing property values will be set to 'N/A', though this can be
changed with the --missingPropertyVal argument.
- The property names may be altered on loading the database. Any
non-alphanumeric character in a property name will be replaced
with '_'. e.g. "Gold.Goldscore.Constraint.Score" becomes
"Gold_Goldscore_Constraint_Score". This is important to know
when querying.
- Property names are not case sensitive in the database; this may
cause some problems if they are case sensitive in the sd file.
"""
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
from rdkit.RDLogger import logger
from rdkit.Chem.MolDb import Loader
logger = logger()
import sys,os
from rdkit.six.moves import cPickle
from rdkit.Chem.MolDb.FingerprintUtils import BuildSigFactory,LayeredOptions
from rdkit.Chem.MolDb import FingerprintUtils
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
from optparse import OptionParser

# Command-line definition.  Two flavours of flag are used:
#   --noXXX options *disable* a feature that is on by default
#   --doXXX options *enable* a feature that is off by default
parser = OptionParser(_usage, version='%prog ' + _version)
parser.add_option('--outDir', '--dbDir', default='',
                  help='name of the output directory')
parser.add_option('--molDbName', default='Compounds.sqlt',
                  help='name of the molecule database')
parser.add_option('--molIdName', default='compound_id',
                  help='name of the database key column')
parser.add_option('--regName', default='molecules',
                  help='name of the molecular registry table')
parser.add_option('--pairDbName', default='AtomPairs.sqlt',
                  help='name of the atom pairs database')
parser.add_option('--pairTableName', default='atompairs',
                  help='name of the atom pairs table')
parser.add_option('--fpDbName', default='Fingerprints.sqlt',
                  help='name of the 2D fingerprints database')
parser.add_option('--fpTableName', default='rdkitfps',
                  help='name of the 2D fingerprints table')
parser.add_option('--layeredTableName', default='layeredfps',
                  help='name of the layered fingerprints table')
parser.add_option('--descrDbName', default='Descriptors.sqlt',
                  help='name of the descriptor database')
parser.add_option('--descrTableName', default='descriptors_v1',
                  help='name of the descriptor table')
parser.add_option('--descriptorCalcFilename',
                  default=os.path.join(RDConfig.RDBaseDir, 'Projects',
                                       'DbCLI', 'moe_like.dsc'),
                  help='name of the file containing the descriptor calculator')
parser.add_option('--errFilename', default='loadErrors.txt',
                  help='name of the file to contain information about molecules that fail to load')
parser.add_option('--noPairs', default=True, dest='doPairs', action='store_false',
                  help='skip calculating atom pairs')
parser.add_option('--noFingerprints', default=True, dest='doFingerprints', action='store_false',
                  help='skip calculating 2D fingerprints')
parser.add_option('--noLayeredFps', default=True, dest='doLayered', action='store_false',
                  help='skip calculating layered fingerprints')
parser.add_option('--noDescriptors', default=True, dest='doDescriptors', action='store_false',
                  help='skip calculating descriptors')
parser.add_option('--noProps', default=False, dest='skipProps', action='store_true',
                  help="don't include molecular properties in the database")
parser.add_option('--noSmiles', default=False, dest='skipSmiles', action='store_true',
                  help="don't include SMILES in the database (can make loading somewhat faster)")
parser.add_option('--maxRowsCached', default=-1,
                  help="maximum number of rows to cache before doing a database commit")
parser.add_option('--silent', default=False, action='store_true',
                  help='do not provide status messages')
parser.add_option('--molFormat', default='', choices=('smiles', 'sdf', ''),
                  help='specify the format of the input file')
parser.add_option('--nameProp', default='_Name',
                  help='specify the SD property to be used for the molecule names. Default is to use the mol block name')
parser.add_option('--missingPropertyVal', default='N/A',
                  help='value to insert in the database if a property value is missing. Default is %default.')
parser.add_option('--addProps', default=False, action='store_true',
                  help='add computed properties to the output')
parser.add_option('--noExtras', default=False, action='store_true',
                  help='skip all non-molecule databases')
parser.add_option('--skipLoad', '--skipMols', action="store_false", dest='loadMols', default=True,
                  help='skip the molecule loading (assumes mol db already exists)')
parser.add_option('--updateDb', '--update', default=False, action='store_true',
                  help='add to an existing database')
# BUGFIX: --doPharm2D/--doGobbi2D *enable* the corresponding fingerprints
# (store_true with default=False); the old help text said "skip
# calculating ...", a copy/paste from the --noXXX options.
parser.add_option('--doPharm2D', default=False,
                  action='store_true',
                  help='calculate Pharm2D fingerprints')
parser.add_option('--pharm2DTableName', default='pharm2dfps',
                  help='name of the Pharm2D fingerprints table')
parser.add_option('--fdefFile', '--fdef',
                  default=os.path.join(RDConfig.RDDataDir, 'Novartis1.fdef'),
                  help='provide the name of the fdef file to use for 2d pharmacophores')
parser.add_option('--doGobbi2D', default=False,
                  action='store_true',
                  help='calculate Gobbi 2D fingerprints')
parser.add_option('--gobbi2DTableName', default='gobbi2dfps',
                  help='name of the Gobbi 2D fingerprints table')
parser.add_option('--noMorganFps', '--noCircularFps', default=True, dest='doMorganFps', action='store_false',
                  help='skip calculating Morgan (circular) fingerprints')
parser.add_option('--morganFpTableName', default='morganfps',
                  help='name of the Morgan fingerprints table')
parser.add_option('--delimiter', '--delim', default=' ',
                  help='the delimiter in the input file')
parser.add_option('--titleLine', default=False, action='store_true',
                  help='the input file contains a title line')
parser.add_option('--smilesColumn', '--smilesCol', default=0, type='int',
                  help='the column index with smiles')
parser.add_option('--nameColumn', '--nameCol', default=1, type='int',
                  help='the column index with mol names')
def CreateDb(options, dataFilename='', supplier=None):
  """Build the molecule database and any requested auxiliary databases.

  Molecules come either from *supplier* (any RDKit mol supplier) or are
  read from *dataFilename* (SMILES or SD file; the format is guessed
  from the extension when --molFormat was not given).  After loading,
  the molecule table is scanned once and, depending on the options,
  atom-pair/torsion, RDKit 2D, layered, Pharm2D, Gobbi 2D and Morgan
  fingerprints as well as descriptors are computed and written to their
  own SQLite databases in options.outDir.
  """
  if not dataFilename and supplier is None:
    raise ValueError('Please provide either a data filename or a supplier')

  if options.errFilename:
    errFile = open(os.path.join(options.outDir, options.errFilename), 'w+')
  else:
    errFile = None

  # --noExtras turns off every non-molecule database in one go.
  if options.noExtras:
    options.doPairs = False
    options.doDescriptors = False
    options.doFingerprints = False
    options.doPharm2D = False
    options.doGobbi2D = False
    options.doLayered = False
    options.doMorganFps = False

  if options.loadMols:
    if supplier is None:
      # Guess the input format from the file extension when it was
      # not given explicitly.
      if not options.molFormat:
        ext = os.path.splitext(dataFilename)[-1].lower()
        if ext == '.sdf':
          options.molFormat = 'sdf'
        elif ext in ('.smi', '.smiles', '.txt', '.csv'):
          options.molFormat = 'smiles'
          if not options.delimiter:
            # guess the delimiter
            import csv
            sniffer = csv.Sniffer()
            dlct = sniffer.sniff(open(dataFilename, 'r').read(2000))
            options.delimiter = dlct.delimiter
            if not options.silent:
              logger.info('Guessing that delimiter is %s. Use --delimiter argument if this is wrong.' % repr(options.delimiter))
        if not options.silent:
          logger.info('Guessing that mol format is %s. Use --molFormat argument if this is wrong.' % repr(options.molFormat))
      if options.molFormat == 'smiles':
        # A literal "\t" on the command line means a tab character.
        if options.delimiter == '\\t':
          options.delimiter = '\t'
        supplier = Chem.SmilesMolSupplier(dataFilename,
                                          titleLine=options.titleLine,
                                          delimiter=options.delimiter,
                                          smilesColumn=options.smilesColumn,
                                          nameColumn=options.nameColumn
                                          )
      else:
        supplier = Chem.SDMolSupplier(dataFilename)
    if not options.silent:
      logger.info('Reading molecules and constructing molecular database.')
    Loader.LoadDb(supplier, os.path.join(options.outDir, options.molDbName),
                  errorsTo=errFile, regName=options.regName, nameCol=options.molIdName,
                  skipProps=options.skipProps, defaultVal=options.missingPropertyVal,
                  addComputedProps=options.addProps, uniqNames=True,
                  skipSmiles=options.skipSmiles, maxRowsCached=int(options.maxRowsCached),
                  silent=options.silent, nameProp=options.nameProp,
                  lazySupplier=int(options.maxRowsCached) > 0,
                  startAnew=not options.updateDb
                  )

  # ---- set up the auxiliary databases/tables (drop-then-create) ----
  if options.doPairs:
    pairConn = DbConnect(os.path.join(options.outDir, options.pairDbName))
    pairCurs = pairConn.GetCursor()
    try:
      pairCurs.execute('drop table %s' % (options.pairTableName))
    except:
      # NOTE(review): bare except — the table may simply not exist yet,
      # but this also hides any real database error.
      pass
    pairCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,atompairfp blob,torsionfp blob)' % (options.pairTableName,
                                                                                                                                        options.molIdName))

  if options.doFingerprints or options.doPharm2D or options.doGobbi2D or options.doLayered:
    fpConn = DbConnect(os.path.join(options.outDir, options.fpDbName))
    fpCurs = fpConn.GetCursor()
    try:
      fpCurs.execute('drop table %s' % (options.fpTableName))
    except:
      pass
    try:
      fpCurs.execute('drop table %s' % (options.pharm2DTableName))
    except:
      pass
    try:
      fpCurs.execute('drop table %s' % (options.gobbi2DTableName))
    except:
      pass
    try:
      fpCurs.execute('drop table %s' % (options.layeredTableName))
    except:
      pass

    if options.doFingerprints:
      fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,rdkfp blob)' % (options.fpTableName,
                                                                                                                    options.molIdName))
    if options.doLayered:
      # Layered fps are stored one integer column per word.
      layeredQs = ','.join('?' * LayeredOptions.nWords)
      colDefs = ','.join(['Col_%d integer' % (x + 1) for x in range(LayeredOptions.nWords)])
      fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,%s)' % (options.layeredTableName,
                                                                                                            options.molIdName,
                                                                                                            colDefs))
    if options.doPharm2D:
      fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,pharm2dfp blob)' % (options.pharm2DTableName,
                                                                                                                        options.molIdName))
      sigFactory = BuildSigFactory(options)
    if options.doGobbi2D:
      fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,gobbi2dfp blob)' % (options.gobbi2DTableName,
                                                                                                                        options.molIdName))
      from rdkit.Chem.Pharm2D import Generate, Gobbi_Pharm2D

  if options.doMorganFps:
    fpConn = DbConnect(os.path.join(options.outDir, options.fpDbName))
    fpCurs = fpConn.GetCursor()
    try:
      fpCurs.execute('drop table %s' % (options.morganFpTableName))
    except:
      pass
    fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,morganfp blob)' % (options.morganFpTableName,
                                                                                                                     options.molIdName))

  if options.doDescriptors:
    descrConn = DbConnect(os.path.join(options.outDir, options.descrDbName))
    # The pickled descriptor calculator defines which descriptors get
    # computed and, via its names, the table columns.
    calc = cPickle.load(open(options.descriptorCalcFilename, 'rb'))
    nms = [x for x in calc.GetDescriptorNames()]
    descrCurs = descrConn.GetCursor()
    descrs = ['guid integer not null primary key', '%s varchar not null unique' % options.molIdName]
    descrs.extend(['%s float' % x for x in nms])
    try:
      descrCurs.execute('drop table %s' % (options.descrTableName))
    except:
      pass
    descrCurs.execute('create table %s (%s)' % (options.descrTableName, ','.join(descrs)))
    descrQuery = ','.join([DbModule.placeHolder] * len(descrs))

  # Row buffers; flushed to the databases every 500 molecules and once
  # more after the loop.
  pairRows = []
  fpRows = []
  layeredRows = []
  descrRows = []
  pharm2DRows = []
  gobbi2DRows = []
  morganRows = []

  if not options.silent:
    logger.info('Generating fingerprints and descriptors:')
  molConn = DbConnect(os.path.join(options.outDir, options.molDbName))
  molCurs = molConn.GetCursor()
  if not options.skipSmiles:
    molCurs.execute('select guid,%s,smiles,molpkl from %s' % (options.molIdName, options.regName))
  else:
    molCurs.execute('select guid,%s,molpkl from %s' % (options.molIdName, options.regName))
  i = 0
  while 1:
    try:
      tpl = molCurs.fetchone()
      molGuid = tpl[0]
      molId = tpl[1]
      pkl = tpl[-1]
      i += 1
    except:
      # NOTE(review): relies on fetchone() returning None at the end of
      # the result set (tpl[0] then raises TypeError); the bare except
      # would also swallow genuine database errors.
      break
    # Rebuild the molecule from its pickled form.
    if isinstance(pkl, (bytes, str)):
      mol = Chem.Mol(pkl)
    else:
      mol = Chem.Mol(str(pkl))
    if not mol:
      continue

    if options.doPairs:
      pairs = FingerprintUtils.BuildAtomPairFP(mol)
      torsions = FingerprintUtils.BuildTorsionsFP(mol)
      pkl1 = DbModule.binaryHolder(pairs.ToBinary())
      pkl2 = DbModule.binaryHolder(torsions.ToBinary())
      row = (molGuid, molId, pkl1, pkl2)
      pairRows.append(row)
    if options.doFingerprints:
      fp2 = FingerprintUtils.BuildRDKitFP(mol)
      pkl = DbModule.binaryHolder(fp2.ToBinary())
      row = (molGuid, molId, pkl)
      fpRows.append(row)
    if options.doLayered:
      words = LayeredOptions.GetWords(mol)
      row = [molGuid, molId] + words
      layeredRows.append(row)
    if options.doDescriptors:
      descrs = calc.CalcDescriptors(mol)
      row = [molGuid, molId]
      row.extend(descrs)
      descrRows.append(row)
    if options.doPharm2D:
      FingerprintUtils.sigFactory = sigFactory
      fp = FingerprintUtils.BuildPharm2DFP(mol)
      pkl = DbModule.binaryHolder(fp.ToBinary())
      row = (molGuid, molId, pkl)
      pharm2DRows.append(row)
    if options.doGobbi2D:
      FingerprintUtils.sigFactory = Gobbi_Pharm2D.factory
      fp = FingerprintUtils.BuildPharm2DFP(mol)
      pkl = DbModule.binaryHolder(fp.ToBinary())
      row = (molGuid, molId, pkl)
      gobbi2DRows.append(row)
    if options.doMorganFps:
      morgan = FingerprintUtils.BuildMorganFP(mol)
      pkl = DbModule.binaryHolder(morgan.ToBinary())
      row = (molGuid, molId, pkl)
      morganRows.append(row)

    # Periodic flush of the row buffers.
    if not i % 500:
      if len(pairRows):
        pairCurs.executemany('insert into %s values (?,?,?,?)' % options.pairTableName,
                             pairRows)
        pairRows = []
        pairConn.Commit()
      if len(fpRows):
        fpCurs.executemany('insert into %s values (?,?,?)' % options.fpTableName,
                           fpRows)
        fpRows = []
        fpConn.Commit()
      if len(layeredRows):
        fpCurs.executemany('insert into %s values (?,?,%s)' % (options.layeredTableName, layeredQs),
                           layeredRows)
        layeredRows = []
        fpConn.Commit()
      if len(descrRows):
        descrCurs.executemany('insert into %s values (%s)' % (options.descrTableName, descrQuery),
                              descrRows)
        descrRows = []
        descrConn.Commit()
      if len(pharm2DRows):
        fpCurs.executemany('insert into %s values (?,?,?)' % options.pharm2DTableName,
                           pharm2DRows)
        pharm2DRows = []
        fpConn.Commit()
      if len(gobbi2DRows):
        fpCurs.executemany('insert into %s values (?,?,?)' % options.gobbi2DTableName,
                           gobbi2DRows)
        gobbi2DRows = []
        fpConn.Commit()
      if len(morganRows):
        fpCurs.executemany('insert into %s values (?,?,?)' % options.morganFpTableName,
                           morganRows)
        morganRows = []
        fpConn.Commit()
    if not options.silent and not i % 500:
      logger.info(' Done: %d' % (i))

  # Final flush of whatever is left in the buffers.
  if len(pairRows):
    pairCurs.executemany('insert into %s values (?,?,?,?)' % options.pairTableName,
                         pairRows)
    pairRows = []
    pairConn.Commit()
  if len(fpRows):
    fpCurs.executemany('insert into %s values (?,?,?)' % options.fpTableName,
                       fpRows)
    fpRows = []
    fpConn.Commit()
  if len(layeredRows):
    fpCurs.executemany('insert into %s values (?,?,%s)' % (options.layeredTableName, layeredQs),
                       layeredRows)
    layeredRows = []
    fpConn.Commit()
  if len(descrRows):
    descrCurs.executemany('insert into %s values (%s)' % (options.descrTableName, descrQuery),
                          descrRows)
    descrRows = []
    descrConn.Commit()
  if len(pharm2DRows):
    fpCurs.executemany('insert into %s values (?,?,?)' % options.pharm2DTableName,
                       pharm2DRows)
    pharm2DRows = []
    fpConn.Commit()
  if len(gobbi2DRows):
    fpCurs.executemany('insert into %s values (?,?,?)' % options.gobbi2DTableName,
                       gobbi2DRows)
    gobbi2DRows = []
    fpConn.Commit()
  if len(morganRows):
    fpCurs.executemany('insert into %s values (?,?,?)' % options.morganFpTableName,
                       morganRows)
    morganRows = []
    fpConn.Commit()

  if not options.silent:
    logger.info('Finished.')
if __name__ == '__main__':
  options, args = parser.parse_args()
  # Initialise so the --skipLoad path does not hit a NameError below.
  dataFilename = ''
  if options.loadMols:
    if len(args) != 1:
      parser.error('please provide a filename argument')
    dataFilename = args[0]
    # Probe the input file up front so we can fail early with a clear
    # message (CreateDb re-opens it itself).
    try:
      dataFile = open(dataFilename, 'r')
    except IOError:
      logger.error('input file %s does not exist' % (dataFilename))
      # BUGFIX: exit with a non-zero status on failure (was sys.exit(0),
      # which made scripted callers think the run succeeded).
      sys.exit(1)
    dataFile.close()

  if not options.outDir:
    prefix = os.path.splitext(dataFilename)[0]
    options.outDir = prefix

  if not os.path.exists(options.outDir):
    try:
      os.mkdir(options.outDir)
    except OSError:
      logger.error('could not create output directory %s' % options.outDir)
      sys.exit(1)

  if 1:
    CreateDb(options, dataFilename)
  else:
    # Flip the condition above to profile a run instead.
    import cProfile
    cProfile.run("CreateDb(options,dataFilename)", "create.prof")
    import pstats
    p = pstats.Stats('create.prof')
    p.strip_dirs().sort_stats('cumulative').print_stats(25)
| AlexanderSavelyev/rdkit | Projects/DbCLI/CreateDb.py | Python | bsd-3-clause | 20,730 | [
"RDKit"
] | e28c7b1aeabc2585ba2c228882363431c11cf8ebf0e1eaddb712c34ab15c6f72 |
from __future__ import unicode_literals
import io
import json
import traceback
import hashlib
import os
import subprocess
import sys
from zipimport import zipimporter
from .compat import compat_realpath
from .utils import encode_compat_str
from .version import __version__
def rsa_verify(message, signature, key):
    """Check an RSA signature over the SHA-256 digest of *message*.

    *signature* is the hex-encoded signature, *key* a ``(modulus,
    exponent)`` pair.  The decrypted signature must exactly match the
    EMSA-PKCS1-v1_5 encoding (0x00 0x01 FF-padding 0x00 DigestInfo) of
    the message digest.  Returns True on a valid signature, False
    otherwise (including when the modulus is too small to hold the
    padded digest).
    """
    assert isinstance(message, bytes)
    modulus, exponent = key
    byte_size = (len(bin(modulus)) - 2 + 8 - 1) // 8
    # "Decrypt" the signature and left-pad its hex form back to the
    # full modulus width.
    decrypted = ('%x' % pow(int(signature, 16), exponent, modulus)).encode()
    decrypted = decrypted.rjust(byte_size * 2, b'0')
    # DigestInfo: fixed ASN.1 prefix for SHA-256 followed by the digest.
    digest_info = b'3031300d060960864801650304020105000420'
    digest_info += hashlib.sha256(message).hexdigest().encode()
    if byte_size < len(digest_info) // 2 + 11:
        return False
    padded = b'0001' + (byte_size - len(digest_info) // 2 - 3) * b'ff' + b'00' + digest_info
    return padded == decrypted
def update_self(to_screen, verbose, opener):
    """Update the program file with the latest version from the repository.

    to_screen -- callable used for all user-facing output
    verbose   -- when true, tracebacks are echoed on failure
    opener    -- urllib-style opener used for every network request
    """
    UPDATE_URL = 'https://blackjack4494.github.io//update/'
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

    def sha256sum():
        # Hash the running executable in 128 KiB chunks so the whole
        # file never has to be held in memory.
        h = hashlib.sha256()
        b = bytearray(128 * 1024)
        mv = memoryview(b)
        with open(os.path.realpath(sys.executable), 'rb', buffering=0) as f:
            for n in iter(lambda: f.readinto(mv), 0):
                h.update(mv[:n])
        return h.hexdigest()
    to_screen('Current Build Hash %s' % sha256sum())

    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'):
        to_screen('It looks like you installed youtube-dlc with a package manager, pip, setup.py or a tarball. Please use that to update.')
        return

    # compiled file.exe can find itself by
    # to_screen(os.path.basename(sys.executable))
    # and path to py or exe
    # to_screen(os.path.realpath(sys.executable))

    # Check if there is a new version
    try:
        newversion = opener.open(VERSION_URL).read().decode('utf-8').strip()
    except Exception:
        if verbose:
            to_screen(encode_compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t find the current version. Please try again later.')
        to_screen('Visit https://github.com/blackjack4494/yt-dlc/releases/latest')
        return
    if newversion == __version__:
        to_screen('youtube-dlc is up-to-date (' + __version__ + ')')
        return

    # Download and check versions info
    try:
        versions_info = opener.open(JSON_URL).read().decode('utf-8')
        versions_info = json.loads(versions_info)
    except Exception:
        if verbose:
            to_screen(encode_compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t obtain versions info. Please try again later.')
        to_screen('Visit https://github.com/blackjack4494/yt-dlc/releases/latest')
        return
    if 'signature' not in versions_info:
        to_screen('ERROR: the versions file is not signed or corrupted. Aborting.')
        return
    signature = versions_info['signature']
    del versions_info['signature']
    # Verify the RSA signature over the canonical (sorted-keys) JSON.
    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
        to_screen('ERROR: the versions file signature is invalid. Aborting.')
        return

    version_id = versions_info['latest']

    def version_tuple(version_str):
        # Numeric, component-wise comparison of dotted version strings.
        return tuple(map(int, version_str.split('.')))
    if version_tuple(__version__) >= version_tuple(version_id):
        to_screen('youtube-dlc is up to date (%s)' % __version__)
        return

    to_screen('Updating to version ' + version_id + ' ...')
    version = versions_info['versions'][version_id]

    print_notes(to_screen, versions_info['versions'])

    # sys.executable is set to the full pathname of the exe-file for py2exe
    # though symlinks are not followed so that we need to do this manually
    # with help of realpath
    filename = compat_realpath(sys.executable if hasattr(sys, 'frozen') else sys.argv[0])

    if not os.access(filename, os.W_OK):
        to_screen('ERROR: no write permissions on %s' % filename)
        return

    # Py2EXE
    if hasattr(sys, 'frozen'):
        exe = filename
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            to_screen('ERROR: no write permissions on %s' % directory)
            return

        try:
            urlh = opener.open(version['exe'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            to_screen('Visit https://github.com/blackjack4494/yt-dlc/releases/latest')
            return

        # The versions file pairs each download URL with its expected
        # SHA-256 hash.
        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['exe'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to write the new version')
            return

        try:
            # A running .exe cannot overwrite itself on Windows, so a
            # self-deleting batch file swaps the files in after exit.
            bat = os.path.join(directory, 'youtube-dlc-updater.bat')
            with io.open(bat, 'w') as batfile:
                batfile.write('''
@echo off
echo Waiting for file handle to be closed ...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s" > NUL
echo Updated youtube-dlc to version %s.
start /b "" cmd /c del "%%~f0"&exit /b"
\n''' % (exe, exe, version_id))

            subprocess.Popen([bat])  # Continues to run in the background
            return  # Do not show premature success messages
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return

    # Zip unix package
    elif isinstance(globals().get('__loader__'), zipimporter):
        try:
            urlh = opener.open(version['bin'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            to_screen('Visit https://github.com/blackjack4494/yt-dlc/releases/latest')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['bin'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return

    to_screen('Updated youtube-dlc. Restart youtube-dlc to use the new version.')
def get_notes(versions, fromVersion):
    """Collect the release notes of every version newer than *fromVersion*.

    *versions* maps dotted version strings to dicts that may carry a
    'notes' list; the notes are returned in ascending version order.

    BUGFIX: versions are now compared numerically, component by
    component (the same scheme as version_tuple in update_self).  The
    previous plain string comparison mis-ordered versions such as
    '2020.11.10' vs '2020.11.2'.
    """
    def _as_tuple(version_str):
        return tuple(map(int, version_str.split('.')))

    notes = []
    base = _as_tuple(fromVersion)
    for v, vdata in sorted(versions.items(), key=lambda item: _as_tuple(item[0])):
        if _as_tuple(v) > base:
            notes.extend(vdata.get('notes', []))
    return notes
def print_notes(to_screen, versions, fromVersion=__version__):
    """Echo the release notes for every version newer than *fromVersion*.

    Nothing is printed when there are no relevant notes.
    """
    relevant = get_notes(versions, fromVersion)
    if not relevant:
        return
    to_screen('PLEASE NOTE:')
    for line in relevant:
        to_screen(line)
| mx3L/archivczsk | build/plugin/src/resources/libraries/youtube_dl/update.py | Python | gpl-2.0 | 7,957 | [
"VisIt"
] | 5c978e036d24bccda9fbaadd5a07b23258d2424978d4267ec85094fcbaf61e2a |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import click
from flask.cli import with_appcontext
from flask import current_app
click.disable_unicode_literals_warning = True
import os
@click.group()
def pfamscan():
    """PfamScan commands"""
    # Container group only; the install/index sub-commands below attach
    # themselves via @pfamscan.command().
    pass
def run(cmds, **kwargs):
    """Chain the shell commands with '&&', fill in str.format placeholders
    from *kwargs*, echo the result and execute it via os.system.

    Each command is wrapped in parentheses so the chain short-circuits
    on the first failure.
    """
    template = ' && '.join('({:})'.format(c) for c in cmds)
    command = template.format(**kwargs)
    click.echo("Running:\n{}".format(command))
    os.system(command)
@pfamscan.command()
def install():
    """Download and compile pfamscan sourcecode."""
    cmds = [
        'wget -c http://ftp.ebi.ac.uk/pub/databases/Pfam/Tools/PfamScan.tar.gz',
        'tar xvzf PfamScan.tar.gz',
        # BUGFIX: the previous command read
        #   'sudo perl - MCPAN - e"install Moose"'
        # which is not a valid perl invocation (the stray spaces split
        # the -MCPAN and -e switches).
        'sudo perl -MCPAN -e "install Moose"'
    ]
    run(cmds)
@pfamscan.command()
@click.option('--version', '-v', 'version',
              type=click.STRING,
              multiple=False,
              default='31.0',
              help='Version to download.')
@click.option('--ftp',
              is_flag=True,
              help='Force to use ftp.')
def index(version, ftp):
    """Download PfamA-full file.

    Fetches the Pfam-A HMM library and its .dat companion for the
    requested release into ./Pfam<version>/, decompresses both, and
    runs hmmpress to build the binary index files.

    version -- Pfam release to download (default '31.0')
    ftp     -- use ftp:// instead of http:// for the downloads
    """
    protocol = 'ftp' if ftp else 'http'
    cmds = [
        'mkdir -p ./Pfam{version}',
        'wget -c {protocol}://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam{version}/Pfam-A.hmm.gz -O ./Pfam{version}/Pfam-A.hmm.gz',
        'wget -c {protocol}://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam{version}/Pfam-A.hmm.dat.gz -O ./Pfam{version}/Pfam-A.hmm.dat.gz',
        'gunzip -c ./Pfam{version}/Pfam-A.hmm.gz > ./Pfam{version}/Pfam-A.hmm',
        'gunzip -c ./Pfam{version}/Pfam-A.hmm.dat.gz > ./Pfam{version}/Pfam-A.hmm.dat',
        # NOTE(review): assumes an hmmpress binary in the current
        # working directory — confirm this is intended.
        './hmmpress ./Pfam{version}/Pfam-A.hmm',
        'mkdir -p tmp',
    ]
    run(cmds, version=version, protocol=protocol)
| ecolell/pfamserver | pfamserver/commands/library/pfamscan.py | Python | agpl-3.0 | 1,820 | [
"MOOSE"
] | 8d446fbb8ba92fe73c27f704053fa057723f9da0b0ad4b926167f4649d84f04d |
# -*- mode: Python; indent-tabs-mode: nil -*- #
import awe
import numpy as np
import sys
import os
#-----Simulation Default Values-----
# Defaults for the command-line options built in getopts().
iterations = 5           # number of AWE iterations (-i/--iterations)
nwalkers = 4             # presumably walkers per cell — not referenced in this chunk; verify usage
nstates = 100            # presumably number of states — not referenced in this chunk; verify usage
restarts = float('inf')  # retry failed tasks without limit (-r/--restarts)
maxreps = 50             # maximum task replication count (-R/--maxreps)

#-----WQ Default Values-----
wq_port = 9123                   # Work Queue master port (-p/--port)
wq_fast_abort_multiplier = -1.0  # fast-abort disabled by default (-f/--fastabort)
#-----Get user options-----
def getopts():
    """Build the command-line parser and return the parsed options.

    Defaults come from the module-level constants defined above;
    positional arguments are parsed but discarded.
    """
    import optparse

    parser = optparse.OptionParser()

    # AWE params
    parser.add_option('-i', '--iterations', metavar='<int>', type=int,
                      dest='iterations', default=iterations,
                      help='Number of AWE iterations (default=%s)' % iterations)
    parser.add_option('-r', '--restarts', metavar='<int>', type=int,
                      dest='restarts', default=restarts,
                      help='Number of times to restart a failed task (default=%s)' % restarts)
    parser.add_option('-R', '--maxreps', metavar='<int>', type=int,
                      dest='maxreps', default=maxreps,
                      help='Number of times to replicate a task (default=%s)' % maxreps)

    # WQ params
    parser.add_option('-p', '--port', metavar='<int>', type=int,
                      dest='port', default=wq_port,
                      help='Port for Work Queue to use (default=%s)' % wq_port)
    parser.add_option('-n', '--name', metavar='<string>', type='string',
                      dest='name', default=None,
                      help='A project name to use with the catalog server (default=standalone mode)')
    parser.add_option('-f', '--fastabort', metavar='<float>', type=float,
                      dest='fastabort', default=wq_fast_abort_multiplier,
                      help='Set the Work Queue fast abort multipler')
    parser.add_option('-M', '--monitor', action='store_true',
                      dest='enable_monitor', default=False,
                      help='Enable monitoring of resource usage of tasks (default=disabled)')
    parser.add_option('-S', '--summaryfile', metavar='<file>', type='string',
                      dest='summaryfile', default=None,
                      help='Print resource usage summary of tasks to <file>. Must be used with -M option. (default=wq-<pid>-resource-usage)')
    parser.add_option('-d', '--debug', metavar='<string>', type='string',
                      dest='debug', default=None,
                      help='Print Work Queue debug messages')

    options, _ = parser.parse_args()
    return options
#-----Main Program------
if __name__ == "__main__":
    opts = getopts()
    # Configure the Work Queue master from the command-line options.
    cfg = awe.workqueue.Config()
    cfg.fastabort = opts.fastabort
    cfg.restarts = opts.restarts
    cfg.maxreps = opts.maxreps
    cfg.name = opts.name
    cfg.port = opts.port
    if opts.debug:
        cfg.debug = opts.debug
    if opts.enable_monitor:
        cfg.monitor = True
        cfg.summaryfile = opts.summaryfile
    # The "main" function of the worker
    cfg.execute('awe-instance-data/execute-task.sh')
    # Binaries to run MD and assignment steps
    cfg.cache('awe-generic-data/binaries/$OS-$ARCH/pdb2gmx')
    cfg.cache('awe-generic-data/binaries/$OS-$ARCH/grompp')
    cfg.cache('awe-generic-data/binaries/$OS-$ARCH/mdrun')
    cfg.cache('awe-generic-data/binaries/$OS-$ARCH/awe-assign')
    cfg.cache('awe-generic-data/gmxtopologies') # required for running gromacs for MD
    cfg.cache('awe-instance-data/sim.mdp') # Gromacs simulation parameters
    cfg.cache('awe-instance-data/env.sh') # setting up the worker execution environment
    cfg.cache('awe-instance-data/cells.dat') # cell definitions
    cfg.cache('awe-instance-data/CellIndices.dat') # cell atoms to use when assigning
    cfg.cache('awe-instance-data/StructureIndices.dat') # walker atoms to use when assigning
    # initialize the weights randomly, normalized so they sum to 1
    weights = np.random.random((nstates,nwalkers))
    weights /= np.sum(weights.flatten())
    # load a topology file
    system = awe.System(topology = awe.PDB('awe-instance-data/topol.pdb'))
    # 2-color awe needs states assigned to a region: first half -> sink 0, second half -> sink 1
    partition = awe.SinkStates()
    partition.add(0, *range(0,nstates//2))
    partition.add(1, *range(nstates//2,nstates))
    # load the initial cells and walkers; the first third of states are core 0,
    # the last third core 1, and the middle third are uncommitted
    srcdir = 'awe-instance-data/pdbs/ala'
    for i in range(nstates):
        if i < nstates // 3:
            cell = awe.Cell(i, core=0)
        elif i > 2 * nstates // 3:
            cell = awe.Cell(i, core=1)
        else:
            cell = awe.Cell(i)
        color = partition.color(cell)
        system.add_cell(cell)
        for j in range(nwalkers):
            pdbpath = os.path.join(srcdir, 'State%d-%d.pdb' % (i, j))
            pdb = awe.PDB(pdbpath)
            w = awe.Walker(start=pdb.coords, assignment=i, color=color, weight=weights[i,j], cellid=cell.id)
            system.add_walker(w)
    # define the AWE resampling algorithm to use
    multicolor = awe.resample.MultiColor(nwalkers, partition)
    resample = awe.resample.SaveWeights(multicolor)
    adaptive = awe.AWE( wqconfig = cfg,
                        system = system,
                        iterations = opts.iterations,
                        resample = resample,
                        checkpointfreq = 1,
                        verbose=True,
                        log_it=True)
    adaptive.run()
    print('Run time:', awe.time.time(), 's')
    sys.exit(0)
| cooperative-computing-lab/awe | awe-ala.py | Python | gpl-2.0 | 5,432 | [
"Gromacs"
] | b492a3119d9b5fd62d37ada2f54b0fff94cbc53debb621e9fac609c9885bf9c3 |
""" Collection of user jobs for testing purposes
"""
import os
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Interfaces.API.Dirac import Dirac
# parameters
# Common functions
def getJob( jobClass = None ):
  """ Return a new job instance.

      Instantiates the DIRAC Job API class unless the caller supplies an
      alternative class to use instead.
  """
  cls = jobClass or Job
  return cls()
def getDIRAC( diracClass = None ):
  """ Return a new Dirac API instance.

      Instantiates the DIRAC Dirac API class unless the caller supplies an
      alternative class to use instead.
  """
  cls = diracClass or Dirac
  return cls()
def baseToAllJobs( jName, jobClass = None ):
print "**********************************************************************************************************"
print "\n Submitting job ", jName
J = getJob( jobClass )
J.setName( jName )
J.setCPUTime( 17800 )
return J
def endOfAllJobs( J ):
result = getDIRAC().submit( J )
print "Job submission result:", result
if result['OK']:
jobID = int( result['Value'] )
print "Submitted with job ID:", jobID
return result
# List of jobs
def helloWorld():
  """ Submit a trivial hello-world script job """
  job = baseToAllJobs( 'helloWorld' )
  script = find_all( 'exe-script.py', os.environ['DIRAC'], 'tests/Workflow' )[0]
  job.setInputSandbox( [script] )
  job.setExecutable( "exe-script.py", "", "helloWorld.log" )
  return endOfAllJobs( job )
def mpJob():
  """ Submit a multi-processor test job tagged for MP-capable resources """
  job = baseToAllJobs( 'mpJob' )
  mp_script = find_all( 'mpTest.py', os.environ['DIRAC'], 'tests/Utilities' )[0]
  wrapper = find_all( 'testMpJob.sh', os.environ['DIRAC'], 'tests/Utilities' )[0]
  job.setInputSandbox( [mp_script, wrapper] )
  job.setExecutable( 'testMpJob.sh mpTest.py' )
  job.setTag( 'MultiProcessor' )
  return endOfAllJobs( job )
| Andrew-McNab-UK/DIRAC | tests/Utilities/testJobDefinitions.py | Python | gpl-3.0 | 1,543 | [
"DIRAC"
] | deea7d8d84b56b078f951919daaac8d6330371859f27b706206c27a45818b7b3 |
from ase.constraints import FixAtoms as ASE_FixAtoms
from ase.constraints import Filter as ASE_Filter
import numpy as np
class ConstraintMixin:
    """Mixin providing Asap-aware index storage for FixAtoms and Filter.

    Before prepare_for_asap() is called, the constrained-atom index is kept
    on the instance (self._index); afterwards it is stored as a boolean mask
    in the atoms' arrays dictionary.  Subclasses must set 'indexname' and
    'asap_ready' (via pre_init) and expose 'index' as a property built from
    get_index/set_index.
    """
    def prepare_for_asap(self, atoms):
        """Prepare this constraint for optimized Asap dynamics

        This function must be called once the atoms are known by
        all dynamics supporting parallel MD.
        """
        # Store the arrays of the atoms, not the atoms, to prevent cyclic references.
        self.atoms_arrays = atoms.arrays
        assert self.indexname not in self.atoms_arrays
        # Read the index while still in local mode, switch modes, then write
        # it back so set_index converts it into a mask inside atoms_arrays.
        idx = self.index
        self.asap_ready = True
        self.index = idx
        del self._index
        assert self.indexname in self.atoms_arrays
    def set_index(self, idx):
        # After prepare_for_asap: normalize to a boolean mask stored with the atoms.
        if self.asap_ready:
            natoms = len(self.atoms_arrays['positions'])
            if idx.dtype == bool:
                # Boolean - must be a mask
                assert len(idx) == natoms
            else:
                # Must be a list of indices. Convert to a mask
                idx2 = np.zeros(natoms, bool)
                idx2[idx] = True
                idx = idx2
            self.atoms_arrays[self.indexname] = idx
        else:
            # Not yet attached to atoms: keep whatever was passed, unconverted.
            self._index = idx
    def get_index(self):
        # Return the mask from the atoms' arrays once attached, else the local copy.
        if self.asap_ready:
            return self.atoms_arrays[self.indexname]
        else:
            return self._index
class FixAtoms(object, ConstraintMixin, ASE_FixAtoms):
    """Asap-compatible FixAtoms constraint.

    NOTE(review): listing 'object' first among the bases only produces a
    valid MRO under Python 2, where ConstraintMixin is an old-style class -
    confirm before porting to Python 3.
    """
    def __init__(self, indices=None, mask=None):
        self.pre_init()
        ASE_FixAtoms.__init__(self, indices, mask)
    def pre_init(self):
        # Key under which the mask will live in atoms.arrays after
        # prepare_for_asap(); starts in local (non-asap) mode.
        self.indexname = "FixAtoms_index"
        self.asap_ready = False
    def copy(self):
        # Preserve the caller's representation: mask stays a mask,
        # an index list stays an index list.
        if self.index.dtype == bool:
            return FixAtoms(mask=self.index.copy())
        else:
            return FixAtoms(indices=self.index.copy())
    def __get_state__(self):
        # NOTE(review): nonstandard name (pickle uses __getstate__) -
        # presumably invoked explicitly by Asap's own serialization; verify.
        return {'data': self.index,
                'version': 1}
    def __set_state__(self, state):
        # Restore from the dict produced by __get_state__; only version 1
        # is understood.
        try:
            assert(state['version'] == 1)
        except KeyError:
            print state
            raise
        self.pre_init()
        self.index = state['data']
    # 'index' dispatches to the mixin so the storage location can change at runtime.
    index = property(ConstraintMixin.get_index, ConstraintMixin.set_index)
class Filter(object, ConstraintMixin, ASE_Filter):
    def __init__(self, atoms, indices=None, mask=None):
        """Filter atoms.

        This filter can be used to hide degrees of freedom in an Atoms
        object.

        Parameters
        ----------
        indices : list of int
            Indices for those atoms that should remain visible.
        mask : list of bool
            One boolean per atom indicating if the atom should remain
            visible or not.
        """
        self.pre_init()
        ASE_Filter.__init__(self, atoms, indices, mask)
        # Unlike FixAtoms, the atoms are known at construction time, so the
        # index can be migrated into atoms.arrays immediately.
        self.prepare_for_asap(atoms)
    def pre_init(self):
        # NOTE(review): reuses the "FixAtoms_index" key rather than a
        # Filter-specific one - looks like a copy/paste; confirm whether both
        # constraint types may coexist on the same atoms object.
        self.indexname = "FixAtoms_index"
        self.asap_ready = False
    # 'index' dispatches to the mixin so the storage location can change at runtime.
    index = property(ConstraintMixin.get_index, ConstraintMixin.set_index)
def check_asap_constraints(atoms, allowed=None):
    """Return True if the atoms carry only allowed constraints, else False.

    At most a single constraint is accepted.  The optional second argument
    is a tuple of permitted constraint classes; it defaults to (FixAtoms,).
    """
    if allowed is None:
        allowed = (FixAtoms,)
    constraints = atoms.constraints
    if not constraints:
        return True
    if len(constraints) != 1:
        return False
    return isinstance(constraints[0], allowed)
| auag92/n2dm | Asap-3.8.4/Python/asap3/constraints.py | Python | mit | 3,624 | [
"ASE"
] | dd8e9a25b69944c34fc41ab6d3ef457c02ad0ca99361d670e104da5e5da03ed0 |
###UI
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import statistics
import sys, string
import shutil
import os.path
import unique
import update; reload(update)
import export
import ExpressionBuilder
import time
import webbrowser
import traceback
import AltAnalyze
from sys import argv
"""
import numpy
import scipy
from PIL import Image as PIL_Image
import ImageTk
import matplotlib
import matplotlib.pyplot as pylab
"""
try:
try:
import WikiPathways_webservice
except Exception:
#print traceback.format_exc()
if 'URLError' in traceback.format_exc():
print 'No internet connection found'
else:
print 'WikiPathways visualization not supported (requires installation of suds)'
try:
from PIL import Image as PIL_Image
try: import ImageTk
except Exception: from PIL import ImageTk
except Exception:
#print traceback.format_exc()
#print 'Python Imaging Library not installed... using default PNG viewer'
None
try:
### Only used to test if matplotlib is installed
#import matplotlib
#import matplotlib.pyplot as pylab
None
except Exception:
#print traceback.format_exc()
print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
except Exception:
None
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>1 and '-' in command_args and '--GUI' not in command_args: null=[]
else:
try:
import Tkinter
#import bwidget; from bwidget import *
from Tkinter import *
import PmwFreeze
from Tkconstants import LEFT
import tkMessageBox
import tkFileDialog
except Exception: print "\nPmw or Tkinter not found... proceeding with manual input"
# Platform/diagnostic flags used throughout the module.
mac_print_mode = 'no'
if os.name == 'posix': mac_print_mode = 'yes' #os.name is 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
debug_mode = 'no' # when 'yes', downloads run synchronously without the Tk status window
def filepath(filename):
    """Resolve *filename* to an absolute/installation path via unique.filepath."""
    return unique.filepath(filename)
def osfilepath(filename):
    """Return the resolved path for *filename* with backslashes normalized
    to forward slashes (Windows -> POSIX style).

    Uses the built-in str.replace method instead of the Python-2-only
    string.replace module function; behaviour is unchanged under Python 2
    and the function now also runs under Python 3.
    """
    fn = filepath(filename)
    return fn.replace('\\', '/')
def read_directory(sub_dir):
    """Return the directory listing provided by unique.read_directory."""
    return unique.read_directory(sub_dir)
def getFolders(sub_dir):
    """Return only folder-like entries from a directory listing.

    Entries ending in .txt or .csv and any entry containing '.zip'
    are filtered out.
    """
    entries = unique.read_directory(sub_dir)
    return [entry for entry in entries
            if entry[-4:] != ".txt" and entry[-4:] != ".csv" and ".zip" not in entry]
def returnDirectoriesNoReplace(dir):
    """Return EnsMart database directories found under *dir*.

    Entries containing a '.' or the substring 'affymetrix' are skipped.
    """
    entries = unique.returnDirectoriesNoReplace(dir)
    return [entry for entry in entries
            if '.' not in entry and 'affymetrix' not in entry and 'EnsMart' in entry]
def returnFilesNoReplace(dir):
    """Return file-like entries (names containing a '.') found under *dir*."""
    entries = unique.returnDirectoriesNoReplace(dir)
    return [entry for entry in entries if '.' in entry]
def identifyCELfiles(dir,array_type,vendor):
    """Scan *dir* for array/sequencing input files.

    Returns (file_names, full_paths), both de-duplicated and sorted.
    Recognizes Affymetrix .CEL files, RNA-Seq inputs (.bed/.tab/.bam/
    .junction_quantification.txt) and, for non-Affymetrix 3' arrays,
    plain .txt files.  '__'-embedded annotations in file names are
    stripped back to the sample name.
    """
    dir_list = read_directory(dir); dir_list2=[]; full_dir_list=[]
    datatype = 'arrays'
    types={}
    for file in dir_list:
        original_file = file
        file_lower = string.lower(file); proceed = 'no'
        ### "._" indicates a mac alias - such files are always skipped
        if ('.cel' in file_lower[-4:] and '.cel.' not in file_lower) and file_lower[:2] != '._':
            proceed = 'yes'
        elif ('.bed' in file_lower[-4:] or '.tab' in file_lower or '.junction_quantification.txt' in file_lower or '.bam' in file_lower) and file_lower[:2] != '._' and '.bai' not in file_lower:
            proceed = 'yes'
            datatype = 'RNASeq'
        elif array_type == "3'array" and '.cel' not in file_lower[-4:] and '.txt' in file_lower[-4:] and vendor != 'Affymetrix':
            proceed = 'yes'
        if proceed == 'yes':
            # Strip a '__'-embedded annotation (e.g. sample__group.txt) while
            # restoring the original extension in its original case.
            if '__' in file and '.cel' not in string.lower(file):
                #print file,string.split(file,'__'),file[-4:]
                file=string.split(file,'__')[0]+file[-4:]
                if '.tab' in original_file: file = string.replace(file,'.txt','.tab')
                elif '.bed' in original_file: file = string.replace(file,'.txt','.bed')
                if '.TAB' in original_file: file = string.replace(file,'.txt','.TAB')
                elif '.BED' in original_file: file = string.replace(file,'.txt','.BED')
            dir_list2.append(file)
            file = dir+'/'+file
            full_dir_list.append(file)
    dir_list2 = unique.unique(dir_list2)
    full_dir_list = unique.unique(full_dir_list)
    dir_list2.sort(); full_dir_list.sort()
    if datatype == 'RNASeq':
        checkBEDFileFormat(dir) ### Make sure the names are not inconsistent (exits the program otherwise)
        dir_list3=[]
        c = string.lower(string.join(dir_list2,''))
        if '.bam' in c and '.bed' in c: #If bed present use bed and not bam
            for i in dir_list2:
                if '.bam' not in i:
                    dir_list3.append(i)
            dir_list2 = dir_list3
        elif '.bam' in c:
            # Only BAMs present: report the .bed names that will be derived from them.
            for i in dir_list2:
                if '.bam' in i:
                    dir_list3.append(string.replace(i,'.bam','.bed'))
                elif '.BAM' in i:
                    dir_list3.append(string.replace(i,'.BAM','.bed'))
            dir_list2 = dir_list3
    return dir_list2,full_dir_list
def checkBEDFileFormat(bed_dir):
    """ This checks to see if some files have two underscores and one has none or if double underscores are missing from all.

    Exon and junction files for the same sample must share a prefix separated
    by a double underscore (e.g. cancer1__exon.bed / cancer1__junction.bed).
    On inconsistent naming a warning dialog is shown and the program exits.
    """
    dir_list = read_directory(bed_dir)
    condition_db={}
    for filename in dir_list:
        if '.tab' in string.lower(filename) or '.bed' in string.lower(filename) or '.junction_quantification.txt' in string.lower(filename):
            condition_db[filename]=[]
    if len(condition_db)==0: ### Occurs if BAMs present but not .bed files
        for filename in dir_list:
            if '.bam' in string.lower(filename):
                condition_db[filename]=[]
    ### Check to see if exon.bed and junction.bed file names are propper or faulty (which will result in downstream errors)
    double_underscores=[]
    no_doubles=[]
    for condition in condition_db:
        if '__' in condition:
            double_underscores.append(condition)
        else:
            no_doubles.append(condition)
    exon_beds=[]
    junctions_beds=[]
    if len(double_underscores)>0 and len(no_doubles)>0:
        ### Hence, a problem is likely due to inconsistent naming
        print_out = 'The input files appear to have inconsistent naming. If both exon and\njunction sample data are present, make sure they are named propperly.\n\n'
        print_out += 'For example: cancer1__exon.bed, cancer1__junction.bed\n(double underscore required to match these samples up)!\n\n'
        print_out += 'Exiting AltAnalyze'
        IndicatorWindowSimple(print_out,'Quit')
        sys.exit()
    elif len(no_doubles)>0:
        # No '__' anywhere: mixing exon and junction files without the
        # separator makes the pairs unmatchable, so this is fatal too.
        for condition in no_doubles:
            condition = string.lower(condition)
            if 'exon' in condition:
                exon_beds.append(condition)
            if 'junction' in condition:
                junctions_beds.append(condition)
        if len(exon_beds)>0 and len(junctions_beds)>0:
            print_out = 'The input files appear to have inconsistent naming. If both exon and\njunction sample data are present, make sure they are named propperly.\n\n'
            print_out += 'For example: cancer1__exon.bed, cancer1__junction.bed\n(double underscore required to match these samples up)!\n\n'
            print_out += 'Exiting AltAnalyze'
            IndicatorWindowSimple(print_out,'Quit')
            sys.exit()
def identifyArrayType(full_dir_list):
    """Determine the Affymetrix array type(s) from a list of CEL files.

    Reads up to the first ~150 lines of each file, handling the NUL-byte
    encoding of version-4 CEL files, and extracts the array name from the
    'sq' token or the 'affymetrix-array-type' header field.  Returns
    (list_of_unique_array_types, last_array_type_seen).
    """
    #import re
    arrays={}; array_type=None ### Determine the type of unique arrays in each directory
    for filename in full_dir_list:
        fn=filepath(filename); ln=0
        for line in open(fn,'rU').xreadlines():
            if '\x00' in line: ### Simple way of determining if it is a version 4 file with encoding
                line = string.replace(line,'\x00\x00',' ') ### retains spaces
                line = string.replace(line,'\x00','') ### returns human readable line
            if ln<150:
                data = cleanUpLine(line); ln+=1
                if 'sq' in data:
                    try:
                        #fileencoding = "iso-8859-1"
                        #txt = line.decode(fileencoding); print [txt];kill ### This works but so does the above
                        array_info,null = string.split(data,'sq')
                        array_info = string.split(array_info,' ')
                        array_type = array_info[-1]
                        if '.' in array_type: array_type,null = string.split(array_type,'.')
                        #array_type = string.join(re.findall(r"\w",array_type),'') ### should force only alphanumeric but doesn't seem to always work
                        arrays[array_type]=[]
                        #print array_type+'\t'+filename
                        break
                    except Exception: null=[]
                elif 'affymetrix-array-type' in data:
                    null, array_type = string.split(data,'affymetrix-array-type')
                    if '.' in array_type: array_type,null = string.split(array_type,'.')
                    arrays[array_type]=[]
                """else: ### some CEL file versions are encoded
                fileencoding = "iso-8859-1"
                txt = line.decode(fileencoding)
                print txt;kill"""
            else: break
    array_ls = []
    for array in arrays:
        if len(array)<50: array_ls.append(array) ### Occurs with version 4 encoding (bad entries added)
    return array_ls, array_type
def getAffyFilesRemote(array_name, arraytype, species):
    """External entry point for fetching Affymetrix library/annotation files.

    Forces debug (synchronous) download mode and back-select behaviour by
    setting the module globals consumed by getAffyFiles, then delegates.
    Returns (library_dir, annotation_dir, bgp_file, clf_file).
    """
    global backSelect
    global array_type
    global debug_mode
    debug_mode = 'yes'
    backSelect = 'yes'
    array_type = arraytype
    return getAffyFiles(array_name, species)
def getAffyFiles(array_name,species):#('AltDatabase/affymetrix/LibraryFiles/'+library_file,species)
    """Locate (or offer to download) the Affymetrix library and CSV annotation
    files for *array_name*.

    Returns (library_dir, annotation_dir, bgp_file, clf_file); the last two
    are only populated for PGF-based (exon/gene/junction) arrays.  Relies on
    the module globals 'backSelect', 'array_type' and 'debug_mode'.
    """
    sa = supproted_array_db[array_name]; library_file = sa.LibraryFile(); annot_file = sa.AnnotationFile(); original_library_file = library_file
    filename = 'AltDatabase/affymetrix/LibraryFiles/'+library_file
    fn=filepath(filename); library_dir=filename; bgp_file = ''; clf_file = ''
    local_lib_files_present = False
    if backSelect == 'yes': warn = 'no'
    else: warn = 'yes'
    try:
        # Opening the file (even one line) proves the library file exists locally.
        for line in open(fn,'rU').xreadlines():break
        ### Hence, the library file was found!!!
        local_lib_files_present = True
        input_cdf_file = filename
        if '.pgf' in input_cdf_file:
            ###Check to see if the clf and bgp files are present in this directory
            icf_list = string.split(input_cdf_file,'/'); parent_dir = string.join(icf_list[:-1],'/'); cdf_short = icf_list[-1]
            clf_short = string.replace(cdf_short,'.pgf','.clf')
            if array_type == 'exon' or array_type == 'junction':
                bgp_short = string.replace(cdf_short,'.pgf','.antigenomic.bgp')
            else: bgp_short = string.replace(cdf_short,'.pgf','.bgp')
            try: dir_list = read_directory(parent_dir)
            except Exception: dir_list = read_directory('/'+parent_dir)
            if clf_short in dir_list and bgp_short in dir_list:
                pgf_file = input_cdf_file; clf_file = string.replace(pgf_file,'.pgf','.clf')
                if array_type == 'exon' or array_type == 'junction': bgp_file = string.replace(pgf_file,'.pgf','.antigenomic.bgp')
                else: bgp_file = string.replace(pgf_file,'.pgf','.bgp')
            else:
                try:
                    print_out = "The directory;\n"+parent_dir+"\ndoes not contain either a .clf or antigenomic.bgp\nfile, required for probeset summarization."
                    IndicatorWindow(print_out,'Continue')
                except Exception: print print_out; sys.exit()
    except Exception:
        # Library file missing: offer to download it (option 1) or let the
        # user point at local files instead.
        print_out = "AltAnalyze was not able to find a library file\nfor your arrays. Would you like AltAnalyze to\nautomatically download these files?"
        try:
            dw = DownloadWindow(print_out,'Download by AltAnalyze','Select Local Files')
            warn = 'no' ### If already downloading the library, don't warn to download the csv too
            dw_results = dw.Results(); option = dw_results['selected_option']
        except Exception: option = 1 ### Occurs when Tkinter is not present - used by CommandLine mode
        if option == 1:
            library_file = string.replace(library_file,'.cdf','.zip')
            filename = 'AltDatabase/affymetrix/LibraryFiles/'+library_file
            input_cdf_file = filename
            if '.pgf' in input_cdf_file:
                pgf_file = input_cdf_file; clf_file = string.replace(pgf_file,'.pgf','.clf')
                if array_type == 'exon' or array_type == 'junction': bgp_file = string.replace(pgf_file,'.pgf','.antigenomic.bgp')
                else: bgp_file = string.replace(pgf_file,'.pgf','.bgp')
                filenames = [pgf_file+'.gz',clf_file+'.gz',bgp_file+'.gz']
                if 'Glue' in pgf_file:
                    kil_file = string.replace(pgf_file,'.pgf','.kil') ### Only applies to the Glue array
                    filenames.append(kil_file+'.gz')
            else: filenames = [input_cdf_file]
            for filename in filenames:
                var_list = filename,'LibraryFiles'
                if debug_mode == 'no': StatusWindow(var_list,'download')
                else:
                    for filename in filenames:
                        continue_analysis = update.downloadCurrentVersion(filename,'LibraryFiles','')
                try: os.remove(filepath(filename)) ### Not sure why this works now and not before
                except Exception: null=[]
        else: library_dir = ''
    filename = 'AltDatabase/affymetrix/'+species+'/'+annot_file
    fn=filepath(filename); annotation_dir = filename
    try:
        # Same existence probe for the CSV annotation file.
        for line in open(fn,'rU').xreadlines():break
    except Exception:
        if warn == 'yes' and local_lib_files_present == False:
            ### Indicates that library file wasn't present to prior to this method
            print_out = "AltAnalyze was not able to find a CSV annotation file\nfor your arrays. Would you like AltAnalyze to\nautomatically download these files?"
            try:
                dw = DownloadWindow(print_out,'Download by AltAnalyze','Select Local Files'); warn = 'no'
                dw_results = dw.Results(); option = dw_results['selected_option']
            except OSError: option = 1 ### Occurs when Tkinter is not present - used by CommandLine mode
        else:
            try: option = option
            except Exception: option = 2
        if option == 1 or debug_mode=='yes':
            annot_file += '.zip'
            filenames = ['AltDatabase/affymetrix/'+species+'/'+annot_file]
            for filename in filenames:
                var_list = filename,'AnnotationFiles'
                if debug_mode == 'no': StatusWindow(var_list,'download')
                else:
                    for filename in filenames:
                        try: update.downloadCurrentVersionUI(filename,'AnnotationFiles','',Tk())
                        except Exception:
                            try: update.downloadCurrentVersion(filename,'AnnotationFiles',None)
                            except Exception: pass ### Don't actually need Affy's annotations in most cases - GO-Elite used instead
            try: os.remove(filepath(filename)) ### Not sure why this works now and not before
            except Exception: null=[]
        else: annotation_dir = ''
    return library_dir, annotation_dir, bgp_file, clf_file
def cleanUpLine(line):
    """Strip line terminators and quote characters from a raw file line.

    Removes newline and carriage-return characters, double quotes, and the
    literal two-character sequence backslash+'c' (present in the original
    code; retained for compatibility).  Uses str methods instead of the
    Python-2-only string-module functions; behaviour is identical under
    Python 2 and the function now also runs under Python 3.
    """
    data = line.replace('\n', '')
    data = data.replace('\\c', '')  # literal backslash followed by 'c' (same value as the original '\c')
    data = data.replace('\r', '')
    data = data.replace('"', '')
    return data
########### Status Window Functions ###########
def copyFiles(file1,file2,root):
    """Copy *file1* to *file2* (creating the destination directory first),
    then close the Tk status window *root*."""
    print 'Copying files from:\n',file1
    data = export.ExportFile(file2) ### Ensures the directory exists
    try: shutil.copyfile(file1,file2)
    except Exception: print "This file already exists in the destination directory."
    root.destroy()
class StatusWindow:
    """Tk window that captures stdout while running one named analysis.

    The constructor builds a scrolled text window, redirects sys.stdout into
    it, dispatches on *analysis_type* to run the requested operation (each
    branch falls back to a windowless call when Tk is unavailable), then
    restores stdout when the window closes.
    """
    def __init__(self,info_list,analysis_type,windowType='parent'):
        try:
            if windowType == 'child':
                root = Toplevel()
            else:
                root = Tk()
            self._parent = root
            root.title('AltAnalyze version 2.0.9.3 beta')
            statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
            height = 300; width = 700
            if os.name != 'nt': height+=100; width+=50
            self.sf = PmwFreeze.ScrolledFrame(self._parent,
                    labelpos = 'n', label_text = 'Download File Status Window',
                    usehullsize = 1, hull_width = width, hull_height = height)
            self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
            self.frame = self.sf.interior()
            group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Output')
            group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
            Label(group.interior(),width=180,height=152,justify=LEFT, bg='black', fg = 'white',anchor=NW,padx = 5,pady = 5, textvariable=statusVar).pack(fill=X,expand=Y)
            status = StringVarFile(statusVar,root) ### Captures the stdout (or print) to the GUI instead of to the terminal
            self.original_sys_out = sys.stdout ### Save the original stdout mechanism
            #ProgressBar('Download',self._parent)
        except Exception: None
        # Dispatch: each branch tries the GUI path (stdout redirected, run via
        # root.after) and falls back to a direct call without a window.
        if analysis_type == 'download':
            filename,dir = info_list
            try: sys.stdout = status; root.after(100,update.downloadCurrentVersionUI(filename,dir,None,self._parent))
            except Exception:
                update.downloadCurrentVersion(filename,dir,None)
        if analysis_type == 'copy':
            file1,file2 = info_list
            try: sys.stdout = status; root.after(100,copyFiles(file1,file2,self._parent))
            except Exception: copyFiles(file1,file2,None)
        if analysis_type == 'getOnlineDBConfig':
            file_location_defaults = info_list
            try: sys.stdout = status; root.after(100,getOnlineDBConfig(file_location_defaults,self._parent))
            except Exception,e: getOnlineDBConfig(file_location_defaults,None)
        if analysis_type == 'getOnlineEliteDatabase':
            file_location_defaults,db_version,new_species_codes,update_goelite_resources = info_list
            try: sys.stdout = status; root.after(100,getOnlineEliteDatabase(file_location_defaults,db_version,new_species_codes,update_goelite_resources,self._parent))
            except Exception,e: getOnlineEliteDatabase(file_location_defaults,db_version,new_species_codes,update_goelite_resources,None)
        if analysis_type == 'getAdditionalOnlineResources':
            species_code,additional_resources = info_list
            try: sys.stdout = status; root.after(100,getAdditionalOnlineResources(species_code,additional_resources,self._parent))
            except Exception,e: getAdditionalOnlineResources(species_code,additional_resources,None)
        if analysis_type == 'createHeatMap':
            filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast = info_list
            try: sys.stdout = status; root.after(100,createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, self._parent))
            except Exception,e: createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose,contrast,None)
        if analysis_type == 'performPCA':
            filename, pca_labels, dimensions, pca_algorithm, transpose, geneSetName, species = info_list
            try: sys.stdout = status; root.after(100,performPCA(filename, pca_labels, pca_algorithm, transpose, self._parent, plotType = dimensions, geneSetName=geneSetName, species=species))
            except Exception,e: performPCA(filename, pca_labels, pca_algorithm, transpose, None, plotType = dimensions, geneSetName=geneSetName, species=species)
        if analysis_type == 'runLineageProfiler':
            fl, filename, vendor, custom_markerFinder, geneModel_file, modelDiscovery = info_list
            try: sys.stdout = status; root.after(100,runLineageProfiler(fl, filename, vendor, custom_markerFinder, geneModel_file, self._parent, modelSize=modelDiscovery))
            except Exception,e: runLineageProfiler(fl, filename, vendor, custom_markerFinder, geneModel_file, None, modelSize=modelDiscovery)
        if analysis_type == 'MergeFiles':
            files_to_merge, join_option, ID_option, output_merge_dir = info_list
            try: sys.stdout = status; root.after(100,MergeFiles(files_to_merge, join_option, ID_option, output_merge_dir, self._parent))
            except Exception,e: MergeFiles(files_to_merge, join_option, ID_option, output_merge_dir, None)
        if analysis_type == 'VennDiagram':
            files_to_merge, output_venn_dir = info_list
            try: sys.stdout = status; root.after(100,vennDiagram(files_to_merge, output_venn_dir, self._parent))
            except Exception,e: vennDiagram(files_to_merge, output_venn_dir, None)
        if analysis_type == 'AltExonViewer':
            species,platform,exp_file,gene,show_introns,analysisType = info_list
            try: sys.stdout = status; root.after(100,altExonViewer(species,platform,exp_file,gene,show_introns,analysisType,self._parent))
            except Exception,e: altExonViewer(species,platform,exp_file,gene,show_introns,analysisType,None)
        if analysis_type == 'network':
            inputDir,inputType,outputdir,interactionDirs,degrees,expressionFile,gsp = info_list
            try: sys.stdout = status; root.after(100,networkBuilder(inputDir,inputType,outputdir,interactionDirs,degrees,expressionFile,gsp, self._parent))
            except Exception,e: networkBuilder(inputDir,inputType,outputdir,interactionDirs,degrees,expressionFile,gsp, None)
        if analysis_type == 'IDConverter':
            filename, species_code, input_source, output_source = info_list
            try: sys.stdout = status; root.after(100,IDconverter(filename, species_code, input_source, output_source, self._parent))
            except Exception,e: IDconverter(filename, species_code, input_source, output_source, None)
        if analysis_type == 'predictGroups':
            try: expFile, mlp_instance, gsp, reportOnly = info_list
            except Exception: expFile, mlp_instance, gsp, reportOnly = info_list
            try: sys.stdout = status; root.after(100,predictSampleExpGroups(expFile, mlp_instance, gsp, reportOnly, self._parent))
            except Exception,e: predictSampleExpGroups(expFile, mlp_instance, gsp, reportOnly, None)
        if analysis_type == 'preProcessRNASeq':
            species,exp_file_location_db,dataset,mlp_instance = info_list
            sys.stdout = status; root.after(100,preProcessRNASeq(species,exp_file_location_db,dataset,mlp_instance, self._parent))
        try:
            self._parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
            self._parent.mainloop()
            self._parent.destroy()
        except Exception: None ### This is what typically get's called
        try:
            sys.stdout = self.original_sys_out ### Has to be last to work!!!
        except Exception: None
    def deleteWindow(self):
        # Window-manager close button: tear down and exit the process.
        #tkMessageBox.showwarning("Quit Selected","Use 'Quit' button to end program!",parent=self._parent)
        self._parent.destroy(); sys.exit()
    def quit(self):
        try: self._parent.destroy(); sys.exit() #self._parent.quit();
        except Exception: sys.exit() #self._parent.quit();
    def SysOut(self):
        # Accessor for the stdout object saved before redirection.
        return self.original_sys_out
def preProcessRNASeq(species,exp_file_location_db,dataset,mlp_instance,root):
    """Quantify RNA-Seq input (Kallisto on FASTQs, or BED/BAM alignment)
    and compute gene-level expression statistics.

    *root* is the Tk status window (None for command-line mode); it is
    destroyed on every exit path.  Errors are appended to Error.log under
    the run's root directory.
    """
    # NOTE(review): the loop leaves flx bound to the last dataset's file
    # locations; presumably only one dataset is ever present - confirm.
    for dataset in exp_file_location_db:
        flx = exp_file_location_db[dataset]
    if root == None: display=False
    else: display=True
    runKallisto = False
    try:
        import RNASeq, ExonArray
        expFile = flx.ExpFile()
        count = verifyFileLength(expFile)
        try: fastq_folder = flx.RunKallisto()
        except Exception: fastq_folder = []
        if len(fastq_folder)>0 and count<2:
            # FASTQs supplied and no expression file yet: run Kallisto.
            print 'Pre-processing input files'
            try:
                parent_dir = export.findParentDir(expFile)
                flx = exp_file_location_db[dataset]; flx.setRootDir(parent_dir)
                fastq_folder = flx.RunKallisto()
                runKallisto = True
                RNASeq.runKallisto(species,dataset,flx.RootDir(),fastq_folder,returnSampleNames=False)
            except Exception:
                print 'Kallisto failed due to:',traceback.format_exc()
            try: root.destroy()
            except Exception: null=[]
            return None
        elif len(fastq_folder)>0 and count>1:
            try: root.destroy()
            except Exception: null=[]
            return None ### Already run
        elif count<2:
            # No expression file yet: align BED/BAM features to Ensembl.
            print 'Pre-processing input files'
            try: biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset,Multi=mlp_instance)
            except Exception: print 'unknown'
            biotypes = getBiotypes(expFile)
        else:
            biotypes = getBiotypes(expFile)
        array_linker_db,array_names = ExonArray.remoteExonProbesetData(expFile,{},'arraynames',array_type)
        steady_state_export = expFile[:-4]+'-steady-state.txt'
        normalize_feature_exp = flx.FeatureNormalization()
        try: excludeLowExpressionExons = flx.excludeLowExpressionExons()
        except Exception: excludeLowExpressionExons = True
        if flx.useJunctionsForGeneExpression():
            if 'junction' in biotypes:
                feature = 'junction'
            else:
                feature = 'exon'
        else:
            ### Use all exons either way at this step since more specific parameters will apply to the next iteration
            if 'exon' in biotypes:
                feature = 'exon'
            else:
                feature = 'junction'
        probeset_db = getAllKnownFeatures(feature)
        print 'Calculating gene-level expression values from',feature+'s'
        # NOTE(review): 'fl' (not the local 'flx') is referenced here and in
        # the error handler below - it appears to rely on a module-level
        # global set elsewhere; verify before refactoring.
        RNASeq.calculateGeneLevelStatistics(steady_state_export,species,probeset_db,normalize_feature_exp,array_names,fl,excludeLowExp=excludeLowExpressionExons,exportRPKMs=True)
        #if display == False: print print_out
        #try: InfoWindow(print_out, 'Continue')
        #except Exception: None
        try: root.destroy()
        except Exception: null=[]
    except Exception:
        error = traceback.format_exc()
        try:
            logfile = filepath(fl.RootDir()+'Error.log')
            log_report = open(logfile,'a')
            log_report.write(traceback.format_exc())
        except Exception:
            None
        # NOTE(review): the trailing ',error' makes print_out a tuple, not a
        # concatenated string - presumably unintended but long-standing.
        print_out = 'Expression quantification failed..\n',error
        try: print print_out
        except Exception: pass ### Windows issue with the Tk status window stalling after pylab.show is called
        try: WarningWindow(print_out,'Continue')
        except Exception: pass
        try: root.destroy()
        except Exception: null=[]
def getBiotypes(filename):
    """ Scan an expression file to determine which feature biotypes are
    present, based on the feature ID column: IDs containing a dash are
    counted as 'junction', others as 'exon'.
    filename: expression file path ('RawSpliceData' files keep IDs in the
        third column, others in the first).
    Returns a dict with 'exon' and/or 'junction' keys (values are empty
    lists); returns {} on any read error. """
    biotypes={}
    firstRow=True
    if 'RawSpliceData' in filename: index = 2
    else: index = 0
    try:
        fn=filepath(filename)
        ### Use a context manager so the file handle is closed (the prior
        ### bare open(...).xreadlines() leaked the handle)
        with open(fn,'rU') as source:
            for line in source:
                t = string.split(line,'\t')
                if firstRow:
                    firstRow = False ### skip the header row
                else:
                    if '-' in t[index]:
                        biotypes['junction']=[]
                    else:
                        biotypes['exon']=[]
    except Exception: pass
    return biotypes
def getAllKnownFeatures(feature):
    """ Build a gene-to-feature-ID dictionary restricted to known exons or
    junctions, for gene-level expression summarization.
    feature: 'exon' or 'junction' - which probeset class to retain.
    Uses the module globals array_type, vendor, fl and species.
    Returns {gene: [probeset, ...]} for probesets with known exon annotations.
    NOTE(review): if array_type is not 'AltMouse'/'RNASeq' and vendor is not
    'Affymetrix', probeset_db is never assigned and the loop below would raise
    a NameError -- presumably those combinations never reach this code; confirm. """
    ### Simple method to extract gene features of interest
    import ExonArrayEnsemblRules
    source_biotype = 'mRNA'
    if array_type == 'gene': source_biotype = 'gene'
    elif array_type == 'junction': source_biotype = 'junction'
    if array_type == 'AltMouse':
        import ExpressionBuilder
        probeset_db,constitutive_gene_db = ExpressionBuilder.importAltMerge('full')
        source_biotype = 'AltMouse'
    elif vendor == 'Affymetrix' or array_type == 'RNASeq':
        if array_type == 'RNASeq':
            ### For RNA-Seq the annotation source also needs the experiment root directory
            source_biotype = array_type, fl.RootDir()
        dbs = ExonArrayEnsemblRules.getAnnotations('no','Ensembl',source_biotype,species)
        probeset_db = dbs[0]; del dbs
    probeset_gene_db={}
    for probeset in probeset_db:
        probe_data = probeset_db[probeset]
        gene = probe_data[0]; external_exonid = probe_data[-2]
        if len(external_exonid)>2: ### These are known exon only (e.g., 'E' probesets)
            proceed = True
            if feature == 'exon': ### Restrict the analysis to exon RPKM or count data for constitutive calculation
                if '-' in probeset and '_' not in probeset: proceed = False
            else:
                ### junction mode: exclude plain exon IDs (no dash, no underscore)
                if '-' not in probeset and '_' not in probeset: proceed = False ### Use this option to override
            if proceed:
                try: probeset_gene_db[gene].append(probeset)
                except Exception: probeset_gene_db[gene] = [probeset]
    return probeset_gene_db
def RemotePredictSampleExpGroups(expFile, mlp_instance, gsp, globalVars):
    """ Headless wrapper around predictSampleExpGroups: installs the species
    and platform supplied by the caller as module globals, then runs group
    prediction with no Tk root (reportOnly=False). """
    global species
    global array_type
    species, array_type = globalVars
    ### root=None -> non-interactive execution (no windows created)
    predictSampleExpGroups(expFile, mlp_instance, gsp, False, None)
def predictSampleExpGroups(expFile, mlp_instance, gsp, reportOnly, root):
    """ Predict de novo sample groups (single-cell workflow) from an
    expression file. Gene-level and/or alt-exon features are evaluated per
    the gsp parameters; resulting cluster images accumulate in the global
    graphic_links list. Uses the module globals fl, species and array_type.
    expFile: input expression file path
    mlp_instance: multiprocessing pool instance
    gsp: parameters object (features to evaluate, cutoffs, event counts)
    reportOnly: passed through to the RNASeq workflow
    root: Tk root window for interactive display, or None for headless use """
    global graphic_links; graphic_links=[];
    if root == None: display=False
    else: display=True
    import RNASeq,ExpressionBuilder; reload(RNASeq) ### allows for GUI testing with restarting
    try:
        if gsp.FeaturestoEvaluate() != 'AltExon':
            graphic_links = RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp_instance, parameters=gsp, reportOnly=reportOnly)
        if gsp.FeaturestoEvaluate() != 'Genes':
            graphic_links2,cluster_input_file=ExpressionBuilder.unbiasedComparisonSpliceProfiles(fl.RootDir(),species,array_type,expFile=fl.CountsFile(),min_events=gsp.MinEvents(),med_events=gsp.MedEvents())
            ### Re-cluster the unbiased splice profiles at the exon level with cutoffs disabled
            gsp.setCountsCutoff(0);gsp.setExpressionCutoff(0)
            graphic_links3 = RNASeq.singleCellRNASeqWorkflow(species, 'exons', cluster_input_file, mlp_instance, parameters=gsp, reportOnly=reportOnly)
            graphic_links+=graphic_links2+graphic_links3
        print_out = 'Predicted sample groups saved.'
        if len(graphic_links)==0:
            print_out = 'No predicted sample groups identified. Try different parameters.'
        if display == False: print print_out
        try: InfoWindow(print_out, 'Continue')
        except Exception: None
        try: root.destroy()
        except Exception: null=[]
    except Exception:
        error = traceback.format_exc()
        ### Replace known cryptic tracebacks with user-readable explanations
        if 'score_ls' in error:
            error = 'Unknown error likely due to too few genes resulting from the filtering options.'
        if 'options_result_in_no_genes' in error:
            error = 'No genes differentially expressed with the input criterion'
        print_out = 'Predicted sample export failed..\n',error
        try: print print_out
        except Exception: pass ### Windows issue with the Tk status window stalling after pylab.show is called
        try: WarningWindow(print_out,'Continue')
        except Exception: pass
        try: root.destroy()
        except Exception: null=[]
        try: print error
        except Exception: pass
def openDirectory(output_dir):
    """ Reveal a folder in the operating system's file browser
    (Windows: os.startfile, macOS: open, Linux: xdg-open). """
    quoted = '"'+output_dir+'"'
    if os.name == 'nt':
        ### os.startfile is Windows-only; fall back to the shell if it errors
        try: os.startfile(quoted)
        except Exception: os.system('open '+quoted)
    elif 'darwin' in sys.platform:
        os.system('open '+quoted)
    elif 'linux' in sys.platform:
        os.system('xdg-open '+quoted)
def networkBuilder(inputDir,inputType,outputdir,interactionDirs_short,degrees,expressionFile,gsp,root):
    """ Build and visualize a gene interaction network from the input IDs
    using the selected interaction resources (WikiPathways by default).
    inputDir/inputType: input ID source and its format
    outputdir: directory for the network outputs
    interactionDirs_short: resource names; a 'common-' prefix moves a resource
        into the secondary set and an 'all-' prefix into the obligatory set
    degrees: network-building algorithm (degrees of separation)
    expressionFile: optional expression file used to color nodes
    gsp: parameters object (species, gene/pathway/ontology filters)
    root: Tk root window or None/'' for command-line use """
    species = gsp.Species()
    Genes = gsp.GeneSelection()
    PathwaySelect = gsp.PathwaySelect()
    OntologyID = gsp.OntologyID()
    GeneSet = gsp.GeneSet()
    IncludeExpIDs = gsp.IncludeExpIDs()
    if 'Ontology' in GeneSet: directory = 'nested'
    else: directory = 'gene-mapp'
    interactionDirs=[]
    obligatorySet=[] ### Always include interactions from these if associated with any input ID period
    secondarySet=[]
    print 'Species:',species, '| Algorithm:',degrees, ' | InputType:',inputType, ' | IncludeExpIDs:',IncludeExpIDs
    print 'Genes:',Genes
    print 'OntologyID:',gsp.OntologyID(), gsp.PathwaySelect(), GeneSet
    print ''
    if interactionDirs_short == None or len(interactionDirs_short)==0:
        interactionDirs_short = ['WikiPathways']
    for i in interactionDirs_short:
        if i == None: None
        else:
            if 'common-' in i:
                i = string.replace(i,'common-','')
                secondarySet.append(i)
            if 'all-' in i:
                i = string.replace(i,'all-','')
                obligatorySet.append(i)
            ### Resolve the resource name to its species-specific interaction file
            fn = filepath('AltDatabase/goelite/'+species+'/gene-interactions/Ensembl-'+i+'.txt')
            interactionDirs.append(fn)
    print "Interaction Files:",string.join(interactionDirs_short,' ')
    import InteractionBuilder
    try:
        output_filename = InteractionBuilder.buildInteractions(species,degrees,inputType,inputDir,outputdir,interactionDirs,Genes=Genes,
                    geneSetType=GeneSet,PathwayFilter=PathwaySelect,OntologyID=OntologyID,directory=directory,expressionFile=expressionFile,
                    obligatorySet=obligatorySet,secondarySet=secondarySet,IncludeExpIDs=IncludeExpIDs)
        if output_filename==None:
            print_out = 'Network creation/visualization failed..\nNo outputs produced... try different options.\n'
            print_out += traceback.format_exc()
            if root != None and root != '':
                try: InfoWindow(print_out, 'Continue')
                except Exception: None
        else:
            if root != None and root != '':
                try: openDirectory(outputdir)
                except Exception: None
            else:
                print 'Results saved to:',output_filename
            if root != None and root != '':
                GUI(root,'ViewPNG',[],output_filename) ### The last is default attributes (should be stored as defaults in the option_db var)
    except Exception:
        error = traceback.format_exc()
        if 'queryGeneError' in error:
            print_out = 'No valid gene IDs present in the input text search\n(valid IDs = FOXP1,SOX2,NANOG,TCF7L1)'
        else: print_out = 'Network creation/visualization failed..\n',error
        if root != None and root != '':
            try: InfoWindow(print_out, 'Continue')
            except Exception: None
        try: root.destroy()
        except Exception: None
def vennDiagram(files_to_merge, output_venn_dir, root, display=True):
    """ Compare the IDs in the input files and export Venn Diagram images to
    output_venn_dir. On failure, shows an InfoWindow when a Tk root is
    present and destroys the root. """
    import VennDiagram
    ### NOTE(review): the first branch is a no-op (display is already False
    ### there), so display ends up True unless root is None AND display was
    ### passed as False -- confirm this asymmetry is intended (compare the
    ### simpler root-based check in altExonViewer)
    if root == None and display==False: display=False
    else: display=True
    try:
        VennDiagram.compareInputFiles(files_to_merge,output_venn_dir,display=display)
        if display == False: print 'VennDiagrams saved to:',output_venn_dir
    except Exception:
        error = traceback.format_exc()
        print_out = 'Venn Diagram export failed..\n',error
        if root != None and root != '':
            try: InfoWindow(print_out, 'Continue')
            except Exception: None
        try: root.destroy()
        except Exception: None
def altExonViewer(species,platform,exp_file,gene,show_introns,analysisType,root):
    """ Visualize alternative-exon data for a gene. For 'Sashimi-Plot', an
    index of splicing events is built and SashimiPlot renders from BAM files;
    for any other analysisType, QC.displayExpressionGraph plots feature-level
    expression. Plots display interactively only when a Tk root is supplied. """
    import QC
    transpose=True
    if root == None: display = False
    else: display = True
    if analysisType == 'Sashimi-Plot':
        try:
            ### Create sashimi plot index
            import SashimiIndex
            print 'Indexing splicing-events'
            SashimiIndex.remoteIndexing(species,exp_file)
            import SashimiPlot
            #reload(SashimiPlot)
            print 'Running Sashimi-Plot...'
            SashimiPlot.remoteSashimiPlot(species,exp_file,exp_file,gene) ### assuming the bam files are in the root-dir
            if root != None and root != '':
                print_out = 'Sashimi-Plot results saved to:\n'+exp_file+'SashimiPlots'
                try: InfoWindow(print_out, 'Continue')
                except Exception: None
        except Exception:
            error = traceback.format_exc()
            print_out = 'AltExon Viewer failed..\n',error
            if root != None and root != '':
                try: WarningWindow(print_out, 'Continue')
                except Exception: None
        try: root.destroy()
        except Exception: None
    else:
        #print [analysisType, species,platform,exp_file,gene,transpose,display,show_introns]
        try: QC.displayExpressionGraph(species,platform,exp_file,gene,transpose,display=display,showIntrons=show_introns,analysisType=analysisType)
        except Exception:
            error = traceback.format_exc()
            print_out = 'AltExon Viewer failed..\n',error
            if root != None and root != '':
                try: WarningWindow(print_out, 'Continue')
                except Exception: None
        try: root.destroy()
        except Exception: None
def MergeFiles(files_to_merge, join_option, ID_option, output_merge_dir, root):
    """ Join/merge a set of input files into one output file and, when a Tk
    root is supplied, report the outcome and reveal the output folder.
    files_to_merge: list of input file paths
    join_option, ID_option: merge behavior flags passed to mergeFiles.joinFiles
    output_merge_dir: directory the merged file is written to
    root: Tk root window or None/'' for command-line use """
    import mergeFiles
    try:
        outputfile = mergeFiles.joinFiles(files_to_merge, join_option, ID_option, output_merge_dir)
    except Exception:
        error = traceback.format_exc()
        outputfile = 'failed'
    if outputfile == 'failed':
        print_out = 'File merge failed due to:\n',error
    else:
        print_out = 'File merge complete. See the new file:\n'+outputfile
    if root != None and root != '':
        ### GUI mode: report the result and close the window
        try:
            InfoWindow(print_out, 'Continue')
        except Exception:
            None
        try:
            root.destroy()
        except Exception:
            None
        if outputfile != 'failed':
            ### Reveal the folder containing the merged file
            try:
                openDirectory(output_merge_dir)
            except Exception:
                None
def IDconverter(filename, species_code, input_source, output_source, root):
import gene_associations
try: outputfile = gene_associations.IDconverter(filename, species_code, input_source, output_source)
except Exception:
outputfile = 'failed'
error = traceback.format_exc()
if outputfile == 'failed':
print_out = 'Translation failed due to:\n',error
print print_out
else:
print_out = 'ID translation complete. See the new file:\n'+outputfile
if root != None and root!= '':
try: InfoWindow(print_out, 'Continue')
except Exception: None
try: root.destroy()
except Exception: None
if outputfile != 'failed': ### Open the folder
try: openDirectory(export.findParentDir(filename))
except Exception: None
def remoteLP(fl, expr_input_dir, vendor, custom_markerFinder, geneModel, root, modelSize=None):
    """ Remote entry point for LineageProfiler: publish the species and
    platform from the file-location object as module globals, then delegate
    to runLineageProfiler with identical arguments. """
    global species
    global array_type
    species = fl.Species()
    array_type = fl.PlatformType()
    runLineageProfiler(fl, expr_input_dir, vendor, custom_markerFinder, geneModel, root, modelSize=modelSize)
def runLineageProfiler(fl, expr_input_dir, vendor, custom_markerFinder, geneModel, root, modelSize=None):
    """ Classify samples against reference cell/tissue profiles.
    With no gene model, no model-size optimization and no custom marker file,
    runs the standard LineageProfiler via ExpressionBuilder; otherwise runs
    LineageProfilerIterate to evaluate/optimize gene models.
    fl: file-location object (species, platform, compendium settings)
    expr_input_dir: input expression file (redirected to the steady-state
        gene file for exon-level platforms)
    modelSize: None/'no' for none, an integer, or any other value -> 'optimize'
    root: Tk root window or None/'' for command-line use """
    if custom_markerFinder == '': custom_markerFinder = False
    if modelSize != None and modelSize != 'no':
        try: modelSize = int(modelSize)
        except Exception: modelSize = 'optimize'
    if (geneModel == None or geneModel == False) and (modelSize == None or modelSize == 'no') and custom_markerFinder == False:
        import ExpressionBuilder
        compendium_type = fl.CompendiumType()
        compendium_platform = fl.CompendiumPlatform()
        if 'exp.' in expr_input_dir:
            ### Correct the input file to be the gene-expression version
            if array_type != "3'array" and 'AltExon' not in compendium_type:
                if 'steady' not in expr_input_dir:
                    expr_input_dir = string.replace(expr_input_dir,'.txt','-steady-state.txt')
        print '\n****Running LineageProfiler****'
        graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,expr_input_dir,array_type,species,vendor,customMarkers=custom_markerFinder,specificPlatform=True)
        if len(graphic_links)>0:
            print_out = 'Lineage profiles and images saved to the folder "DataPlots" in the input file folder.'
            try: InfoWindow(print_out, 'Continue')
            except Exception: None
        else:
            print_out = 'Analysis error occured...\nplease see warning printouts.'
            try: print print_out
            except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
            try: WarningWindow(print_out,'Continue')
            except Exception: None
    else:
        import LineageProfilerIterate
        print '\n****Running LineageProfilerIterate****'
        codingtype = 'exon'; compendium_platform = 'exon'
        platform = array_type,'Affymetrix'
        try: LineageProfilerIterate.runLineageProfiler(species,platform,expr_input_dir,expr_input_dir,codingtype,compendium_platform,customMarkers=custom_markerFinder,geneModels=geneModel,modelSize=modelSize)
        except Exception:
            print_out = traceback.format_exc()
            try: InfoWindow(print_out, 'Continue')
            except Exception: None
        print_out = 'LineageProfiler classification results saved to the folder "SampleClassification".'
        if root!=None and root!='':
            ### GUI mode: open the results folder and report completion
            try: openDirectory(export.findParentDir(expr_input_dir)+'/SampleClassification')
            except Exception: None
            try: InfoWindow(print_out, 'Continue')
            except Exception: None
        else:
            print print_out
    try: root.destroy()
    except Exception: None
def performPCA(filename, pca_labels, pca_algorithm, transpose, root, plotType='3D',display=True,geneSetName=None, species=None):
import clustering; reload(clustering)
graphics = []
if pca_labels=='yes' or pca_labels=='true'or pca_labels=='TRUE': pca_labels=True
else: pca_labels=False
try:
clustering.runPCAonly(filename, graphics, transpose, showLabels=pca_labels, plotType=plotType,display=display, algorithm=pca_algorithm, geneSetName=geneSetName, species=species)
try: print'Finished building PCA.'
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
except Exception:
if 'importData' in traceback.format_exc():
try: print traceback.format_exc(),'\n'
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
print_out = 'Bad input file! Should be a tab-delimited text file with a single\nannotation column and row and the remaining as numeric values.'
else:
try: print traceback.format_exc(),'\n'
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
print_out = 'Analysis error occured...\nplease try again with different parameters.'
try: print print_out
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
try: WarningWindow(print_out,'Continue')
except Exception: None
try: root.destroy()
except Exception: null=[]
def createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, root, display=True):
graphics = []
try:
import clustering; reload(clustering)
clustering.runHCexplicit(filename, graphics, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=display, contrast = contrast)
print_out = 'Finished building heatmap.'
try: print print_out
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
try: root.destroy()
except Exception: pass ### DO NOT PRINT HERE... CONFLICTS WITH THE STOUT
except Exception:
if 'importData' in traceback.format_exc():
try: print traceback.format_exc(),'\n'
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
print_out = 'Bad input file! Should be a tab-delimited text file with a single\nannotation column and row and the remaining as numeric values.'
else:
try: print traceback.format_exc(),'\n'
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
print_out = 'Analysis error occured...\nplease try again with different parameters.'
try: print print_out
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
try: WarningWindow(print_out,'Continue')
except Exception: None
try: root.destroy()
except Exception: null=[]
def getAdditionalOnlineResources(species_code,additional_resources,root):
if additional_resources[0] == 'customSet':
additional_resources = additional_resources[1]
elif additional_resources == 'All Resources':
additional_resources = importResourceList()
else: additional_resources = [additional_resources]
try:
print 'Adding supplemental GeneSet and Ontology Collections'
import GeneSetDownloader; force = 'yes'
GeneSetDownloader.buildAccessoryPathwayDatabases([species_code],additional_resources,force)
try: print'Finished incorporating additional resources.'
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
except Exception:
print_out = 'Download error encountered for additional ontologies and gene-sets...\nplease try again later.'
try: print print_out
except Exception: None ### Windows issue with the Tk status window stalling after pylab.show is called
try: WarningWindow(print_out,'Continue')
except Exception: None
try: root.destroy()
except Exception: null=[]
class StringVarFile:
    """ File-like adapter that routes print output into a Tk StringVar (so a
    status window shows program output) while also appending every write to
    the global session log file. Assign an instance to sys.stdout to use. """
    def __init__(self,stringVar,window):
        self.__newline = 0
        self.__stringvar = stringVar
        self.__window = window
    def write(self,s): ### Called by Python for each print statement
        buffered = self.__stringvar.get()
        for character in s:
            #if character == '\n': self.__newline = 1
            if character == '\k':
                ### '\k' is never present in real output, so the display feeds
                ### continuously rather than replacing a single line
                self.__newline = 1
            else:
                if self.__newline:
                    buffered = ""
                    self.__newline = 0
                buffered = buffered+character
                self.set(buffered)
        try:
            ### Mirror every print statement to the session log file
            log_report = open(logfile,'a')
            log_report.write(s)
            log_report.close()
        except Exception:
            pass
    def set(self,s):
        self.__stringvar.set(s)
        self.__window.update()
    def get(self):
        return self.__stringvar.get()
    def flush(self):
        pass
################# GUI #################
class ProgressBar:
    """ Indeterminate ('infinite') progress dialog shown while a long task
    runs. update_progress re-schedules itself every 10 ms via Tk's after()
    so the bar keeps animating until the dialog is destroyed. """
    def __init__(self,method,t):
        # method: task name shown in the message; t: parent Tk window
        #http://tkinter.unpythonic.net/wiki/ProgressBar
        self.progval = IntVar(t)
        self.progmsg = StringVar(t); self.progmsg.set(method+" in Progress...")
        #b = Button(t, relief=LINK, text="Quit (using bwidget)", command=t.destroy); b.pack()
        self.c = ProgressDialog(t, title="Please wait...",
            type="infinite",
            width=30,
            textvariable=self.progmsg,
            variable=self.progval,
            command=lambda: self.c.destroy()
            )
        self.update_progress()
    def update_progress(self):
        # in 'infinite' mode the value itself is ignored; setting it drives the animation
        self.progval.set(2)
        self.c.after(10, self.update_progress)
class ImageFiles:
    """ Lightweight record pairing an image's display name with its full .png
    path, plus the derived thumbnail path (.gif or _small.png). """
    def __init__(self,shortname,fullpath,return_gif=False):
        # display name, full-size .png path, and whether a .gif thumbnail is wanted
        self.shortname = shortname
        self.fullpath = fullpath
        self.return_gif = return_gif
    def ShortName(self): return self.shortname
    def FullPath(self): return self.fullpath
    def returnGIF(self): return self.return_gif
    def Thumbnail(self):
        ### Derive the thumbnail filename from the full-size .png path
        if self.returnGIF():
            return string.replace(self.FullPath(),'.png','.gif')
        return string.replace(self.FullPath(),'.png','_small.png')
class GUI:
def PredictGroups(self):
self.button_flag = True
self.graphic_link = {}
import Image
self.toplevel_list=[] ### Keep track to kill later
self.filename_db={}
filenames=[]
i=1
for (name,file) in graphic_links:
self.filename_db['clusters '+str(i)]=file
filenames.append('clusters '+str(i))
i+=1
self.title = 'Select cluster groups for further analysis'
self.option = 'group_select' ### choose a variable name here
self.options = filenames
self.default_option = 0
self.comboBox()
# create a frame and pack it
frame1 = Tkinter.Frame(self.parent_type)
frame1.pack(side=Tkinter.TOP, fill=Tkinter.X)
### Convert PNG to GIF and re-size
assigned_index=1
for image_file in filenames:
file_dir = self.filename_db[image_file]
iF = ImageFiles(image_file,file_dir)
im = Image.open(file_dir)
#im.save('Gfi1.gif')
size = 128, 128
im.thumbnail(size, Image.ANTIALIAS)
im.save(iF.Thumbnail()) ### write out the small gif file
option = 'imageView'
self.option=option
#photo1 = Tkinter.PhotoImage(file=iF.Thumbnail())
photo1 = ImageTk.PhotoImage(file=iF.Thumbnail()) ### specifically compatible with png files
# create the image button, image is above (top) the optional text
def view_FullImageOnClick(image_name):
tl = Toplevel() #### This is the critical location to allow multiple TopLevel instances that don't clash, that are created on demand (by click)
self.toplevel_list.append(tl)
self.graphic_link['WP'] = self.filename_db[image_name]
try: self.viewPNGFile(tl) ### ImageTK PNG viewer
except Exception:
print traceback.format_exc()
try: self.openPNGImage() ### OS default PNG viewer
except Exception: pass
if assigned_index == 1:
image_file1 = image_file; #tl1 = Toplevel() ### not good to create here if we have to destroy it, because then we can't re-invoke
button1 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file1, bg='green', command=lambda:view_FullImageOnClick(image_file1)) ### without lamda, the command is called before being clicked
button1.image = photo1; button1.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 2:
image_file2 = image_file; #tl2 = Toplevel()
button2 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file2, bg='green', command=lambda:view_FullImageOnClick(image_file2))
button2.image = photo1; button2.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 3:
image_file3 = image_file; #tl3 = Toplevel()
button3 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file3, bg='green', command=lambda:view_FullImageOnClick(image_file3))
button3.image = photo1; button3.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 4:
image_file4 = image_file; #tl4 = Toplevel()
button4 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file4, bg='green', command=lambda:view_FullImageOnClick(image_file4))
button4.image = photo1; button4.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 5:
image_file5 = image_file; #tl5 = Toplevel()
button5 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file5, bg='green', command=lambda:view_FullImageOnClick(image_file5))
button5.image = photo1; button5.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 6:
image_file6 = image_file; #tl4 = Toplevel()
button6 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file6, bg='green', command=lambda:view_FullImageOnClick(image_file6))
button6.image = photo1; button6.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 7:
image_file5 = image_file; #tl5 = Toplevel()
button7 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file7, bg='green', command=lambda:view_FullImageOnClick(image_file7))
button7.image = photo1; button7.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 8:
image_file8 = image_file; #tl5 = Toplevel()
button8 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file8, bg='green', command=lambda:view_FullImageOnClick(image_file8))
button8.image = photo1; button8.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 9:
image_file9 = image_file; #tl4 = Toplevel()
button9 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file9, bg='green', command=lambda:view_FullImageOnClick(image_file9))
button9.image = photo1; button9.pack(side=Tkinter.TOP, padx=2, pady=2)
elif assigned_index == 10:
image_file10 = image_file; #tl5 = Toplevel()
button10 = Tkinter.Button(frame1, compound=Tkinter.TOP, image=photo1,
text=image_file10, bg='green', command=lambda:view_FullImageOnClick(image_file10))
button10.image = photo1; button10.pack(side=Tkinter.TOP, padx=2, pady=2)
assigned_index+=1
# start the event loop
use_selected_button = Button(self._parent, text="Use Selected", command=self.UseSelected)
use_selected_button.pack(side = 'right', padx = 10, pady = 5)
recluster_button = Button(self._parent, text="Re-Cluster", command=self.ReCluster)
recluster_button.pack(side = 'right', padx = 10, pady = 5)
quit_button = Button(self._parent, text="Quit", command=self.quit)
quit_button.pack(side = 'right', padx = 10, pady = 5)
try: help_button = Button(self._parent, text='Help', command=self.GetHelpTopLevel); help_button.pack(side = 'left', padx = 5, pady = 5)
except Exception: help_button = Button(self._parent, text='Help', command=self.linkout); help_button.pack(side = 'left', padx = 5, pady = 5)
self._parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
self._parent.mainloop()
def UseSelected(self):
status = self.checkAllTopLevelInstances()
if status:
self.checkAllTopLevelInstances()
self._user_variables['next'] = 'UseSelected'
try: self._parent.quit(); self._parent.destroy()
except Exception: self._parent.quit()
def ReCluster(self):
status = self.checkAllTopLevelInstances()
if status:
self._user_variables['next'] = 'ReCluster'
try: self._parent.quit(); self._parent.destroy()
except Exception:
try: self._parent.destroy()
except Exception: pass
def checkAllTopLevelInstances(self):
### Ideally, we would just kill any open toplevel instances, but this was causing a "ghost" process
### to continue running even after all of the tls and roots were destroyed
if len(self.toplevel_list)>0:
removed=[]
for tl in self.toplevel_list:
try:
if 'normal' == tl.state():
InfoWindow('Please close all cluster windows before proceeding.', 'Continue')
break
except Exception:
removed.append(tl)
for tl in removed:
self.toplevel_list.remove(tl)
if len(self.toplevel_list)==0:
return True
else:
return False
def killAllTopLevelInstances(self):
### destroy's any live TopLevel instances
removed=[]
for tl in self.toplevel_list:
try: tl.quit(); tl.destroy(); removed.append(tl)
except Exception: pass
for tl in removed:
self.toplevel_list.remove(tl)
    def ViewWikiPathways(self):
        """ Canvas is already drawn at this point from __init__. Builds the
        WikiPathways browser UI: species combo box, a loading-status label,
        the MOD (ID system) dropdown, an input ID file selector and the
        Display Pathway / Back / Quit / Help buttons, then enters the Tk
        main loop. """
        global pathway_db
        pathway_db={}
        button_text = 'Help'
        ### Create a species drop-down option that can be updated
        current_species_names,manufacturers_list = getSpeciesList('') ### pass the variable vendor to getSpeciesList (none in this case) --- different than the GO-Elite UI call
        self.title = 'Select species to search for WikiPathways '
        self.option = 'species_wp'
        self.options = ['---']+current_species_names #species_list
        self.default_option = 0
        self.comboBox()
        ### Create a label that can be updated below the dropdown menu
        self.label_name = StringVar()
        self.label_name.set('Pathway species list may take several seconds to load')
        self.invokeLabel() ### Invoke a new label indicating that the database is loading
        ### Create a MOD selection drop-down list
        system_list,mod_list = importSystemInfo() ### --- different than the GO-Elite UI call
        self.title = 'Select the ID system to translate to (MOD)'
        self.option = 'mod_wp'
        self.options = mod_list
        try: self.default_option = mod_list.index('Ensembl') ### Get the Ensembl index number
        except Exception: self.default_option = 0
        self.dropDown()
        ### Create a file selection option
        self.title = 'Select GO-Elite input ID text file'
        self.notes = 'note: ID file must have a header row and at least three columns:\n'
        self.notes += '(1) Identifier, (2) System Code, (3) Value to map (- OR +)\n'
        self.file_option = 'goelite_input_file'
        self.directory_type = 'file'
        self.FileSelectionMenu()
        dispaly_pathway = Button(text = 'Display Pathway', command = self.displayPathway)
        dispaly_pathway.pack(side = 'right', padx = 10, pady = 10)
        back_button = Button(self._parent, text="Back", command=self.goBack)
        back_button.pack(side = 'right', padx =10, pady = 5)
        quit_win = Button(self._parent, text="Quit", command=self.quit)
        quit_win.pack(side = 'right', padx =10, pady = 5)
        try: help_button = Button(self._parent, text=button_text, command=self.GetHelpTopLevel); help_button.pack(side = 'left', padx = 5, pady = 5)
        except Exception: help_button = Button(self._parent, text=button_text, command=self.linkout); help_button.pack(side = 'left', padx = 5, pady = 5)
        self._parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
        self._parent.mainloop()
    def FileSelectionMenu(self):
        """ Add a labeled group containing a path Entry plus a
        'select <file/folder>' button. self.file_option names the stored user
        variable, self.directory_type labels the button ('file'/'folder') and
        self.notes, when non-empty, adds a blue hint label below. """
        option = self.file_option
        group = PmwFreeze.Group(self.parent_type,tag_text = self.title)
        group.pack(fill = 'both', expand = 1, padx = 10, pady = 2)
        def filecallback(callback=self.callback,option=option): self.getPath(option)
        default_option=''
        entrytxt = StringVar(); #self.entrytxt.set(self.default_dir)
        entrytxt.set(default_option)
        ### Register the entry's StringVar so getPath can update it later
        self.pathdb[option] = entrytxt
        self._user_variables[option] = default_option
        entry = Entry(group.interior(),textvariable=self.pathdb[option]);
        entry.pack(side='left',fill = 'both', expand = 0.7, padx = 10, pady = 2)
        button = Button(group.interior(), text="select "+self.directory_type, width = 10, fg="red", command=filecallback)
        button.pack(side=LEFT, padx = 2,pady = 2)
        if len(self.notes)>0: ln = Label(self.parent_type, text=self.notes,fg="blue"); ln.pack(padx = 10)
def dropDown(self):
def comp_callback(tag,callback=self.callbackWP,option=self.option):
callback(tag,option)
self.comp = PmwFreeze.OptionMenu(self.parent_type,
labelpos = 'w', label_text = self.title, items = self.options, command = comp_callback)
if self.option == 'wp_id_selection':
self.wp_dropdown = self.comp ### update this variable later (optional)
self.comp.pack(anchor = 'w', padx = 10, pady = 0, fill = 'x')
self.comp.invoke(self.default_option) ###Just pick the first option
    def comboBox(self):
        """ Alternative, more sophisticated UI than dropDown (OptionMenu).
        Although it behaves similiar it requires different parameters, can not be
        as easily updated with new lists (different method) and requires explict
        invokation of callback when a default is set rather than selected. """
        def comp_callback(tag,callback=self.callbackWP,option=self.option):
            callback(tag,option)
        self.comp = PmwFreeze.ComboBox(self.parent_type,
            labelpos = 'w', dropdown=1, label_text = self.title,
            unique = 0, history = 0,
            scrolledlist_items = self.options, selectioncommand = comp_callback)
        ### Clicking the entry field opens the scrolled list instead of allowing edits
        try: self.comp.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.comp.invoke())
        except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
        if self.option == 'wp_id_selection':
            self.wp_dropdown = self.comp ### update this variable later (optional)
        self.comp.pack(anchor = 'w', padx = 10, pady = 0)
        try: self.comp.selectitem(self.default_option) ###Just pick the first option
        except Exception: pass
        try: self.callbackWP(self.options[0],self.option) ### Explicitly, invoke first option (not automatic)
        except Exception: pass
def invokeLabel(self):
self.label_object = Label(self.parent_type, textvariable=self.label_name,fg="blue"); self.label_object.pack(padx = 10)
    def enterMenu(self):
        """ Create a labeled text-entry field (Pmw EntryField) titled
        self.title, pre-filled with self.default_option and validated by
        self.custom_validate; shows self.notes above it when non-empty. """
        if len(self.notes)>0:
            lb = Label(self.parent_type, text=self.notes,fg="black"); lb.pack(pady = 5)
        ### Create and pack a horizontal RadioSelect widget
        ### NOTE(review): this nested function shadows self.custom_validate; the
        ### EntryField's validate option receives the nested wrapper, which calls
        ### the bound method captured as its default argument
        def custom_validate(tag,custom_validate=self.custom_validate,option=self.option):
            validate = custom_validate(tag,self.option)
        self.entry_field = PmwFreeze.EntryField(self.parent_type,
            labelpos = 'w', label_text = self.title, validate = custom_validate,
            value = self.default_option, hull_borderwidth = 2)
        self.entry_field.pack(fill = 'x', expand = 0.7, padx = 10, pady = 5)
def displayAnyPNG(self,png_file):
self.graphic_link={}
self.graphic_link['WP'] = png_file
self.graphic_link['quit']=None
try: tl = Toplevel()
except Exception:
import Tkinter
tl = Tkinter.Toplevel()
try: self.viewPNGFile(tl) ### ImageTK PNG viewer
except Exception:
try: self.openPNGImage() ### OS default PNG viewer
except Exception:
print 'Unable to open PNG file for unknown reasons'
    def displayPathway(self):
        """Color the selected WikiPathways pathway with the user's input data
        (via the WikiPathways webservice) and display the resulting PNG.

        Reads the input file, MOD, species and pathway/WPID choices from
        self._user_variables; a manually entered WPID takes precedence over
        the dropdown pathway-name selection. Failures are translated into
        user-facing WarningWindow messages by matching known signatures in
        the traceback text.
        """
        filename = self._user_variables['goelite_input_file']
        mod_type = self._user_variables['mod_wp']
        species = self._user_variables['species_wp']
        pathway_name = self._user_variables['wp_id_selection']
        wpid_selected = self._user_variables['wp_id_enter']
        species_code = species_codes[species].SpeciesCode()
        wpid = None
        if len(wpid_selected)>0:
            wpid = wpid_selected
        elif len(self.pathway_db)>0:
            ### No manual WPID: find the WPID whose name matches the dropdown selection
            for wpid in self.pathway_db:
                if pathway_name == self.pathway_db[wpid].WPName():
                    break
        if len(filename)==0:
            print_out = 'Select an input ID file with values first'
            WarningWindow(print_out,'Error Encountered!')
        else:
            try:
                self.graphic_link = WikiPathways_webservice.visualizePathwayAssociations(filename,species_code,mod_type,wpid)
                self.wp_status = 'Pathway images colored and saved to disk by webservice\n(see image title for location)'
                self.label_status_name.set(self.wp_status)
                tl = Toplevel()
                try: self.viewPNGFile(tl) ### ImageTK PNG viewer
                except Exception:
                    try: self.openPNGImage() ### OS default PNG viewer
                    except Exception:
                        self.wp_status = 'Unable to open PNG file using operating system'
                        self.label_status_name.set(self.wp_status)
            except Exception,e:
                ### Best-effort: append the traceback to a log file for later debugging
                try:
                    wp_logfile = filepath('webservice.log')
                    wp_report = open(wp_logfile,'a')
                    wp_report.write(traceback.format_exc())
                except Exception:
                    None
                try:
                    print traceback.format_exc()
                except Exception:
                    null=None ### Occurs when transitioning back from the Official Database download window (not sure why) -- should be fixed in 1.2.4 (sys.stdout not re-routed)
                ### Map known failure signatures in the traceback to friendly messages
                if 'force_no_matching_error' in traceback.format_exc():
                    print_out = 'None of the input IDs mapped to this pathway'
                elif 'force_invalid_pathway' in traceback.format_exc():
                    print_out = 'Invalid pathway selected'
                elif 'IndexError' in traceback.format_exc():
                    print_out = 'Input ID file does not have at least 3 columns, with the second column being system code'
                elif 'ValueError' in traceback.format_exc():
                    print_out = 'Input ID file error. Please check that you do not have extra rows with no data'
                elif 'source_data' in traceback.format_exc():
                    print_out = 'Input ID file does not contain a valid system code'
                else:
                    print_out = 'Error generating the pathway "%s"' % pathway_name
                WarningWindow(print_out,'Error Encountered!')
def getSpeciesPathways(self,species_full):
pathway_list=[]
self.pathway_db = WikiPathways_webservice.getAllSpeciesPathways(species_full)
for wpid in self.pathway_db:
if self.pathway_db[wpid].WPName() != None: ### Not sure where the None comes from but will break the UI if not exlcuded
pathway_list.append(self.pathway_db[wpid].WPName())
pathway_list = unique.unique(pathway_list)
pathway_list.sort()
return pathway_list
    def callbackWP(self, tag, option):
        """Handle a WikiPathways-related widget selection.

        Stores the selection in self._user_variables[option], then performs
        option-specific follow-up work:
          * 'group_select'    - stores the underlying filename instead of the tag
          * 'species_wp'      - loads that species' pathway list from the
                                webservice and builds/updates the pathway
                                dropdown, WPID entry field and status label
          * 'wp_id_selection' - clears any manually entered WPID
        """
        #print 'Button',[option], tag,'was pressed.'
        self._user_variables[option] = tag
        if option == 'group_select':
            ### set group_select equal to the filename
            self._user_variables[option] = self.filename_db[tag]
            #print option, tag
            #print option, self._user_variables[option], self.filename_db[tag]
        if option == 'species_wp':
            ### Add additional menu options based on user selection
            if tag != '---':
                ### If this already exists from an earlier iteration
                hault = False
                self.label_name.set('Loading available WikiPathways')
                try:
                    self.pathway_list=self.getSpeciesPathways(tag)
                    traceback_printout = ''
                except Exception,e:
                    ### Classify the webservice failure; hault suppresses widget creation
                    if 'not supported' in traceback.format_exc():
                        print_out = 'Species not available at WikiPathways'
                        WarningWindow(print_out,'Species Not Found!')
                        traceback_printout=''
                        hault = True
                    elif 'URLError' in traceback.format_exc():
                        print_out = 'Internet connection could not be established'
                        WarningWindow(print_out,'Internet Error')
                        traceback_printout=''
                        hault = True
                    else:
                        traceback_printout = traceback.format_exc()
                    try:
                        if len(self.pathway_list)>0: ### When true, a valid species was selected in a prior interation invoking the WP fields (need to repopulate)
                            hault = False
                    except Exception: None
                    ### Fall back to a placeholder list so the dropdown stays valid
                    self.pathway_list = ['None']; self.pathway_db={}
                self.label_name.set('')
                if hault == False:
                    try:
                        ### If the species specific wikipathways drop down exists, just update it
                        self.wp_dropdown._list.setlist(self.pathway_list)
                        self.wp_dropdown.selectitem(self.pathway_list[0])
                        self.callbackWP(self.pathway_list[0],'wp_id_selection')
                    except Exception:
                        ### Create a species specific wikipathways drop down
                        self.option = 'wp_id_selection'
                        self.title = 'Select WikiPathways to visualize your data'
                        if len(traceback_printout)>0:
                            self.title += traceback_printout ### Display the actual problem in the GUI (sloppy but efficient way for users to indicate the missing driver)
                        self.options = self.pathway_list
                        self.default_option = 0
                        self.comboBox() ### Better UI for longer lists of items (dropDown can't scroll on Linux)
                        ### Create a species specific wikipathways ID enter option
                        self.notes = 'OR'
                        self.option = 'wp_id_enter'
                        self.title = 'Enter the WPID (example: WP254)          '
                        self.default_option = ''
                        self.enterMenu()
                        try:
                            ### Create a label that can be updated below the dropdown menu
                            self.wp_status = 'Pathway image may take several seconds to a minute to load...\n'
                            self.wp_status += '(images saved to "WikiPathways" folder in input directory)'
                            try: self.label_status_name.set(self.wp_status)
                            except Exception:
                                self.label_status_name = StringVar()
                                self.label_status_name.set(self.wp_status)
                                self.invokeStatusLabel() ### Invoke a new label indicating that the database is loading
                        except Exception:
                            None
        if option == 'wp_id_selection':
            ### Reset any manually input WPID if a new pathway is selected from dropdown
            try: self.entry_field.setentry('')
            except Exception: null=[]
def ShowImageMPL(self):
png_file_dir = self.graphic_link['WP']
fig = pylab.figure()
pylab.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.00) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img= pylab.imread(png_file_dir)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self,tl):
""" View PNG file within a PMW Tkinter frame """
import ImageTk ### HAVE TO CALL HERE TO TRIGGER AN ERROR - DON'T WANT THE TopLevel to open otherwise
png_file_dir = self.graphic_link['WP']
img = ImageTk.PhotoImage(file=png_file_dir)
sf = PmwFreeze.ScrolledFrame(tl, labelpos = 'n', label_text = '',
usehullsize = 1, hull_width = 800, hull_height = 550)
sf.pack(padx = 0, pady = 0, fill = 'both', expand = 1)
frame = sf.interior()
tl.title(png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx = 0, pady = 0)
w = img.width()
h = height=img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
if 'quit' in self.graphic_link:
tl.protocol("WM_DELETE_WINDOW", lambda: self.tldeleteWindow(tl))
tl.mainloop()
else:
tl.protocol("WM_DELETE_WINDOW", lambda: self.tldeleteWindow(tl))
tl.mainloop()
def openPNGImage(self):
png_file_dir = self.graphic_link['WP']
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception: os.system('open "'+png_file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+png_file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+png_file_dir+'"')
def __init__(self, parent, option_db, option_list, defaults):
if option_db == 'ViewPNG':
output_filename = defaults
self.displayAnyPNG(output_filename)
return None
self._parent = parent; self._option_list = option_list; self._option_db = option_db
self._user_variables = user_variables; self.pathdb={}; i = -1
enter_index=0; radio_index=0; dropdown_index=0; check_index=0 ### used to keep track of how many enter boxes we have
self.default_dir = PathDir; self.default_file = PathFile
self.defaults = defaults
filename = 'Config/icon.gif'; orient_type = 'left'
if 'input_cel_dir' in option_list:
filename = 'Config/aa_0.gif'
if array_type == 'RNASeq': filename = 'Config/aa_0_rs.gif'
if 'include_raw_data' in option_list:
filename = 'Config/aa_1.gif'; orient_type = 'top'
if array_type == 'RNASeq': filename = 'Config/aa_1_rs.gif'
if 'filter_for_AS' in option_list:
filename = 'Config/aa_2.gif'; orient_type = 'top'
if array_type == 'RNASeq': filename = 'Config/aa_2_rs.gif'
if 'pathway_permutations' in option_list: filename = 'Config/goelite.gif'
if 'GeneSelectionPredict' in option_list:
filename = 'Config/aa_3.gif'
if array_type == 'RNASeq': filename = 'Config/aa_3_rs.gif'
fn=filepath(filename); img = PhotoImage(file=fn)
self.can = Canvas(parent); self.can.pack(side='top'); self.can.config(width=img.width(), height=img.height())
try: self.can.create_image(2, 2, image=img, anchor=NW)
except Exception:
try: self.can.delete("all")
except Exception: pass
#except Exception: print filename; 'what?';kill
self.pathdb={}; use_scroll = 'no'
#if defaults == 'groups' or defaults == 'comps' or 'filter_for_AS' in option_list:
if defaults != 'null':
height = 350; width = 400
if defaults == 'groups':
notes = "For each file, type in a name for the group it belongs to\n(e.g., 24hrs, 48hrs, 4days, etc.)."
Label(self._parent,text=notes).pack(); label_text_str = 'AltAnalyze Group Names'
if len(option_list)<15: height = 320; width = 400
elif defaults == 'batch':
notes = "For each file, type in a name for the BATCH it belongs to\n(e.g., batch1, batch2, batch3 etc.)."
Label(self._parent,text=notes).pack(); label_text_str = 'AltAnalyze Group Names'
if len(option_list)<15: height = 320; width = 400
elif defaults == 'comps':
notes = "Experimental Group\t\t\tBaseline Group "
label_text_str = 'AltAnalyze Pairwise Group Comparisons'
if len(option_list)<5: height = 250; width = 400
elif 'filter_for_AS' in option_list:
label_text_str = 'AltAnalyze Alternative Exon Analysis Parameters'
height = 350; width = 400; use_scroll = 'yes'
if os.name != 'nt': width+=100
elif 'pathway_permutations' in option_list:
label_text_str = 'GO-Elite Parameters'
height = 350; width = 425; use_scroll = 'yes'
elif 'expression_data_format' in option_list:
label_text_str = "AltAnalyze Expression Dataset Parameters"
height = 350; width = 400; use_scroll = 'yes'
if os.name != 'nt': width+=100
elif 'Genes_network' in option_list:
label_text_str = "Network Analysis Parameters"
height = 350; width = 400; use_scroll = 'yes'
#if os.name != 'nt': width+=50
elif 'GeneSelectionPredict' in option_list:
notes = "Perform an unsupervised or supervised analysis to identify the\npredominant sample groups via expression clustering"
Label(self._parent,text=notes).pack()
label_text_str = "AltAnalyze Prediction Sample Group Parameters"
height = 310; width = 400; use_scroll = 'yes'
elif 'join_option' in option_list:
label_text_str = "AltAnalyze Merge Files Parameters"
height = 310; width = 400; use_scroll = 'yes'
else:
label_text_str = "AltAnalyze Main Dataset Parameters"
height = 310; width = 400; use_scroll = 'yes'
if os.name != 'nt':height+=75; width+=150
if os.name== 'nt':height+=25; width+=50
if 'linux' in sys.platform: offset = 25
else: offset=0
self.sf = PmwFreeze.ScrolledFrame(self._parent,
labelpos = 'n', label_text = label_text_str,
usehullsize = 1, hull_width = width-offset, hull_height = height-offset)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
if defaults == 'comps':
Label(self.frame,text=notes).pack()
create_group = 'yes'
if 'pathway_permutations' in option_list or 'expression_data_format' in option_list or 'filter_probe_types' in option_list:
if 'ge_ptype' in option_list:
self.group_tag = 'GO-Elite Gene Expression Analysis Filters'
elif 'pathway_permutations' in option_list:
self.group_tag = 'GO-Elite Over-Representation and Filtering Parameters'
if 'expression_threshold' in option_list:
self.group_tag = 'Exon/Junction Filtering Options'
od = option_db['expression_threshold']
if od.ArrayOptions() == ['NA']: create_group = 'no'
if 'rpkm_threshold' in option_list and create_group== 'no':
create_group='yes'
self.group_tag = 'Gene Expression Filtering Options'
od = option_db['rpkm_threshold']
if od.ArrayOptions() == ['NA']: create_group = 'no'
elif 'expression_data_format' in option_list and 'rpkm_threshold' not in option_list:
self.group_tag = 'Gene Expression Analysis Options'
if 'filter_probe_types' in option_list:
self.group_tag = 'Primary Alternative Exon Parameters'
if create_group == 'yes':
custom_group = PmwFreeze.Group(self.sf.interior(),tag_text = self.group_tag)
custom_group.pack(fill = 'both', expand = 1, padx = 10, pady = 2)
insert_into_group = 'yes'
else: insert_into_group = 'no'
else: insert_into_group = 'no'
object_directions = ['top','bottom','up','down']
if option_db == 'ViewWikiPathways':
width = 520
self.parent_type = self.sf.interior()
self.ViewWikiPathways()
if option_db == 'PredictGroups':
width = 520
self.parent_type = self.sf.interior()
self.PredictGroups()
for option in option_list:
i+=1 ####Keep track of index - if options are deleted, count these to select the appropriate default from defaults
if option in option_db:
od = option_db[option]; self.title = od.Display(); notes = od.Notes()
self.display_options = od.ArrayOptions()
try: override_default = od.DefaultOption()
except Exception: override_default = ''
if 'radio' in od.DisplayObject() and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
if 'pathway_permutations' in option_list or 'new_run' in option_list: orient_type = 'top'
if insert_into_group == 'yes': parent_type = custom_group.interior(); radio_index+=1
### Create and pack a RadioSelect widget, with radiobuttons.
self._option = option
def radiocallback(tag,callback=self.callback,option=option):
callback(tag,option)
radiobuttons = PmwFreeze.RadioSelect(parent_type,
buttontype = 'radiobutton', orient = 'vertical',
labelpos = 'w', command = radiocallback, label_text = self.title,
hull_borderwidth = 2, hull_relief = 'ridge')
if insert_into_group == 'no': radiobuttons.pack(side = orient_type, expand = 1, padx = 10, pady = 10)
elif radio_index == 1: radiobuttons1 = radiobuttons
elif radio_index == 2: radiobuttons2 = radiobuttons
### print self.display_options
### Add some buttons to the radiobutton RadioSelect.
for text in self.display_options:
if text != ['NA']: radiobuttons.add(text)
if len(override_default)>0: self.default_option = override_default
elif len(defaults) <1:
try: self.default_option = self.display_options[0]
except Exception: print option; kill
else: self.default_option = defaults[i]
radiobuttons.invoke(self.default_option)
if len(notes)>0: Label(self._parent, text=notes).pack()
if 'button' in od.DisplayObject() and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
self._option = option
if mac_print_mode == 'yes' or 'radbutton' in od.DisplayObject(): button_type = 'radiobutton'
else: button_type = 'button'
### Create and pack a horizontal RadioSelect widget.
if len(override_default)>0: self.default_option = override_default
elif len(defaults) <1: self.default_option = self.display_options[0]
else: self.default_option = defaults[i]
def buttoncallback(tag,callback=self.callback,option=option):
callback(tag,option)
orientation = 'vertical'
#if 'pathway_permutations' in option_list or 'new_run' in option_list: orientation = 'vertical'
#elif 'run_from_scratch' in option_list: orientation = 'vertical'
#else: orientation = 'vertical'
horiz = PmwFreeze.RadioSelect(parent_type, buttontype = button_type, orient = orientation,
labelpos = 'w', command = buttoncallback,
label_text = self.title, frame_borderwidth = 2,
frame_relief = 'ridge'
); horiz.pack(fill = 'x',padx = 10, pady = 10)
### Add some buttons to the horizontal RadioSelect
for text in self.display_options:
if text != ['NA']: horiz.add(text)
horiz.invoke(self.default_option)
if len(notes)>0: Label(self._parent, text=notes).pack()
if ('folder' in od.DisplayObject() or 'file' in od.DisplayObject()) and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
proceed = 'yes'
#if option == 'raw_input': proceed = 'no'
if proceed == 'yes':
self._option = option
group = PmwFreeze.Group(parent_type,tag_text = self.title)
group.pack(fill = 'both', expand = 1, padx = 10, pady = 2)
def filecallback(callback=self.callback,option=option): self.getPath(option)
entrytxt = StringVar(); #self.entrytxt.set(self.default_dir)
try: default_option = string.replace(override_default,'---','')
except Exception: default_option = ''
entrytxt.set(default_option)
self.pathdb[option] = entrytxt
self._user_variables[option] = default_option
#l = Label(group.interior(), text=self.title); l.pack(side=LEFT)
entry = Entry(group.interior(),textvariable=self.pathdb[option]);
entry.pack(side='left',fill = 'both', expand = 1, padx = 10, pady = 2)
button = Button(group.interior(), text="select "+od.DisplayObject(), width = 10, fg="red", command=filecallback); button.pack(side=LEFT, padx = 2,pady = 2)
#print option,run_mappfinder, self.title, self.default_option
if len(notes)>0: ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 10)
if ('update-entry' in od.DisplayObject()) and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
proceed = 'yes'
#if option == 'raw_input': proceed = 'no'
if proceed == 'yes':
self._option = option
#group = PmwFreeze.Group(parent_type,tag_text = self.title)
#group.pack(fill = 'both', expand = 1, padx = 10, pady = 2)
entrytxt = StringVar(); #self.entrytxt.set(self.default_dir)
try: default_option = defaults[i]
except Exception: default_option = ''
entrytxt.set(default_option)
self.pathdb[option] = entrytxt
self._user_variables[option] = default_option
#l = Label(parent_type, text=self.title); l.pack(side=LEFT)
#entry = Entry(parent_type,textvariable=self.pathdb[option]);
#entry.pack(side='left',fill = 'both', expand = 1, padx = 10, pady = 2)
l = Label(self.sf.interior(), text=self.title); l.pack()
entry = Entry(self.sf.interior(),textvariable=self.pathdb[option]);
entry.pack()
#print option,run_mappfinder, self.title, self.default_option
#if len(notes)>0: ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 10)
if 'drop-down' in od.DisplayObject() and self.display_options != ['NA']:
#print option, defaults, self.display_options
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
if insert_into_group == 'yes': parent_type = custom_group.interior(); dropdown_index+=1
self._option = option
self.default_option = self.display_options
def comp_callback1(tag,callback=self.callback,option=option):
callback(tag,option)
self.comp = PmwFreeze.OptionMenu(parent_type,
labelpos = 'w', label_text = self.title,
items = self.default_option, command = comp_callback1)
try: selected_default = od.DefaultOption()
except Exception:
if len(defaults)>0: selected_default = defaults[i]
else: selected_default = self.default_option[0] ###Just pick the first option
#print option, dropdown_index
if 'species' in option:
if 'selected_species2' in option:
self.speciescomp2 = self.comp; self.speciescomp2.pack(anchor = 'e', padx = 10, pady = 0, expand = 1, fill = 'both')
elif 'selected_species3' in option:
self.speciescomp3 = self.comp; self.speciescomp3.pack(anchor = 'e', padx = 10, pady = 0, expand = 1, fill = 'both')
else: self.speciescomp = self.comp; self.speciescomp.pack(anchor = 'e', padx = 10, pady = 0, expand = 1, fill = 'both')
self.speciescomp.invoke(selected_default)
elif 'array_type' in option:
self.arraycomp = self.comp; self.arraycomp.pack(anchor = 'e', padx = 10, pady = 0, expand = 1, fill = 'both')
self.arraycomp.invoke(selected_default)
elif 'manufacturer_selection' in option:
self.vendorcomp = self.comp; self.vendorcomp.pack(anchor = 'e', padx = 10, pady = 0, expand = 1, fill = 'both')
self.vendorcomp.invoke(selected_default)
else:
if insert_into_group == 'no':
if 'version' in option: pady_int = 0
else: pady_int = 1
self.comp.pack(anchor = 'w', padx = 10, pady = pady_int, expand = 1, fill = 'both')
elif dropdown_index == 1: comp1 = self.comp
elif dropdown_index == 2: comp2 = self.comp
elif dropdown_index == 3: comp3 = self.comp
elif dropdown_index == 4: comp4 = self.comp
elif dropdown_index == 5: comp5 = self.comp
elif dropdown_index == 6: comp6 = self.comp
elif dropdown_index == 7: comp7 = self.comp
elif dropdown_index == 8: comp8 = self.comp
elif dropdown_index == 9: comp9 = self.comp
elif dropdown_index == 10: comp10 = self.comp
elif dropdown_index == 11: comp11 = self.comp
elif dropdown_index == 12: comp12 = self.comp
elif dropdown_index == 13: comp13 = self.comp
try: self.comp.invoke(selected_default)
except Exception:
#self.comp.invoke(self.display_options[0]) # better to know the variable incase their is a conflict
print self.display_options, selected_default, option, option_list;kill
if option == 'selected_version':
notes = 'Note: Available species may vary based on database selection \n'
ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 10)
if option == 'probability_algorithm':
notes = 'Note: Moderated tests only run for gene-expression analyses \n'
ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 3)
if 'comboBox' in od.DisplayObject() and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
if insert_into_group == 'yes': parent_type = custom_group.interior(); dropdown_index+=1
self._option = option
self.default_option = self.display_options
try: selected_default = od.DefaultOption()
except Exception:
if len(defaults)>0: selected_default = defaults[i]
else: selected_default = self.default_option[0] ###Just pick the first option
listbox_selectmode = 'single'
if 'multiple' in od.DisplayObject():
listbox_selectmode = 'multiple'
def comp_callback1(tag,callback=self.callbackComboBox,option=option):
callback(tag,option)
def mult_callback(tag,callback=self.callbackComboBox,option=option):
if 'PathwaySelection' in option:
tag = self.pathwayselect.getcurselection() ### there is a conflict otherwise with another multi-comboBox multcomp object
elif 'HeatmapAdvanced' in option:
tag = self.HeatmapAdvanced.getcurselection() ### there is a conflict otherwise with another multi-comboBox multcomp object
else:
tag = self.multcomp.getcurselection() ### get the multiple item selection
callback(tag,option)
if 'selected_version' not in option_list: ### For clustering UI
label_pos = 'e' ### Orients to the text left -> east
entrywidth = 20 ### Width of entry
#entry_foreground = 'black'
hullsize = 1 #http://pmw.sourceforge.net/doc/ScrolledListBox.html -> doesn't seem to work here
else:
label_pos = 'w' ### Orients to the text right -> west
entrywidth = 20 ### Width of entry
hullsize = 1
if listbox_selectmode == 'multiple':
self.comp = PmwFreeze.ComboBox(parent_type,
labelpos = label_pos, dropdown=1, label_text = self.title,
unique = 0, history = 0, entry_background="gray", entry_width=entrywidth,
scrolledlist_usehullsize=1,listbox_selectmode=listbox_selectmode,
scrolledlist_items = self.default_option,
selectioncommand = mult_callback)
self.multcomp = self.comp
else:
self.comp = PmwFreeze.ComboBox(parent_type,
labelpos = label_pos, dropdown=1, label_text = self.title,
unique = 0, history = 0, entry_background="gray", entry_width=entrywidth,
scrolledlist_usehullsize=1,listbox_selectmode=listbox_selectmode,
scrolledlist_items = self.default_option,
selectioncommand = comp_callback1)
if 'HeatmapAdvanced' in option:
self.HeatmapAdvanced = self.multcomp
if 'PathwaySelection' in option or 'PathwaySelection_network' in option:
if 'network' in option:
geneset_param = 'GeneSetSelection_network' ### for network visualization
else:
geneset_param = 'GeneSetSelection' ### for heatmap visualization
self.pathwayselect = self.multcomp; self.pathwayselect.pack(anchor = 'w', padx = 10, pady = 0)
try: self.pathwayselect.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.pathwayselect.invoke())
except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
try:
### The next several lines are for a second iteration of this analysis to re-select the previously selected parameters
tag = self._user_variables[geneset_param]
if 'Ontology' in tag: directory = 'gene-go'
else: directory = 'gene-mapp'
supported_genesets = self._user_variables[tag]
#print 'loading pathways from memory A1'
#supported_genesets = listAllGeneSetCategories(species,tag,directory)
self.pathwayselect._list.setlist(supported_genesets)
self.pathwayselect.selectitem(selected_default)
self.callbackComboBox(selected_default,option)
except Exception:
try:
self.pathwayselect.selectitem(self.default_option[-1]) ###Just pick the first option
self.callbackComboBox(self.default_option[-1],option)
except Exception: pass
if 'species' in option:
if 'selected_species2' in option:
self.speciescomp2 = self.comp; self.speciescomp2.pack(anchor = 'w', padx = 10, pady = 0)
try: self.speciescomp2.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.speciescomp2.invoke())
except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
try:
self.speciescomp2.selectitem(selected_default)
self.callbackComboBox(selected_default,option)
except Exception:
self.speciescomp2.selectitem(self.default_option[0]) ###Just pick the first option
self.callbackComboBox(self.default_option[0],option)
elif 'selected_species3' in option:
self.speciescomp3 = self.comp; self.speciescomp3.pack(anchor = 'w', padx = 10, pady = 0)
try: self.speciescomp3.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.speciescomp3.invoke())
except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
try:
self.speciescomp3.selectitem(selected_default) ###Just pick the first option
self.callbackComboBox(selected_default,option)
except Exception:
self.speciescomp3.selectitem(self.default_option[0])
self.callbackComboBox(self.default_option[0],option)
else:
self.speciescomp = self.comp; self.speciescomp.pack(anchor = 'w', padx = 10, pady = 0)
try: self.speciescomp.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.speciescomp.invoke())
except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
try:
self.speciescomp.selectitem(selected_default)
self.callbackComboBox(selected_default,option)
except Exception:
self.speciescomp.selectitem(self.default_option[0])
self.callbackComboBox(self.default_option[0],option)
elif 'array_type' in option:
self.arraycomp = self.comp; self.arraycomp.pack(anchor = 'w', padx = 10, pady = 0)
try: self.arraycomp.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.arraycomp.invoke())
except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
try:
self.arraycomp.selectitem(selected_default)
self.callbackComboBox(selected_default,option)
except Exception:
self.arraycomp.selectitem(self.default_option[0])
self.callbackComboBox(self.default_option[0],option)
elif 'manufacturer_selection' in option:
self.vendorcomp = self.comp; self.vendorcomp.pack(anchor = 'w', padx = 10, pady = 0)
try: self.vendorcomp.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.vendorcomp.invoke())
except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
try:
self.vendorcomp.selectitem(selected_default)
self.callbackComboBox(selected_default,option)
except Exception:
self.vendorcomp.selectitem(self.default_option[0])
self.callbackComboBox(self.default_option[0],option)
else:
self.combo = self.comp ### has to be a unique combo box to refer to itself in the component call below
self.combo.pack(anchor = 'w', padx = 10, pady = 1)
try: self.combo.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.combo.invoke())
except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
"""
if listbox_selectmode == 'multiple':
if len(od.DefaultOption()[0])>1: ###Hence it is a list
self.combo.ApplyTypeSelections(od.DefaultOption())
for opt in od.DefaultOption():
self.combo.invoke(opt)
self.callbackComboBox(tuple(od.DefaultOption()),option)
#self.combo.selectitem(opt)
#self.callbackComboBox(opt,option)
"""
#print selected_default
try:
if len(selected_default[0])>1: ###Hence it is a list
for opt in selected_default:
self.combo.selectitem(opt)
self.callbackComboBox(opt,option); break
else:
### This is where the default for the combobox is actually selected for GeneSets
self.combo.selectitem(selected_default)
self.callbackComboBox(selected_default,option)
except Exception:
try:
self.combo.selectitem(self.default_option[0])
self.callbackComboBox(self.default_option[0],option)
except Exception:
None
if option == 'selected_version':
notes = 'Note: Available species may vary based on database selection \n'
ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 10)
if 'pulldown_comps' in od.DisplayObject() and self.display_options != ['NA']:
self._option = option
self.default_option = self.display_options
###From the option, create two new options, one for each group in the comparison
option1 = option+'-1'; option2 = option+'-2'
### Pack these into a groups to maintain organization
group = PmwFreeze.Group(self.sf.interior(),tag_text = self.title)
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
if check_index == -1:
check_option = 'analyze_all_conditions'
def checkbuttoncallback(tag,state,checkbuttoncallback=self.checkbuttoncallback,option=check_option):
#print tag,state,option
checkbuttoncallback(tag,state,option)
### Create and pack a vertical RadioSelect widget, with checkbuttons.
self.checkbuttons = PmwFreeze.RadioSelect(self._parent,
buttontype = 'checkbutton', command = checkbuttoncallback)
self.checkbuttons.pack(side = 'top', expand = 1, padx = 0, pady = 0)
### Add some buttons to the checkbutton RadioSelect.
self.checkbuttons.add('Analyze ALL GROUPS in addition to specifying comparisons')
self._user_variables[check_option] = 'no'
check_index+=1
def comp_callback1(tag,callback=self.callback,option1=option1):
callback(tag,option1)
def comp_callback2(tag,callback=self.callback,option2=option2):
callback(tag,option2)
#labelpos = 'w', label_text = self.title, -inside of OptionMenu
self.comp1 = PmwFreeze.OptionMenu(group.interior(),
items = self.default_option, menubutton_width = 20, command = comp_callback1)
self.comp1.pack(side = LEFT, anchor = 'w', padx = 10, pady = 0)
self.comp2 = PmwFreeze.OptionMenu (group.interior(),
items = self.default_option, menubutton_width = 20, command = comp_callback2,
); self.comp2.pack(side = LEFT, anchor = 'w', padx = 10, pady = 0)
try: self.comp1.invoke(notes[0])
except Exception: null=[]
try: self.comp2.invoke(notes[1])
except Exception: null=[]
if 'simple_entry' in od.DisplayObject() and self.display_options != ['NA']:
self._option = option
### Create and pack a horizontal RadioSelect widget.
if len(override_default)>0: self.default_option = override_default
else: self.default_option = self.display_options[0]
def enter_callback(tag,enter_callback=self.enter_callback,option=option):
enter_callback(tag,option)
#self.title = self.title + '\t ' #entry_width=entrywidth
self.entry_field = PmwFreeze.EntryField(self.sf.interior(),
labelpos = 'e', label_text = self.title,
validate = enter_callback,
value = self.default_option
); self.entry_field.pack(anchor='w',padx = 10, pady = 1)
if 'enter' in od.DisplayObject() and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
if insert_into_group == 'yes': parent_type = custom_group.interior(); enter_index+=1
self._option = option
### Create and pack a horizontal RadioSelect widget.
if len(override_default)>0: self.default_option = override_default
elif len(defaults) <1: self.default_option = self.display_options[0]
else: self.default_option = defaults[i]
#print self.default_option, self.title; kill
### entrytxt object for alt_exon_fold_cutoff in option
def custom_validate(tag,custom_validate=self.custom_validate,option=option):
validate = custom_validate(tag,option)
def custom_validate_p(tag,custom_validate_p=self.custom_validate_p,option=option):
validate = custom_validate_p(tag,option)
#print [validate], tag, option
if 'Genes_network' in option_list:
label_pos = 'e' ### Orients to the text left -> east
entrywidth = 20 ### Width of entry entry_width=entrywidth
elif 'GeneSelection' in option_list or 'GeneSelectionPredict' in option_list: ### For clustering UI
label_pos = 'e' ### Orients to the text left -> east
entrywidth = 20 ### Width of entry entry_width=entrywidth
elif 'JustShowTheseIDs' in option_list or 'JustShowTheseIDsPredict' in option_list: ### For clustering UI
label_pos = 'e' ### Orients to the text left -> east
entrywidth = 20 ### Width of entry entry_width=entrywidth
else:
label_pos = 'e'
try:
if float(self.default_option) <= 1: use_method = 'p'
else: use_method = 'i'
except ValueError:
#self.default_option = 'CHANGE TO A NUMERIC VALUE'; use_method = 'i'
self.default_option = string.replace(self.default_option,'---','')
use_method = 'i'
if use_method == 'p':
self.entry_field = PmwFreeze.EntryField(parent_type,
labelpos = label_pos, label_text = self.title, validate = custom_validate_p,
value = self.default_option, hull_borderwidth = 1)
if use_method == 'i':
self.entry_field = PmwFreeze.EntryField(parent_type,
labelpos = label_pos, label_text = self.title, validate = custom_validate,
value = self.default_option, hull_borderwidth = 1)
#if 'GeneSelection' in option_list:
#self.entry_field.component("entry").configure(width=5)
if insert_into_group == 'no': self.entry_field.pack(anchor = 'w', padx = 10, pady = 0)
elif enter_index == 1: self.entry_field1 = self.entry_field
elif enter_index == 2: self.entry_field2 = self.entry_field
elif enter_index == 3: self.entry_field3 = self.entry_field
elif enter_index == 4: self.entry_field4 = self.entry_field
elif enter_index == 5: self.entry_field5 = self.entry_field
if len(notes)>0: Label(self._parent, text=notes).pack()
if 'multiple-checkbox' in od.DisplayObject() and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
self._option = option
if len(override_default)>0: self.default_option = override_default
elif len(defaults) <1: self.default_option = self.display_options[0]
else: self.default_option = defaults[i]
def checkbuttoncallback(tag,state,checkbuttoncallback=self.checkbuttoncallback,option=option):
checkbuttoncallback(tag,state,option)
### Create and pack a vertical RadioSelect widget, with checkbuttons.
self.checkbuttons = PmwFreeze.RadioSelect(parent_type,
buttontype = 'checkbutton', orient = 'vertical',
labelpos = 'w', command = self.checkbuttoncallback,
label_text = self.title, hull_borderwidth = 2)
self.checkbuttons.pack(padx = 10, pady = 0)
### Add some buttons to the checkbutton RadioSelect.
for text in self.display_options:
if text != ['NA']:
self.checkbuttons.add(text)
#if 'common-' not in text and 'all-' not in text:
#self.checkbuttons.invoke(text)
if len(notes)>0: Label(self._parent, text=notes).pack()
if 'single-checkbox' in od.DisplayObject() and self.display_options != ['NA']:
if use_scroll == 'yes': parent_type = self.sf.interior()
else: parent_type = self._parent
if defaults == 'comps': parent_type = self._parent; orient_type = 'top'
if insert_into_group == 'yes': parent_type = custom_group.interior(); check_index+=1
if defaults == 'groups': parent_type = self.sf.interior(); orient_type = 'top'
self._option = option
proceed = 'yes'
"""if option == 'export_splice_index_values':
if analysis_method != 'splicing-index': proceed = 'no' ### only export corrected constitutive ratios if splicing index method chosen"""
if proceed == 'yes':
if len(override_default)>0: self.default_option = override_default
elif len(defaults) <1: self.default_option = self.display_options[0]
else: self.default_option = defaults[i]
if self.default_option != 'NA':
def checkbuttoncallback(tag,state,checkbuttoncallback=self.checkbuttoncallback,option=option):
checkbuttoncallback(tag,state,option)
### Create and pack a vertical RadioSelect widget, with checkbuttons.
self.checkbuttons = PmwFreeze.RadioSelect(parent_type,
buttontype = 'checkbutton', command = checkbuttoncallback)
#hull_borderwidth = 2, hull_relief = 'ridge')
if insert_into_group == 'no': self.checkbuttons.pack(anchor = 'w',side = orient_type, padx = 10, pady = 1)
elif check_index == 1: checkbuttons1 = self.checkbuttons
elif check_index == 2: checkbuttons2 = self.checkbuttons
elif check_index == 3: checkbuttons3 = self.checkbuttons
elif check_index == 4: checkbuttons4 = self.checkbuttons
### Add some buttons to the checkbutton RadioSelect.
self.checkbuttons.add(self.title)
if self.default_option == 'yes': self.checkbuttons.invoke(self.title)
else: self._user_variables[option] = 'no'
custom_group_endpoints = ['ge_ptype', 'get_additional', 'expression_threshold', 'run_goelite', 'gene_expression_cutoff', 'microRNA_prediction_method']
try:
eod = option_db['expression_threshold']
if eod.ArrayOptions() == ['NA']:
custom_group_endpoints.append('rpkm_threshold') ### Ensures that if analyzing pre-compiled gene expression values, only certain items are shown and in a frame
custom_group_endpoints.remove('expression_threshold')
#insert_into_group = 'yes'
except Exception: pass
if option in custom_group_endpoints and insert_into_group == 'yes':
### This is employed when we want to place several items into a group frame together.
### Since this is a generic class, we need to setup special cases to do this, however,
### this same code could be used in other instances as well
reorganize = 'no'
self.group_tag = 'GO-Elite Over-Representation and Filtering Parameters'; pady_int = 5
if 'run_goelite' in option_list: self.group_tag = 'Gene Expression Analysis Options'; pady_int = 1
if 'microRNA_prediction_method' in option_list: self.group_tag = 'Advanced Options'; pady_int = 1; reorganize = 'yes'
try: checkbuttons1.pack(anchor = 'w', side = 'top', padx = 9, pady = 0)
except Exception: null=[]
try: checkbuttons2.pack(anchor = 'w', side = 'top', padx = 9, pady = 0)
except Exception: null=[]
try: checkbuttons3.pack(anchor = 'w', side = 'top', padx = 9, pady = 0)
except Exception: null=[]
try: checkbuttons4.pack(anchor = 'w', side = 'top', expand = 1, padx = 9, pady = 0)
except Exception: null=[]
try: radiobuttons2.pack(side = orient_type, expand = 1, padx = 10, pady = 5)
except Exception: null=[]
try: comp1.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: radiobuttons1.pack(side = orient_type, expand = 1, padx = 10, pady = 5)
except Exception: null=[]
if reorganize == 'yes':
try: comp2.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: self.entry_field1.pack(anchor = 'w', padx = 10, pady = 0)
except Exception: null=[]
try: self.entry_field2.pack(anchor = 'w', padx = 10, pady = 0);
except Exception: null=[]
try: self.entry_field3.pack(anchor = 'w', padx = 10, pady = 0)
except Exception: null=[]
try: self.entry_field4.pack(anchor = 'w', padx = 10, pady = 0)
except Exception: null=[]
try: self.entry_field5.pack(anchor = 'w', padx = 10, pady = 0)
except Exception: null=[]
if reorganize == 'no':
try: comp2.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp3.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp4.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp5.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp6.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp7.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp8.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp9.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp10.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp11.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp12.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
try: comp13.pack(anchor = 'w', padx = 10, pady = pady_int)
except Exception: null=[]
enter_index=0; radio_index=0; dropdown_index=0
if 'ge_ptype' in option or 'expression_threshold' in option or 'gene_expression_cutoff' in option or 'rpkm_threshold' in option:
custom_group = PmwFreeze.Group(self.sf.interior(),tag_text = self.group_tag)
custom_group.pack(fill = 'both', expand = 1, padx = 10, pady = 10)
insert_into_group = 'yes'
#i+=1 ####Keep track of index
if len(option_list)>0: ### Used when visualizing WikiPathways (no option_list supplied - all parameters hard coded)
#def quitcommand(): parent.destroy; sys.exit()
#self.button = Button(text=" Quit ", command=quitcommand)
#self.button.pack(side = 'bottom', padx = 10, pady = 10)
if 'input_cdf_file' in option_list: ### For the CEL file selection window, provide a link to get Library files
button_text = 'Download Library Files'; d_url = 'http://www.affymetrix.com/support/technical/byproduct.affx?cat=arrays'
self.d_url = d_url; text_button = Button(self._parent, text=button_text, command=self.Dlinkout); text_button.pack(side = 'left', padx = 5, pady = 5)
if 'GeneSelectionPredict' in option_list:
run_button = Button(self._parent, text='Run Analysis', command=self.runPredictGroups)
run_button.pack(side = 'right', padx = 10, pady = 10)
else:
continue_to_next_win = Button(text = 'Continue', command = self._parent.destroy)
continue_to_next_win.pack(side = 'right', padx = 10, pady = 10)
if 'input_annotation_file' in option_list:
skip_win = Button(text = 'Skip', command = self._parent.destroy)
skip_win.pack(side = 'right', padx = 10, pady = 10)
back_button = Button(self._parent, text="Back", command=self.goBack)
back_button.pack(side = 'right', padx =10, pady = 5)
quit_win = Button(self._parent, text="Quit", command=self.quit)
quit_win.pack(side = 'right', padx =10, pady = 5)
button_text = 'Help'
url = 'http://www.altanalyze.org/help_main.htm'; self.url = url
pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf'; pdf_help_file = filepath(pdf_help_file); self.pdf_help_file = pdf_help_file
try: help_button = Button(self._parent, text=button_text, command=self.GetHelpTopLevel); help_button.pack(side = 'left', padx = 5, pady = 5)
except Exception: help_button = Button(self._parent, text=button_text, command=self.linkout); help_button.pack(side = 'left', padx = 5, pady = 5)
if 'species' in option_list:
new_species_button = Button(self._parent, text='Add New Species', command=self.newSpecies)
new_species_button.pack(side = 'left', padx = 5, pady = 5)
def runPredictGroupsTest():
self.runPredictGroups(reportOnly=True)
if 'GeneSelectionPredict' in option_list:
expFilePresent = self.verifyExpressionFile()
if expFilePresent:
button_instance = Button(self._parent, text='Test Settings', command=runPredictGroupsTest)
button_instance.pack(side = 'left', padx = 5, pady = 5)
if 'build_exon_bedfile' in option_list and array_type == 'RNASeq':
self.pdf_help_file = filepath('AltDatabase/kallisto/license.txt')
button_instance = Button(self._parent, text='Kallisto License', command=self.openPDFHelp)
button_instance.pack(side = 'left', padx = 5, pady = 5)
self._parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
self._parent.mainloop()
def verifyExpressionFile(self):
continue_analysis = False ### See if the input file is already present
try:
expFile = fl.ExpFile()
count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
if count>1: continue_analysis = True
else:
count = verifyFileLength(expFile)
if count>1: continue_analysis = True
except Exception: pass
return continue_analysis
    def goBack(self):
        """Close the current window and relaunch AltAnalyze setup at the
        previous parameter screen (never returns; exits via sys.exit)."""
        self._parent.destroy()
        ### NOTE(review): selected_options aliases the module-level
        ### selected_parameters list, so the 'Library' blanking below mutates
        ### that global in place - presumably intentional; verify.
        selected_options = selected_parameters; selected_options2=[] ### If we don't do this we get variable errors
        if 'Library' == selected_options[-1]: selected_options[-1] = ''
        for i in selected_options:
            if i!='Library': selected_options2.append(i)
        selected_options = selected_options2
        try:
            while selected_options[-2]==selected_options[-1]:
                selected_options = selected_options[:-1] ### When clicking back on the next loop of a back, makes sure you don't get looped back to the same spot
        except Exception: selected_options = selected_options
        ### Fewer than three remaining steps means restart from the beginning
        if len(selected_options)<3: run_parameter = 'no'
        else: run_parameter = selected_options[:-1], self._user_variables
        AltAnalyze.AltAnalyzeSetup(run_parameter); sys.exit()
def newSpecies(self):
self._user_variables['species'] = 'Add Species'
self._parent.destroy()
    def runPredictGroups(self,reportOnly=False):
        """Collect the sample-group prediction parameters from the GUI,
        package them into a GeneSelectionParameters object (stored under
        'gsp'), and either report predicted regulated genes (reportOnly=True)
        or close the window so the full analysis can proceed."""
        ### Pull each prediction setting from the collected user variables
        column_metric = self.Results()['column_metric_predict']
        column_method = self.Results()['column_method_predict']
        GeneSetSelection = self.Results()['GeneSetSelectionPredict']
        PathwaySelection = self.Results()['PathwaySelectionPredict']
        GeneSelection = self.Results()['GeneSelectionPredict']
        JustShowTheseIDs = self.Results()['JustShowTheseIDsPredict']
        ExpressionCutoff = self.Results()['ExpressionCutoff']
        CountsCutoff = self.Results()['CountsCutoff']
        rho_cutoff = self.Results()['rho_cutoff']
        FoldDiff = self.Results()['FoldDiff']
        SamplesDiffering = self.Results()['SamplesDiffering']
        try: featurestoEvaluate = self.Results()['featuresToEvaluate']
        except Exception: featurestoEvaluate = 'Genes'
        removeOutliers = self.Results()['removeOutliers']
        restrictBy = self.Results()['restrictBy']
        excludeCellCycle = self.Results()['excludeCellCycle']
        ### Bundle everything into the shared parameter object
        gsp = GeneSelectionParameters(species,array_type,vendor)
        gsp.setGeneSet(GeneSetSelection)
        gsp.setPathwaySelect(PathwaySelection)
        gsp.setGeneSelection(GeneSelection)
        gsp.setJustShowTheseIDs(JustShowTheseIDs)
        gsp.setNormalize('median')
        gsp.setSampleDiscoveryParameters(ExpressionCutoff,CountsCutoff,FoldDiff,SamplesDiffering,
                    removeOutliers,featurestoEvaluate,restrictBy,excludeCellCycle,column_metric,column_method,rho_cutoff)
        self._user_variables['gsp'] = gsp
        import RNASeq
        expFile = fl.ExpFile()
        mlp_instance = fl.MLP()
        ### Use the steady-state expression file when it already exists
        count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
        if count>1: expFile = expFile[:-4]+'-steady-state.txt'
        if reportOnly:
            ### Only used to report back what the number of regulated genes are if the gene expression file is present
            reload(RNASeq)
            try: report = RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp_instance, parameters=gsp, reportOnly=reportOnly)
            except Exception: report = traceback.format_exc()
            if 'options_result_in_no_genes' in report:
                report = 'Options are too stringent. Try relaxing the thresholds.'
            try: InfoWindow(report, 'Continue')
            except Exception: print report
        else:
            ### If the parameters look OK, or user wishes to run, collapse this GUI adn proceed (once exited it will run)
            self._parent.quit()
            self._parent.destroy()
        """
        values = expFile, mlp_instance, gsp, reportOnly
        StatusWindow(values,'predictGroups') ### display an window with download status
        root = Tk()
        root.title('AltAnalyze: Evaluate Sampled Groupings')
        gu = GUI(root,'PredictGroups',[],'')
        nextStep = gu.Results()['next']
        group_selected = gu.Results()['group_select']
        if nextStep == 'UseSelected':
            print group_selected;sys.exit()
            group_selected = group_selected
            ### When nothing returned here, the full analysis will run
        else:
            #print 're-initializing window'
            AltAnalyze.AltAnalyzeSetup((selected_parameters,user_variables)); sys.exit()
        """
    def GetHelpTopLevel(self):
        """Open a small Toplevel dialog offering two help choices: online
        documentation (openOnlineHelp) or the bundled PDF (openPDFHelp)."""
        message = ''
        self.message = message; self.online_help = 'Online Documentation'; self.pdf_help = 'Local PDF File'
        tl = Toplevel(); self._tl = tl; nulls = '\t\t\t\t'; tl.title('Please select one of the options')
        self.sf = PmwFreeze.ScrolledFrame(self._tl,
                labelpos = 'n', label_text = '',
                usehullsize = 1, hull_width = 320, hull_height = 200)
        self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
        self.frame = self.sf.interior()
        group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
        group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
        ### Display the application icon beside the buttons
        filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
        text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp); text_button2.pack(side = 'top', padx = 5, pady = 5)
        ### NOTE(review): both branches below are identical, so the except
        ### clause is redundant - likely a copy/paste leftover; verify.
        try: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
        except Exception: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
        tl.mainloop()
def openPDFHelp(self):
if os.name == 'nt':
try: os.startfile('"'+self.pdf_help_file+'"')
except Exception: os.system('open "'+self.pdf_help_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.pdf_help_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.pdf_help_file+'"')
if 'license' not in self.pdf_help_file:
try: self._tl.destroy()
except Exception: pass
def openOnlineHelp(self):
try: webbrowser.open(self.url)
except Exception: null=[]
self._tl.destroy()
def linkout(self):
try: webbrowser.open(self.url)
except Exception: null=[]
def Dlinkout(self):
try: webbrowser.open(self.d_url)
except Exception: null=[]
def setvscrollmode(self, tag):
self.sf.configure(vscrollmode = tag)
def info(self):
tkMessageBox.showinfo("title","message",parent=self._parent)
def deleteWindow(self):
#tkMessageBox.showwarning("Quit","Use 'Quit' button to end program!",parent=self._parent)
self._parent.destroy(); sys.exit()
def tldeleteWindow(self,tl):
try: tl.quit(); tl.destroy()#; print 1
except Exception: tl.destroy()#; print 2
def quit(self):
try: self._parent.quit(); self._parent.destroy(); sys.exit()
except Exception: self._parent.quit(); sys.exit()
def continue_win(self):
### Currently not used - can be used to check data integrity before closing a window
try: self._parent.quit(); self._parent.destroy(); sys.exit()
except Exception: self._parent.quit(); sys.exit()
def chooseDirectory(self,option):
tag = tkFileDialog.askdirectory(parent=self._parent)
self._user_variables[option] = tag
def chooseFile(self,option):
tag = tkFileDialog.askopenfile(parent=self._parent)
self._user_variables[option] = tag.name
    def getPath(self,option):
        """Prompt for a directory or file (depending on *option* naming),
        record the chosen path in the entry widget, _user_variables and the
        persisted default-location config, falling back through several
        dialog invocations and exiting with a message if Tk dialogs fail."""
        if 'dir' in option or 'folder' in option:
            ### First try to open the dialog at the remembered directory,
            ### then without one, then bail out with an explanatory message
            try: dirPath = tkFileDialog.askdirectory(parent=self._parent,initialdir=self.default_dir)
            except Exception:
                self.default_dir = ''
                try: dirPath = tkFileDialog.askdirectory(parent=self._parent,initialdir=self.default_dir)
                except Exception:
                    try: dirPath = tkFileDialog.askdirectory(parent=self._parent)
                    except Exception:
                        print_out = "AltAnalyze is unable to initialize directory opening.\nThis error may be due to one of the following issues:\n"
                        print_out += "1) (if running directly from source code) Tkinter/PMW components are not installed or are incompatible.\n"
                        print_out += "2) (if running directly from source code) Python version is untested with AltAnalyze.\n"
                        print_out += "3) There is a conflict between the AltAnalyze packaged version of python on the OS.\n\n"
                        print_out += "Contact genmapp@gladstone.ucsf.edu if this error persists with your system information.\n"
                        print_out += "Alternatively, try AltAnalyze using command-line options (http://www.AltAnalyze.org)."
                        try: InfoWindow(print_out,'Continue')
                        except Exception: print print_out
                        try: self._parent.destroy(); sys.exit()
                        except Exception: sys.exit()
            self.default_dir = dirPath
            entrytxt = self.pathdb[option]; entrytxt.set(dirPath)
            self._user_variables[option] = dirPath
            ### Persist the chosen directory as the new default location
            try: file_location_defaults['PathDir'].SetLocation(dirPath) ### Possible unknown exception here... may need to correct before deployment
            except Exception:
                try:
                    ### Entry was deleted from Config file - re-create it
                    fl = FileLocationData('local', dirPath, 'all')
                    file_location_defaults['PathDir'] = fl
                except Exception: null=[]
            exportDefaultFileLocations(file_location_defaults)
        if 'file' in option:
            ### Same fallback ladder as above, but for a file-open dialog
            try: tag = tkFileDialog.askopenfile(parent=self._parent,initialdir=self.default_file)
            except Exception:
                self.default_file = ''
                try: tag = tkFileDialog.askopenfile(parent=self._parent,initialdir=self.default_file)
                except Exception:
                    try: tag = tkFileDialog.askopenfile(parent=self._parent)
                    except Exception:
                        print_out = "AltAnalyze is unable to initialize directory opening.\nThis error may be due to one of the following issues:\n"
                        print_out += "1) (if running directly from source code) Tkinter/PMW components are not installed or are incompatible.\n"
                        print_out += "2) (if running directly from source code) Python version is untested with AltAnalyze.\n"
                        print_out += "3) There is a conflict between the AltAnalyze packaged version of python on the OS.\n\n"
                        print_out += "Contact genmapp@gladstone.ucsf.edu if this error persists with your system information.\n"
                        print_out += "Alternatively, try AltAnalyze using command-line options (see documentation at http://AltAnalyze.org)."
                        try: InfoWindow(print_out,'Continue')
                        except Exception: print print_out
                        try: self._parent.destroy(); sys.exit()
                        except Exception: sys.exit()
            ### The dialog returns a file object (or None when cancelled)
            try: filePath = tag.name #initialdir=self.default_dir
            except AttributeError: filePath = ''
            filePath_dir = string.join(string.split(filePath,'/')[:-1],'/')
            self.default_file = filePath_dir
            entrytxt = self.pathdb[option]
            entrytxt.set(filePath)
            self._user_variables[option] = filePath
            try: file_location_defaults['PathFile'].SetLocation(filePath_dir)
            except Exception:
                try:
                    ### Entry was deleted from Config file - re-create it
                    fl = FileLocationData('local', filePath_dir, 'all')
                    file_location_defaults['PathFile'] = fl
                except Exception: null = None
            exportDefaultFileLocations(file_location_defaults)
def Report(self,tag,option):
output = tag
return output
def __repr__(self,tag,option): return self.Report(tag,option)
def Results(self):
for i in self._user_variables:
user_variables[i]=self._user_variables[i]
return self._user_variables
def custom_validate(self, text, option):
self._user_variables[option] = text
#try: text = float(text);return 1
#except ValueError: return -1
def enter_callback(self, tag, option):
if self.defaults == 'batch':
### Bath removal array annotation UI
self._user_variables[option,'batch'] = tag
else:
self._user_variables[option] = tag
def custom_validate_p(self, text, option):
#print [option],'text:', text
self._user_variables[option] = text
try:
text = float(text)
if text <1:return 1
else:return -1
except ValueError:return -1
    def callback(self, tag, option):
        """OptionMenu selection callback: record the chosen *tag* under
        *option*, then refresh dependent pulldowns or entry fields for the
        options that cascade (database version, species, vendor, analysis
        method, selected version)."""
        #print 'Button',[option], tag,'was pressed.'
        change_var = ''
        self._user_variables[option] = tag
        if option == 'dbase_version':
            ###Export new species info
            exportDBversion(tag); change_var = 'all'
            try: self.changeVendorSelection(); self.changeSpeciesSelection(); self.changeArraySelection()
            except Exception: null=[]
        elif option == 'species':
            try: self.changeArraySelection()
            except Exception: null=[]
        elif option == 'manufacturer_selection':
            try: self.changeSpeciesSelection(); self.changeArraySelection()
            except Exception: null=[]
        #elif option == 'array_type':
            #self.checkSpeciesArraySelection(array_type)
        elif option == 'analysis_method':
            ### Pre-fill the fold-cutoff entry with the method's default
            if tag == 'ASPIRE':
                try: self.entry_field2.setentry('0.2')
                except Exception: null=[]
                self._user_variables['alt_exon_fold_cutoff'] = '0.2'
            elif tag == 'linearregres':
                try: self.entry_field2.setentry('2')
                except Exception: null=[]
                self._user_variables['alt_exon_fold_cutoff'] = '2'
        elif option == 'selected_version':
            ### Repopulate the species pulldown(s) for the chosen version
            current_species_names = db_versions[tag]
            current_species_names.sort()
            try: self.speciescomp.setitems(['---']+current_species_names)
            except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
            try: self.speciescomp2.setitems(['---']+current_species_names)
            except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
            try: self.speciescomp3.setitems(['---']+current_species_names)
            except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        """
        ### Doesn't work right now because self.entry_field only has one object instance and complicated to get multiple
        elif option == 'ORA_algorithm':
            if tag == 'Permute p-value':
                try: self.entry_field.setentry('2000')
                except Exception: null=[]
                self._user_variables['permutation'] = '2000'
            elif tag == 'Fisher Exact Test':
                try: self.entry_field.setentry('NA')
                except Exception: null=[]
                self._user_variables['permutation'] = '0'
        """
    def callbackComboBox(self, tag, option):
        """ComboBox selection callback (similar to callback, but ComboBox
        widgets use their own methods). Multi-select options are stored as
        lists; cascading options refresh dependent pulldowns or entries."""
        #print 'Button',[option], tag,'was pressed.'
        if option == 'interactionDirs' or 'PathwaySelection' in option or 'HeatmapAdvanced' in option: ### Allow multiple selections
            if len(tag)==0:
                self._user_variables[option] = None ### no options selected
            else:
                if isinstance(tag, tuple) or isinstance(tag, list):
                    pass
                else:
                    try: tag = self._user_variables[option] ### This indicates that this option was previously set and in the new window was not explicitly set, suggesting we should re-apply the original settings
                    except Exception: None
                try: ### Occurs when no items selected
                    if len(tag[0])==1: ### Hence, just one item selected
                        self._user_variables[option] = [tag]
                except Exception:
                    pass
                ### A single selection is wrapped in a list; multiples kept as-is
                if len(list(tag)[0]) == 1:
                    tag_list = [tag]
                else: tag_list = list(tag)
                self._user_variables[option] = tag_list
        else:
            self._user_variables[option] = tag
        if option == 'selected_version':
            ### Rebuild the species pulldown(s) for the newly chosen version
            current_species_names = db_versions[tag]
            current_species_names.sort()
            current_species_names = ['---']+current_species_names
            species_option = current_species_names[0]
            try:
                self.speciescomp._list.setlist(current_species_names) ### This is the way we set a new list for ComboBox
                ### Select the best default option to display (keep existing or re-set)
                if 'selected_species1' in self._user_variables: ### If this is the species downloader
                    species_option = 'selected_species1'
                else:
                    for i in self._user_variables:
                        if 'species' in i: species_option = i
                default = self.getBestDefaultSelection(species_option,current_species_names)
                self.speciescomp.selectitem(default)
            except Exception: None ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
            try:
                self.speciescomp2._list.setlist(current_species_names)
                default = self.getBestDefaultSelection('selected_species2',current_species_names)
                self.speciescomp2.selectitem(default)
            except Exception: None ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
            try:
                self.speciescomp3._list.setlist(current_species_names)
                default = self.getBestDefaultSelection('selected_species3',current_species_names)
                self.speciescomp3.selectitem(default)
            except Exception: None ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        elif option == 'dbase_version':
            ###Export new species info
            exportDBversion(tag); change_var = 'all'
            try: self.changeVendorSelection(); self.changeSpeciesSelection(); self.changeArraySelection()
            except Exception: null=[]
        elif option == 'species':
            try: self.changeArraySelection()
            except Exception: null=[]
        elif option == 'manufacturer_selection':
            try: self.changeSpeciesSelection(); self.changeArraySelection()
            except Exception: null=[]
        #elif option == 'array_type':
            #self.checkSpeciesArraySelection(array_type)
        elif option == 'analysis_method':
            ### Pre-fill the fold-cutoff entry with the method's default
            if tag == 'ASPIRE':
                try: self.entry_field2.setentry('0.2')
                except Exception: null=[]
                self._user_variables['alt_exon_fold_cutoff'] = '0.2'
            elif tag == 'linearregres':
                try: self.entry_field2.setentry('2')
                except Exception: null=[]
                self._user_variables['alt_exon_fold_cutoff'] = '2'
        elif 'GeneSetSelection' in option or 'GeneSetSelectionPredict' in option:
            #print option,tag
            if 'network' in option: suffix='_network'
            else: suffix=''
            #species = self._user_variables['species']
            try:
                if 'Ontology' in tag: directory = 'gene-go'
                else: directory = 'gene-mapp'
                if tag in self._user_variables and 'StoredGeneSets' not in tag: ### Want to reload StoredGeneSets each time
                    supported_genesets = self._user_variables[tag]
                    #print 'loading pathways from memory'
                else:
                    #print 'importing all pathways from scratch'
                    supported_genesets = listAllGeneSetCategories(species,tag,directory)
                    self._user_variables[tag] = supported_genesets ### Store this so we don't waste time reloading it the next time
                self.pathwayselect._list.setlist(supported_genesets)
                ##### self.pathwayselect.selectitem(supported_genesets[0]) # This sets the default for multi- or single-combo boxes... DON'T SELECT UNLESS YOU WANT TO HAVE TO DE-SELECT IT
                ##### self._user_variables['PathwaySelection'+suffix] = supported_genesets[0] ### store this default
                ##### self._user_variables['PathwaySelectionPredict'+suffix] = supported_genesets[0] ### store this default
                self.pathwayselect.selectitem(0,setentry = 1) # Select the item but then re-set the list to deselect it
                self.pathwayselect._list.setlist(supported_genesets)
            except Exception, e:
                #print e
                pass ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
def getBestDefaultSelection(self,option,option_list):
default = option_list[0] ### set the default to the first option listed
if option in self._user_variables:
selected = self._user_variables[option]
if selected in option_list: ### If selected species exists in the new selected version of EnsMart
default = selected
else:
self._user_variables[option] = default ### Hence, the default has changed, so re-set it
return default
def changeSpeciesSelection(self):
vendor = self._user_variables['manufacturer_selection'] ### Get vendor (stored as global)
current_species_names = getSpeciesList(vendor) ### Need to change species, manufacturers and array_type
for i in self._option_list:
if 'species' in i: ### Necessary if the user changes dbase_version and selects continue to accept the displayed species name (since it's note directly invoked)
last_selected_species = self._user_variables[i]
if last_selected_species not in current_species_names:
try: self._user_variables[i] = current_species_names[0]
except Exception: null = []
try:
self.speciescomp._list.setlist(current_species_names)
self.speciescomp.selectitem(current_species_names[0])
except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
def checkSpeciesArraySelection(self,array_type):
current_species_names = getSpeciesForArray(array_type)
try:
self.speciescomp._list.setlist(current_species_names)
self.speciescomp.selectitem(current_species_names[0])
except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
for i in self._option_list:
if 'species' in i: ### Necessary if the user changes dbase_version and selects continue to accept the displayed species name (since it's note directly invoked)
try: self._user_variables[i] = current_species_names[0]
except Exception: null = []
def changeArraySelection(self):
species_name = self._user_variables['species'] ### Get species (stored as global)
vendor = self._user_variables['manufacturer_selection'] ### Get vendor (stored as global)
species = species_codes[species_name].SpeciesCode()
current_array_types, manufacturer_list = getArraysAndVendors(species,vendor)
if 'Other ID'==vendor: ### Populate the current_array_types as all Ensembl linked systems
current_array_types = getSupportedGeneSystems(species,'uid-gene')
try:
self.arraycomp._list.setlist(current_array_types)
self.arraycomp.selectitem(current_array_types[0])
except Exception:
pass ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
for i in self._option_list:
if 'array_type' in i: ### Necessary if the user changes dbase_version and selects continue to accept the displayed species name (since it's note directly invoked)
if self._user_variables[i] not in current_array_types: ### If the current array type is supported by the new species selection, keep it the same
try: self._user_variables[i] = current_array_types[0]
except Exception: null = []
def changeVendorSelection(self):
species_name = self._user_variables['species'] ### Get species (stored as global)
vendor = self._user_variables['manufacturer_selection']
current_array_types, manufacturer_list = getArraysAndVendors(species,'')
try:
self.vendorcomp._list.setlist(manufacturer_list)
self.vendorcomp.selectitem(manufacturer_list[0])
except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
for i in self._option_list:
if 'manufacturer_selection' in i: ### Necessary if the user changes dbase_version and selects continue to accept the displayed species name (since it's note directly invoked)
if vendor in manufacturer_list: new_vendor = vendor
else: new_vendor = manufacturer_list[0]
try: self._user_variables[i] = new_vendor
except Exception: null = []
    def multcallback(self, tag, state):
        """Callback for the multiple-selection widget: records the toggled
        button's tag in the user-variables dict.

        NOTE(review): unlike checkbuttoncallback below, `option` is neither a
        parameter nor a local here — presumably it is a module-level global
        set elsewhere; if not, the final assignment raises NameError. Confirm
        against the rest of the module before relying on this callback.
        """
        if state: action = 'pressed.'
        else: action = 'released.'
        """print 'Button', tag, 'was', action, \
        'Selection:', self.multiple.getcurselection()"""
        self._user_variables[option] = tag
def checkbuttoncallback(self, tag, state, option):
if state: action = 'pressed.'
else: action = 'released.'
"""print 'Button',[option], tag, 'was', action, \
'Selection:', self.checkbuttons.getcurselection()"""
if state==0: tag2 = 'no'
else: tag2 = 'yes'
#print '---blahh', [option], [tag], [state], [action], [self.checkbuttons.getcurselection()]
self._user_variables[option] = tag2
################# Database Version Handling ##################
class PreviousResults:
    """Thin holder exposing a previously collected user-variables dict."""
    def __init__(self, user_variables):
        self._user_variables = user_variables
    def Results(self):
        return self._user_variables
def exportDefaultFileLocations(file_location_defaults):
    """Over-write Config/default-files.csv with the supplied defaults.

    NOTE(review): this function is defined three times in this module with
    identical bodies; only the last definition is effective at import time.
    """
    ### If the user supplies new defaults, over-write the existing
    fn=filepath('Config/default-files.csv'); data = open(fn,'w')
    for app in file_location_defaults:
        fl_list = file_location_defaults[app]
        try:
            # Species-specific apps store a list of FileLocationData objects
            for fl in fl_list:
                values = [app,fl.Status(),fl.Location(),fl.Species()]
                values = '"'+string.join(values,'","')+'"'+'\n'
                data.write(values)
        except Exception:
            # 'all'-species apps store a single FileLocationData (not
            # iterable), so the for-loop above raises and we write it singly
            fl = fl_list
            values = [app,fl.Status(),fl.Location(),fl.Species()]
            values = '"'+string.join(values,'","')+'"'+'\n'
            data.write(values)
    data.close()
def getSpeciesList(vendor):
    """Return the species (and, when *vendor* is empty, the vendors) with an
    installed AltDatabase directory.

    With a non-empty *vendor*: returns the sorted species names supported by
    that vendor. With an empty *vendor*: returns
    (species_names, manufacturers_list). Relies on the module globals
    species_codes and array_codes.
    """
    try: current_species_dirs = unique.read_directory('/AltDatabase')
    except Exception: ### Occurs when the version file gets over-written with a bad directory name
        try:
            ### Remove the version file and wipe the species file
            os.remove(filepath('Config/version.txt'))
            #raw = export.ExportFile('Config/species.txt'); raw.close()
            os.mkdir(filepath('AltDatabase'))
            AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
        except Exception: null = []
        try: elite_db_versions = returnDirectoriesNoReplace('/AltDatabase')
        except Exception:
            try: os.mkdir(filepath('AltDatabase'))
            except Exception: null=[]
            elite_db_versions = returnDirectoriesNoReplace('/AltDatabase')
        # Re-export a (possibly empty) version file, then retry the listing
        try: exportDBversion(elite_db_versions[0])
        except Exception: exportDBversion('')
        current_species_dirs = unique.read_directory('/AltDatabase')
    current_species_names=[]; manufacturers_list=[]
    for species in species_codes:
        species_code = species_codes[species].SpeciesCode()
        if species_code in current_species_dirs:
            if len(vendor)>0:
                # Vendor given: include species only if some array from that
                # vendor supports it
                proceed = 'no'
                for array_name in array_codes:
                    manufacturer = array_codes[array_name].Manufacturer()
                    if manufacturer == vendor:
                        if species_code in array_codes[array_name].SpeciesCodes(): proceed = 'yes'
            else:
                # No vendor filter: collect every manufacturer supporting it
                for array_name in array_codes:
                    manufacturer = array_codes[array_name].Manufacturer()
                    if species_code in array_codes[array_name].SpeciesCodes():
                        manufacturers_list.append(manufacturer)
                        proceed = 'yes'
            if proceed == 'yes': current_species_names.append(species)
    current_species_names.sort(); manufacturers_list = unique.unique(manufacturers_list); manufacturers_list.sort()
    if len(vendor)>0:
        return current_species_names
    else: return current_species_names, manufacturers_list
def exportDBversion(db_version):
    """Record *db_version* together with today's date (m/d/y) in
    Config/version.txt."""
    import datetime
    ### 'Plant' builds share the base version string
    version = string.replace(db_version,'Plant','')
    year, month, day = string.split(str(datetime.date.today()),'-')
    exportVersionData(version, month+'/'+day+'/'+year, 'Config/')
def exportVersionData(version,version_date,dir):
    """Write "<version>\\t<version_date>" to <dir>version.txt."""
    out = export.ExportFile(dir+'version.txt')
    out.write(str(version)+'\t'+str(version_date)+'\n')
    out.close()
def importResourceList():
    """Return the GO-Elite resource names listed one-per-line in
    Config/resource_list.txt."""
    fn = filepath('Config/resource_list.txt')
    resource_list = []
    for line in open(fn,'rU').readlines():
        resource_list.append(cleanUpLine(line))
    return resource_list
def importGeneList(filename,limit=None):
    """Return a comma-joined string of unique gene IDs taken from the first
    column of *filename*, optionally stopping after *limit* genes.

    Common header labels (GeneID, UID, probesetID) are skipped, and anything
    after a space in the ID is discarded.
    """
    gene_list = []
    count = 0
    fn = filepath(filename)
    for line in open(fn,'rU').readlines():
        gene = string.split(cleanUpLine(line),'\t')[0]
        if ' ' in gene:
            gene = string.split(gene,' ')[0]
        if gene not in gene_list and gene != 'GeneID' and gene != 'UID' and gene != 'probesetID':
            gene_list.append(gene)
            count += 1
            if limit != None and limit == count:
                break
    return string.join(gene_list,',')
def exportJunctionList(filename,limit=None):
    """Extract up to *limit* junction IDs from *filename* and write them to a
    'top<limit>/' sibling file, returning that export path.

    Two input layouts are handled: junction IDs directly in column 0
    (containing '-'), or pipe-delimited junction pairs in column 1.
    NOTE(review): when limit is None the `count>limit` comparisons rely on
    Python-2 None ordering (always False); under Python 3 they would raise.
    """
    ### Optionally limit the number of results imported
    parent = export.findParentDir(filename)
    file = export.findFilename(filename)
    export_file = parent+'/top'+str(limit)+'/'+file
    #export_file = filename[:-4]+'-top-'+str(limit)+'.txt'
    eo = export.ExportFile(export_file)
    fn=filepath(filename); count=0; firstLine=True
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if firstLine:
            firstLine = False
        elif '-' in t[0]:
            # Layout 1: junction ID in the first column; drop the gene prefix
            # before the first ':' and separate paired IDs with a space
            junctions = string.split(data,'\t')[0]
            junctions = string.replace(junctions,'|',' ')
            junctions = string.join(string.split(junctions,':')[1:],':')
            eo.write(junctions+'\n')
            count+=1
            if limit==count: break
        else:
            # Layout 2: pipe-delimited junction pairs in the second column
            junctions = t[1] #Atg9a:ENSMUSG00000033124:E1.1-E3.1|ENSMUSG00000033124:E1.1-E3.2
            junctions = string.split(junctions,'|') #ENSMUSG00000032314:I11.1_55475101-E13.1-ENSMUSG00000032314:E11.1-E13.1|ENSMUSG00000032314:I11.1_55475153;I11.1_55475101
            for junction_pair in junctions:
                if '-' in junction_pair:
                    try:
                        # Split the pair at the '-ENS' boundary between IDs
                        a,b = string.split(junction_pair,'-ENS')
                        b = 'ENS'+b
                        eo.write(a+' '+b+'\n')
                        count+=1
                        if limit==count: break
                    except Exception:
                        pass
            if count>limit: break
        if count>limit: break
    eo.close()
    return export_file
def importConfigFile():
    """Return Config/config.txt as a {config_type: options} dict
    (tab-delimited, one setting per line)."""
    #print "importing config file"
    fn = filepath('Config/config.txt')
    config_db = {}
    for line in open(fn,'rU').readlines():
        config_type, options = string.split(cleanUpLine(line),'\t')
        config_db[config_type] = options
    return config_db
def exportConfigFile(config_db):
    """Write the {config_type: options} dict back to Config/config.txt,
    one tab-delimited setting per line."""
    #print "exporting config file"
    new_file = 'Config/config.txt'
    data = export.ExportFile(new_file)
    for config in config_db:
        data.write(config+'\t'+str(config_db[config])+'\n')
    ### bug fix: close() was inside the loop, so any config_db with more than
    ### one entry raised "I/O operation on closed file" on the second write
    data.close()
def remoteOnlineDatabaseVersions():
    """Return (db_versions_vendors, db_versions) for remote callers.

    importOnlineDatabaseVersions() (re)builds both module globals; the local
    db_versions below shadows the global of the same name, while
    db_versions_vendors is read from the global set by the import call.
    """
    db_versions = importOnlineDatabaseVersions()
    return db_versions_vendors,db_versions
def importOnlineDatabaseVersions():
    """Parse Config/array_versions.txt into the module globals db_versions
    ({version: [species]}) and db_versions_vendors ({version: [ArrayData]}),
    returning db_versions."""
    fn = filepath('Config/array_versions.txt')
    global db_versions; db_versions = {}
    global db_versions_vendors; db_versions_vendors = {}
    for line in open(fn,'rU').readlines():
        species, version, vendors = string.split(cleanUpLine(line),'\t')
        vendor_list = string.split(vendors,'|')
        ad = ArrayData('','',vendor_list,'',species)
        ### Drop the 'Plus' suffix (GO-Elite's version naming) so users see
        ### the plain AltAnalyze version string
        version = string.replace(version,'Plus','')
        db_versions.setdefault(version,[]).append(species)
        db_versions_vendors.setdefault(version,[]).append(ad)
    return db_versions
def getOnlineDBConfig(file_location_defaults,root):
    """Download the three core configuration files (species_all, source_data,
    array_versions) from the configured base URL, then close *root*.

    On a download failure (an 'Internet' error status) a warning window is
    shown and AltAnalyze is restarted or exited.
    """
    base_url = file_location_defaults['url'].Location()
    #fln1,status1 = update.download(base_url+'test/Config/species_all.txt','Config/','')
    #fln2,status2 = update.download(base_url+'test/Config/source_data.txt','Config/','')
    #fln3,status3 = update.download(base_url+'test/Config/array_versions.txt','Config/','')
    fln1,status1 = update.download(base_url+'Config/species_all.txt','Config/','')
    fln2,status2 = update.download(base_url+'Config/source_data.txt','Config/','')
    fln3,status3 = update.download(base_url+'Config/array_versions.txt','Config/','')
    try:
        # Only the last download's status is checked for success/failure
        if 'Internet' not in status3:
            print 'Finished downloading the latest configuration files.'; root.destroy()
        else:
            try: WarningWindow(status3,'Error Encountered!'); root.destroy(); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
            except Exception: print status3; root.destroy(); sys.exit()
    except Exception: null=[]
def getOnlineEliteDatabase(file_location_defaults,db_version,new_species_codes,update_goelite_resources,root):
    """Download and install species databases (AltAnalyze + GO-Elite) for each
    code in *new_species_codes*, plus supporting programs (Cytoscape,
    TreeView, GO OBO files) when missing.

    Reports success via InfoWindow/print and destroys *root*; on total
    failure shows a WarningWindow and exits.
    """
    base_url = file_location_defaults['url'].Location()
    goelite_url = file_location_defaults['goelite'].Location()
    dbs_added = 0
    # Fetch the Cytoscape bundle only if no Cytoscape_* folder exists yet
    AltAnalyze_folders = read_directory(''); Cytoscape_found = 'no'
    for dir in AltAnalyze_folders:
        if 'Cytoscape_' in dir: Cytoscape_found='yes'
    if Cytoscape_found == 'no':
        fln,status = update.download(goelite_url+'Cytoscape/cytoscape.tar.gz','','')
        if 'Internet' not in status: print "Cytoscape program folder downloaded."
    # Fetch TreeView only when its jar is absent/empty
    count = verifyFileLength('AltDatabase/TreeView/TreeView.jar')
    if count==0:
        fln,status = update.download(goelite_url+'TreeView.zip','AltDatabase/NoVersion','')
        if 'Internet' not in status: print "TreeView program downloaded."
    fln,status = update.download(goelite_url+'Databases/'+db_version+'Plus/OBO.zip','AltDatabase/goelite/','')
    if 'Internet' not in status: print "Gene Ontology structure files downloaded."
    for species_code in new_species_codes:
        #print [base_url+'AltDatabase/'+db_version+'/'+species_code+'.zip']
        # Mouse/human/rat archives carry the full build; others only RNASeq
        if species_code == 'Mm' or species_code == 'Hs' or species_code == 'Rn': specific_extension=''
        else: specific_extension='_RNASeq'
        fln,status = update.download(base_url+'AltDatabase/updated/'+db_version+'/'+species_code+specific_extension+'.zip','AltDatabaseNoVersion/','')
        if 'Internet' not in status:
            print 'Finished downloading the latest species database files.'
            dbs_added+=1
            #print goelite_url+'Databases/'+db_version+'Plus/'+species_code+'.zip'
            try: fln,status = update.download(goelite_url+'Databases/'+db_version+'Plus/'+species_code+'.zip','AltDatabase/goelite/','')
            except Exception: print "No species GO-Elite database found."
            if update_goelite_resources == 'yes': ### Get all additional GeneSet database types (can be lengthy download times)
                try: getAdditionalOnlineResources(species_code, 'All Resources',None)
                except Exception: print "Unable to update additional GO-Elite resources."
            if 'Internet' not in status: print "GO-Elite database installed." ; dbs_added+=1
            else: print "No species GO-Elite database found."
            try: os.mkdir(filepath('AltDatabase/'+species_code))
            except Exception: null=[]
    if dbs_added>0:
        print_out = "New species data successfully added to database."
        if root !='' and root !=None:
            try: InfoWindow(print_out,'Continue')
            except Exception: print print_out
        else: print print_out
        try: root.destroy()
        except Exception: null=[]
    else:
        # Nothing installed: surface the last download status and bail out
        if root !='' and root !=None: WarningWindow(status,'Error Encountered!'); root.destroy(); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
        else: print status; root.destroy(); sys.exit()
class SupprotedArrays:
    """One row of Config/ArrayFileInfo.txt: an array platform with its
    library/annotation files, species and array type.

    (The class name's misspelling is kept for compatibility with callers.)
    """
    def __init__(self, array_name, library_file, annotation_file, species, array_type):
        self.array_name = array_name
        self.library_file = library_file
        self.annotation_file = annotation_file
        self.species = species
        self.array_type = array_type
    def ArrayName(self):
        return self.array_name
    def LibraryFile(self):
        return self.library_file
    def AnnotationFile(self):
        return self.annotation_file
    def Species(self):
        return self.species
    def ArrayType(self):
        return self.array_type
    def __repr__(self):
        return self.ArrayName()
def importSupportedArrayInfo():
    """Parse Config/ArrayFileInfo.txt (header line first) into the module
    global supproted_array_db ({array_name: SupprotedArrays})."""
    global supproted_array_db
    supproted_array_db = {}
    fn = filepath('Config/ArrayFileInfo.txt')
    header_seen = False
    for line in open(fn,'rU').readlines():
        array_name,library_file,annotation_file,species,array_type = string.split(cleanUpLine(line),'\t')
        if not header_seen:
            header_seen = True
        else:
            supproted_array_db[array_name] = SupprotedArrays(
                array_name,library_file,annotation_file,species,array_type)
    return supproted_array_db
def exportSupportedArrayInfo():
    """Write the module global supproted_array_db back to
    Config/ArrayFileInfo.txt (tab-delimited, with header)."""
    out = open(filepath('Config/ArrayFileInfo.txt'),'w')
    out.write(string.join(['ArrayName','LibraryFile','AnnotationFile','Species','ArrayType'],'\t')+'\n')
    for array_name in supproted_array_db:
        sd = supproted_array_db[array_name]
        row = [array_name,sd.LibraryFile(),sd.AnnotationFile(),sd.Species(),sd.ArrayType()]
        out.write(string.join(row,'\t')+'\n')
    out.close()
class SystemData:
    """A gene-ID system record: system code, system name and MOD status."""
    def __init__(self, syscode, sysname, mod):
        self._syscode = syscode
        self._sysname = sysname
        self._mod = mod
    def SystemCode(self):
        return self._syscode
    def SystemName(self):
        return self._sysname
    def MOD(self):
        return self._mod
    def __repr__(self):
        return self.SystemCode()+'|'+self.SystemName()+'|'+self.MOD()
def getSystemInfo():
    """Load Config/source_data.txt and return the resulting global
    system_codes dict ({system_name: SystemData})."""
    importSystemInfo()
    return system_codes
def importSystemInfo():
    """Parse Config/source_data.txt into the module globals system_list and
    system_codes, returning (system_list, mod_list).

    If the file is actually an HTML error page (bad download), either restore
    the archived copy or warn the user, then re-import recursively.
    """
    filename = 'Config/source_data.txt'; x=0
    fn=filepath(filename); global system_list; system_list=[]; global system_codes; system_codes={}; mod_list=[]
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if '!DOCTYPE' in data:
            # Downloaded file is an HTML document: restore archive and retry
            fn2 = string.replace(fn,'.txt','_archive.txt')
            import shutil; shutil.copyfile(fn2,fn) ### Bad file was downloaded (with warning)
            importSystemInfo(); break
        elif '<html>' in data:
            print_out = "WARNING!!! Connection Error. Proxy may not be allowed from this location."
            try: WarningWindow(print_out,' Continue ')
            except NameError: print print_out
            importSystemInfo(); break
        else:
            try: sysname=t[0];syscode=t[1]
            except Exception: sysname=''
            try: mod = t[2]
            except Exception: mod = ''
            if x==0: x=1  # skip the header row
            else:
                system_list.append(sysname)
                ad = SystemData(syscode,sysname,mod)
                if len(mod)>1: mod_list.append(sysname)
                system_codes[sysname] = ad
    return system_list,mod_list
def exportSystemInfo():
    """Persist the module global system_codes to Config/source_data.txt
    (no-op when system_codes is empty)."""
    if len(system_codes)>0:
        out = open(filepath('Config/source_data.txt'),'w')
        out.write(string.join(['System','SystemCode','MOD_status'],'\t')+'\n')
        for sysname in system_codes:
            ad = system_codes[sysname]
            out.write(string.join([sysname,ad.SystemCode(),ad.MOD()],'\t')+'\n')
        out.close()
class SpeciesData:
    """A species record: short code (e.g. 'Hs'), full name and the list of
    compatible analysis algorithms."""
    def __init__(self, abrev, species, algorithms):
        self._abrev = abrev; self._species = species; self._algorithms = algorithms
    def SpeciesCode(self): return self._abrev
    def SpeciesName(self): return self._species
    def Algorithms(self): return self._algorithms
    ### bug fix: __repr__ called self.Report() but no Report() method existed,
    ### so repr() of a SpeciesData raised AttributeError
    def Report(self): return self.SpeciesCode()+'|'+self.SpeciesName()
    def __repr__(self): return self.Report()
def getSpeciesInfo():
    """Return {species_code: full_species_name} for all known species
    (used by AltAnalyze); forces the online species list to be merged."""
    global integrate_online_species
    integrate_online_species = 'yes'
    importSpeciesInfo()
    species_names = {}
    for species_full in species_codes:
        code = species_codes[species_full].SpeciesCode()
        species_names[code] = species_full
    return species_names
def remoteSpeciesInfo():
    """Load the species config (online list included) and return the global
    species_codes dict ({species_name: SpeciesData})."""
    global integrate_online_species; integrate_online_species = 'yes'
    importSpeciesInfo()
    return species_codes
def remoteSpeciesAlt():
    """Replicate the output of GO-Elite's species importer:
    {species_code: SpeciesData}, with the online species list merged in."""
    global integrate_online_species
    integrate_online_species = 'yes'
    importSpeciesInfo()
    by_code = {}
    for species in species_codes:
        sd = species_codes[species]
        by_code[sd.SpeciesCode()] = sd
    return by_code
def importSpeciesInfo():
    """Parse the species config file (species_all.txt when the online list is
    integrated, otherwise species.txt) into the module globals species_list
    and species_codes ({species_name: SpeciesData}).

    On an unparsable file (e.g. a downloaded HTML error page) the user is
    warned, the file is blanked and AltAnalyze restarts.
    """
    try:
        if integrate_online_species == 'yes': filename = 'Config/species_all.txt'
        else: filename = 'Config/species.txt'
    except Exception: filename = 'Config/species.txt'
    fn=filepath(filename); global species_list; species_list=[]; global species_codes; species_codes={}; x=0
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        try:
            try: abrev,species,algorithms = string.split(data,'\t')
            except Exception: abrev,species = string.split(data,'\t'); algorithms = ''
        except Exception:
            ### bug fix: was `if '!DOCTYPE':` (always true), which made the
            ### "Unknown file error" branch unreachable
            if '!DOCTYPE' in data: print_out = "A internet connection could not be established.\nPlease fix the problem before proceeding."
            else: print_out = "Unknown file error encountered."
            IndicatorWindow(print_out,'Continue')
            raw = export.ExportFile(fn); raw.close(); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
        if x==0: x=1  # skip the header row
        else:
            algorithms = string.split(algorithms,'|')
            species_list.append(species)
            sd = SpeciesData(abrev,species,algorithms)
            species_codes[species] = sd
def exportSpeciesInfo(species_codes):
    """Write a {species_name: SpeciesData} dict out to Config/species.txt
    (tab-delimited, with header; algorithms pipe-joined)."""
    out = open(filepath('Config/species.txt'),'w')
    out.write(string.join(['species_code','species_name','compatible_algorithms'],'\t')+'\n')
    for species in species_codes:
        sd = species_codes[species]
        row = [sd.SpeciesCode(),species,string.join(sd.Algorithms(),'|')]
        out.write(string.join(row,'\t')+'\n')
    out.close()
class ArrayGroupData:
    """One row of a groups file: a sample (array) header with its numeric
    group and group name; both group fields are mutable."""
    def __init__(self, array_header, group, group_name):
        self._array_header = array_header
        self._group = group
        self._group_name = group_name
    def Array(self):
        return self._array_header
    def Group(self):
        return self._group
    def setGroup(self,group):
        self._group = group
    def GroupName(self):
        return self._group_name
    def setGroupName(self,group_name):
        self._group_name = group_name
    def Report(self):
        return self.Array()
    def __repr__(self):
        return self.Report()
def importArrayGroupsSimple(expr_group_dir,cel_files):
    """Read a groups file into ([ArrayGroupData], {group_number: group_name}).

    Only rows whose array header appears in *cel_files* are kept (all rows
    when *cel_files* is empty). If *cel_files* and the groups file disagree,
    the mismatches are printed and NameError('Samples In Groups Not Found In
    Dir') is raised.
    """
    array_group_list = []; group_db={}
    fn=filepath(expr_group_dir)
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        array_header,group,group_name = string.split(data,'\t')
        if group_name == 'NA': group_name = 'None'
        #print [array_header],cel_files
        if (array_header in cel_files) or len(cel_files)==0: ### restrict import to array files listed in the groups file
            # NOTE(review): `kill` is undefined — referencing it deliberately
            # aborts with NameError after printing the offending group
            try: group = int(group); group_db[group]=group_name
            except ValueError: print group, group_name;kill
            agd = ArrayGroupData(array_header,group,group_name)
            array_group_list.append(agd)
    if len(cel_files)>0:
        if len(cel_files)!=len(array_group_list):
            # Mismatch between files on disk and rows in the groups file:
            # report both directions, then abort
            #print len(cel_files),len(array_group_list)
            #print cel_files
            array_group_list2=[]
            for i in array_group_list:
                if i.Array() not in cel_files:
                    print [i.Array()], 'not in CEL file dir (in groups file)'
                array_group_list2.append(i.Array())
            for i in cel_files:
                if i not in array_group_list2:
                    print [i], 'not in groups file (in CEL file dir)'
            raise NameError('Samples In Groups Not Found In Dir')
    return array_group_list,group_db
class ArrayData:
    """One row of Config/arrays.txt: an array platform with its manufacturer,
    constitutive probe source and compatible species codes."""
    def __init__(self, abrev, array, manufacturer, constitutive_source, species):
        self._abrev = abrev; self._array = array; self._manufacturer = manufacturer; self._species = species
        self._constitutive_source = constitutive_source
    def ArrayCode(self): return self._abrev
    def ArrayName(self): return self._array
    def Manufacturer(self): return self._manufacturer
    def ConstitutiveSource(self): return self._constitutive_source
    def SpeciesCodes(self): return self._species
    ### bug fix: setter was written backwards (`species = self._species`),
    ### making it a silent no-op instead of updating the stored species list
    def setSpeciesCodes(self,species): self._species = species
    def __repr__(self): return self.ArrayCode()+'|'+str(self.SpeciesCodes())+'|'+str(self.Manufacturer())
def remoteArrayInfo():
    """Load Config/arrays.txt and return the resulting global array_codes
    dict ({array_name: ArrayData})."""
    importArrayInfo()
    return array_codes
def importArrayInfo():
    """Parse Config/arrays.txt (header first) into the module globals
    array_list ([array names]) and array_codes ({array_name: ArrayData})."""
    global array_list; array_list = []
    global array_codes; array_codes = {}
    fn = filepath('Config/arrays.txt')
    header_seen = False
    for line in open(fn,'rU').readlines():
        abrev,array,manufacturer,constitutive_source,species = string.split(cleanUpLine(line),'\t')
        if not header_seen:
            header_seen = True
        else:
            array_list.append(array)
            array_codes[array] = ArrayData(abrev,array,manufacturer,constitutive_source,string.split(species,'|'))
    return array_list
def exportArrayInfo(array_codes):
    """Write an {array_name: ArrayData} dict out to Config/arrays.txt
    (tab-delimited, with header; species pipe-joined)."""
    out = open(filepath('Config/arrays.txt'),'w')
    out.write(string.join(['array_type','array_name','manufacturer','constitutive_source','compatible_species'],'\t')+'\n')
    for array in array_codes:
        ad = array_codes[array]
        row = [ad.ArrayCode(),array,ad.Manufacturer(),ad.ConstitutiveSource(),string.join(ad.SpeciesCodes(),'|')]
        out.write(string.join(row,'\t')+'\n')
    out.close()
class FileLocationData:
    """One Config/default-files.csv record: a status flag, a file/URL
    location and its species scope ('all' or a species code)."""
    def __init__(self, status, location, species):
        self._status = status; self._location = location; self._species = species
    def Status(self): return self._status
    def Location(self): return self._location
    def SetLocation(self,location): self._location = location
    def Species(self): return self._species
    ### bug fix: __repr__ called self.Report() but no Report() method existed,
    ### so repr() of a FileLocationData raised AttributeError
    def Report(self): return self.Status()+'|'+self.Location()+'|'+self.Species()
    def __repr__(self): return self.Report()
def importDefaultFileLocations():
    """Read Config/default-files.csv into {app: FileLocationData or
    [FileLocationData]} — species=='all' entries are stored singly, all
    other species accumulate in a list per app."""
    fn = filepath('Config/default-files.csv')
    file_location_defaults = {}
    for line in open(fn,'rU').readlines():
        ### CSV on disk (Excel would mis-parse otherwise); convert to tabs
        data = cleanUpLine(string.replace(line,',','\t'))
        ###Species can be multiple species - still keep in one field
        app,status,location,species = string.split(data,'\t')
        fl = FileLocationData(status, location, species)
        if species == 'all':
            file_location_defaults[app] = fl
        else:
            try: file_location_defaults[app].append(fl)
            except KeyError: file_location_defaults[app] = [fl]
    return file_location_defaults
def exportDefaultFileLocations(file_location_defaults):
    """Over-write Config/default-files.csv with the supplied defaults.

    NOTE(review): this function is defined three times in this module with
    identical bodies; only the last definition is effective at import time.
    """
    ### If the user supplies new defaults, over-write the existing
    fn=filepath('Config/default-files.csv'); data = open(fn,'w')
    for app in file_location_defaults:
        fl_list = file_location_defaults[app]
        try:
            # Species-specific apps store a list of FileLocationData objects
            for fl in fl_list:
                values = [app,fl.Status(),fl.Location(),fl.Species()]
                values = '"'+string.join(values,'","')+'"'+'\n'
                data.write(values)
        except Exception:
            # 'all'-species apps store a single FileLocationData (not
            # iterable), so the for-loop above raises and we write it singly
            fl = fl_list
            values = [app,fl.Status(),fl.Location(),fl.Species()]
            values = '"'+string.join(values,'","')+'"'+'\n'
            data.write(values)
    data.close()
def exportDefaultFileLocations(file_location_defaults):
    """Over-write Config/default-files.csv with the supplied defaults.

    NOTE(review): third identical definition of this function in the module;
    this one is the definition that actually takes effect at import time.
    """
    ### If the user supplies new defaults, over-write the existing
    fn=filepath('Config/default-files.csv'); data = open(fn,'w')
    for app in file_location_defaults:
        fl_list = file_location_defaults[app]
        try:
            # Species-specific apps store a list of FileLocationData objects
            for fl in fl_list:
                values = [app,fl.Status(),fl.Location(),fl.Species()]
                values = '"'+string.join(values,'","')+'"'+'\n'
                data.write(values)
        except Exception:
            # 'all'-species apps store a single FileLocationData (not
            # iterable), so the for-loop above raises and we write it singly
            fl = fl_list
            values = [app,fl.Status(),fl.Location(),fl.Species()]
            values = '"'+string.join(values,'","')+'"'+'\n'
            data.write(values)
    data.close()
def exportGroups(exp_file_location_db,array_group_list,filetype='Groups'):
    """Write (array, group number, group name) rows, sorted by group then
    array name, to each dataset's groups file (or batch file when
    filetype=='Batch')."""
    for dataset_name in exp_file_location_db:
        fl = exp_file_location_db[dataset_name]
        groups_file = fl.GroupsFile()
        if filetype =='Batch':
            groups_file = string.replace(groups_file,'groups.','batch.')
        out = open(filepath(groups_file),'w')
        sortable = [] ### Sort grouped results based on group number
        for agd in array_group_list:
            row = string.join([agd.Array(), str(agd.Group()), agd.GroupName()],'\t')+'\n'
            sortable.append(((agd.Group(),agd.Array()),row))
        sortable.sort()
        for entry in sortable:
            out.write(entry[-1])
        out.close()
def exportComps(exp_file_location_db,comp_group_list):
    """Write pairwise comparison group numbers (one tab-delimited pair per
    line) to each dataset's comps file."""
    for dataset_name in exp_file_location_db:
        fl = exp_file_location_db[dataset_name]
        out = open(filepath(fl.CompsFile()),'w')
        for comp_num, groups in comp_group_list:
            group1, group2 = groups
            out.write(string.join([str(group1), str(group2)],'\t')+'\n')
        out.close()
class Defaults:
    """A default platform record: array code, array name and species."""
    def __init__(self, abrev, array, species):
        self._abrev = abrev; self._array = array; self._species = species
    def ArrayCode(self): return self._abrev
    def ArrayName(self): return self._array
    def Species(self): return self._species
    ### bug fix: __repr__ called self.Report() but no Report() method existed,
    ### so repr() of a Defaults instance raised AttributeError
    def Report(self): return self.ArrayCode()+'|'+self.ArrayName()+'|'+self.Species()
    def __repr__(self): return self.Report()
def verifyFileLength(filename):
    """Return the number of lines in *filename*, capped at 10; returns 0 when
    the file is missing or unreadable."""
    line_count = 0
    try:
        fn = filepath(filename)
        for line in open(fn,'rU').xreadlines():
            line_count += 1
            if line_count > 9:
                break
    except Exception:
        pass
    return line_count
def getGeneSystem(filename):
    """Guess the gene-ID system of *filename* from its first few data rows:
    'Ensembl' if any first-column ID contains 'ENS', otherwise 'Symbol'
    (also the fallback when the file cannot be read)."""
    system = 'Symbol'
    try:
        fn = filepath(filename)
        header = True
        checked = 0
        for line in open(fn,'rU').xreadlines():
            if header:
                header = False
                continue
            id = string.split(line,'\t')[0]
            if 'ENS' in id:
                system = 'Ensembl'
            checked += 1
            if checked > 9:
                break
    except Exception:
        pass
    return system
def determinePlatform(filename):
    """Return the last non-empty line of *filename* (the recorded platform
    string), or '' when the file is missing or unreadable."""
    platform = ''
    try:
        fn = filepath(filename)
        for line in open(fn,'rU').xreadlines():
            #print [line]
            if len(line) > 0:
                platform = line
    except Exception:
        pass
    return platform
def importDefaults(array_type,species):
    """Collect the four per-platform default parameter tuples for
    *array_type*: expression, alternative-exon, functional and GO-Elite.

    (*species* is accepted for interface compatibility but not used here.)
    """
    #perform_alt_analysis, expression_data_format, dabg_p, expression_threshold, avg_all_for_ss, include_raw_data
    expr_defaults = importDefaultInfo('Config/defaults-expr.txt',array_type)
    #analysis_method, alt_exon_fold_variable, p_threshold, filter_probeset_types, gene_expression_cutoff, perform_permutation_analysis, permute_p_threshold,run_MiDAS, export_splice_index_values = values
    alt_exon_defaults = importDefaultInfo('Config/defaults-alt_exon.txt',array_type)
    #analyze_functional_attributes,microRNA_prediction_method = functional_analysis_defaults
    functional_analysis_defaults = importDefaultInfo('Config/defaults-funct.txt',array_type)
    goelite_defaults = importDefaultInfo('Config/defaults-goelite.txt',array_type)
    return expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults
def importDefaultInfo(filename,array_type):
    """Return the default parameter values for *array_type* from one of the
    Config/defaults-*.txt files.

    The file flavour is inferred from *filename* ('-expr', '-alt', '-funct'
    or '-goelite'), each with its own fixed column layout; returns a tuple or
    list of values for the matching array row, or None when no row matches.
    """
    fn=filepath(filename)
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        if '-expr' in filename:
            array_abrev, dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_algorithm, FDR_statistic, batch_effects, marker_finder, visualize_results, run_lineage_profiler, run_goelite = string.split(data,'\t')
            if array_type == array_abrev:
                return dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_algorithm, FDR_statistic, batch_effects, marker_finder, visualize_results, run_lineage_profiler, run_goelite
        if '-alt' in filename:
            array_abrev, analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_splice_index_values, run_MiDAS, calculate_splicing_index_p, filter_for_AS = string.split(data,'\t')
            if array_type == array_abrev:
                return [analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_splice_index_values, run_MiDAS, calculate_splicing_index_p, filter_for_AS]
        if '-funct' in filename:
            array_abrev, analyze_functional_attributes, microRNA_prediction_method = string.split(data,'\t')
            if array_type == array_abrev:
                return [analyze_functional_attributes,microRNA_prediction_method]
        if '-goelite' in filename:
            array_abrev, ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, pathway_permutations, mod, returnPathways, get_additional = string.split(data,'\t')
            if array_type == array_abrev:
                return [ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, pathway_permutations, mod, returnPathways, get_additional]
class OptionData:
    """One row of Config/options.txt: a user-configurable analysis option and
    how it is presented in the GUI (title, widget type, per-array choices
    and the global default)."""
    def __init__(self,option,displayed_title,display_object,notes,array_options,global_default):
        self._option = option; self._displayed_title = displayed_title; self._notes = notes
        self._array_options = array_options; self._display_object = display_object
        ### bug fix: _default_option was only assigned when global_default was
        ### non-empty, so DefaultOption() raised AttributeError otherwise
        self._default_option = global_default
        if len(global_default)>0:
            if '|' in global_default:
                self._default_option = global_default.split('|') ### store multi-value defaults as a list
    def Option(self): return self._option
    def VariableName(self): return self._option
    def Display(self): return self._displayed_title
    def setDisplay(self,display_title): self._displayed_title = display_title
    def DisplayObject(self): return self._display_object
    def Notes(self): return self._notes
    def setNotes(self,notes): self._notes = notes  # was defined twice; duplicate removed
    def DefaultOption(self): return self._default_option
    def setDefaultOption(self,default_option): self._default_option = default_option
    def ArrayOptions(self): return self._array_options
    def setArrayOptions(self,array_options): self._array_options = array_options
    def Options(self): return self._array_options
    def __repr__(self): return self.Option()+'|'+self.Display()
def importUserOptions(array_type,vendor=None):
    """Parse Config/options.txt into (option_list_db, option_db) for the GUI.

    option_list_db maps a GUI menu-group name to its option names;
    option_db maps each option name to an OptionData. Display text is
    rewritten for RNASeq data and Agilent arrays. The header row is used to
    locate the column holding *array_type*'s per-platform choices.
    """
    filename = 'Config/options.txt'; option_db={}; option_list_db={}
    fn=filepath(filename); x=0
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        data = string.replace(data,'\k','\n') ###Used \k in the file instead of \n, since these are removed above
        if array_type == 'RNASeq':
            # Re-word array-centric text for sequencing input
            data = string.replace(data,'probeset','junction')
            data = string.replace(data,'probe set','junction')
            data = string.replace(data,'CEL file','BED, BAM, TAB or TCGA junction file')
        if vendor == 'Agilent':
            if 'CEL file' in data:
                data = string.replace(data,'CEL file','Feature Extraction file')
                data = string.replace(data,' (required)','')
        t = string.split(data,'\t')
        #option,mac_displayed_title,pc_displayed_title,pc_display2,linux_displayed_title,display_object,group,notes,description,global_default = t[:10]
        option,displayed_title,display_object,group,notes,description,global_default = t[:7]
        """
        if os.name == 'nt':
            import platform
            if '64' in platform.machine(): displayed_title = pc_display2
            elif '32' in platform.machine(): displayed_title = pc_display2
            elif '64bit' in platform.architecture(): displayed_title = pc_display2
            else: displayed_title = pc_displayed_title
        elif 'darwin' in sys.platform: displayed_title = mac_displayed_title
        elif 'linux' in sys.platform: displayed_title = linux_displayed_title
        else: displayed_title = linux_displayed_title
        """
        if 'junction' in displayed_title: displayed_title+=' '
        """if array_type == 'RNASeq':
            if option == 'dabg_p': ### substitute the text for the alternatitve text in notes
                displayed_title = notes"""
        if x == 0:
            i = t.index(array_type) ### Index position of the name of the array_type selected by user (or arbitrary to begin with)
            x = 1
        else:
            array_options = t[i]
            if array_type == "3'array":
                """
                if 'normalize_gene_data' in data and vendor != 'Agilent':
                    array_options = 'NA' ### only applies currently to Agilent arrays """
                if 'channel_to_extract' in data and vendor != 'Agilent':
                    array_options = 'NA' ### only applies currently to Agilent arrays
            array_options = string.split(array_options,'|')
            od = OptionData(option,displayed_title,display_object,notes,array_options,global_default)
            option_db[option] = od
            try: option_list_db[group].append(option) ###group is the name of the GUI menu group
            except KeyError: option_list_db[group] = [option]
    return option_list_db,option_db
class SummaryResults:
    """Tkinter/Pmw window showing the 'AltAnalyze Result Summary' banner and
    a scrolled frame; blocks in mainloop until the window is closed.

    NOTE(review): showLink/LINKS are set up for clickable hyperlinks but no
    text widget tags are bound here — presumably wired up elsewhere or
    vestigial; confirm before removing.
    """
    def __init__(self):
        def showLink(event):
            idx= int(event.widget.tag_names(CURRENT)[1])
            webbrowser.open(LINKS[idx])
        LINKS=('http://www.altanalyze.org','')
        self.LINKS = LINKS
        # Toplevel may come from a star-import or the Tkinter module
        try: tl = Toplevel()
        except Exception: tl = Tkinter.Toplevel()
        tl.title('AltAnalyze')
        filename = 'Config/icon.gif'
        fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(tl); can.pack(side='top'); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW); use_scroll = 'no'
        label_text_str = 'AltAnalyze Result Summary'; height = 250; width = 700
        self.sf = PmwFreeze.ScrolledFrame(tl,
                labelpos = 'n', label_text = label_text_str,
                usehullsize = 1, hull_width = width, hull_height = height)
        self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
        self.frame = self.sf.interior()
        tl.mainloop()
class FeedbackWindow:
    """Modal two-button prompt; the label of the clicked button is recorded
    under the 'button' key and can be read back via ButtonSelection()."""
    def __init__(self,message,button_text,button_text2):
        self.message = message
        self.button_text = button_text
        self.button_text2 = button_text2
        parent = Tk()
        self._parent = parent
        nulls = '\t\t\t\t\t\t\t'
        parent.title('Attention!!!')
        self._user_variables = {}
        filename = 'Config/warning_big.gif'
        fn = filepath(filename)
        img = PhotoImage(file=fn)
        can = Canvas(parent)
        can.pack(side='left',padx = 10)
        can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        first_button = Button(parent, text=self.button_text, command=self.button1)
        first_button.pack(side = 'bottom', padx = 5, pady = 5)
        second_button = Button(parent, text=self.button_text2, command=self.button2)
        second_button.pack(side = 'bottom', padx = 5, pady = 5)
        parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
        parent.mainloop()
    def button1(self):
        # Record which label was chosen, then dismiss the dialog
        self._user_variables['button'] = self.button_text
        self._parent.destroy()
    def button2(self):
        self._user_variables['button'] = self.button_text2
        self._parent.destroy()
    def ButtonSelection(self):
        return self._user_variables
    def deleteWindow(self):
        # Closing through the window manager terminates the program
        self._parent.destroy()
        sys.exit()
class IndicatorWindowSimple:
    """One-button warning dialog: shows *message* beside the warning icon and
    blocks until the acknowledgement button closes the window."""
    def __init__(self,message,button_text):
        self.message = message
        self.button_text = button_text
        parent = Tk()
        self._parent = parent
        nulls = '\t\t\t\t\t\t\t'
        parent.title('Attention!!!')
        filename = 'Config/warning_big.gif'
        fn = filepath(filename)
        img = PhotoImage(file=fn)
        can = Canvas(parent)
        can.pack(side='left',padx = 10)
        can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        ack_button = Button(parent, text=self.button_text, command=parent.destroy)
        ack_button.pack(side = 'bottom', padx = 5, pady = 5)
        parent.mainloop()
class IndicatorWindow:
    """Warning dialog offering 'Quit' (terminates AltAnalyze) and a
    caller-labelled continue button (just closes the dialog)."""
    def __init__(self,message,button_text):
        self.message = message
        self.button_text = button_text
        parent = Tk()
        self._parent = parent
        nulls = '\t\t\t\t\t\t\t'
        parent.title('Attention!!!')
        filename = 'Config/warning_big.gif'
        fn = filepath(filename)
        img = PhotoImage(file=fn)
        can = Canvas(parent)
        can.pack(side='left',padx = 10)
        can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        quit_button = Button(parent, text='Quit', command=self.quit)
        quit_button.pack(side = 'bottom', padx = 5, pady = 5)
        proceed_button = Button(parent, text=self.button_text, command=parent.destroy)
        proceed_button.pack(side = 'bottom', padx = 5, pady = 5)
        parent.mainloop()
    def quit(self):
        # destroy() can raise if the window is already gone; exit either way
        try:
            self._parent.quit(); self._parent.destroy(); sys.exit()
        except Exception:
            self._parent.quit(); sys.exit()
class DownloadWindow:
    """Dialog presenting two (or three) download choices; the selected option
    number (1-3) is stored under 'selected_option' in the shared
    user_variables dict, returned via Results()."""
    def __init__(self,message,option1,option2):
        self._user_variables = user_variables  # module-level results store
        # A two-item option2 is unpacked into a second and a third choice
        if len(option2)==2:
            option2,option3 = option2
            num_options = 3
            self.option3 = option3
        else:
            num_options = 2
        self.message = message
        self.option1 = option1
        self.option2 = option2
        parent = Tk()
        self._parent = parent
        nulls = '\t\t\t\t\t\t\t'
        parent.title('Attention!!!')
        filename = 'Config/warning_big.gif'
        fn = filepath(filename)
        img = PhotoImage(file=fn)
        can = Canvas(parent)
        can.pack(side='left',padx = 10)
        can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        choice1 = Button(parent, text=self.option1, command=self.selected1)
        choice1.pack(side = 'bottom', padx = 5, pady = 5)
        choice2 = Button(parent, text=self.option2, command=self.selected2)
        choice2.pack(side = 'bottom', padx = 5, pady = 5)
        if num_options == 3:
            choice3 = Button(parent, text=self.option3, command=self.selected3)
            choice3.pack(side = 'bottom', padx = 5, pady = 5)
        parent.mainloop()
    def selected1(self):
        self._user_variables['selected_option'] = 1
        self._parent.destroy()
    def selected2(self):
        self._user_variables['selected_option'] = 2
        self._parent.destroy()
    def selected3(self):
        self._user_variables['selected_option'] = 3
        self._parent.destroy()
    def Results(self):
        return self._user_variables
class IndicatorLinkOutWindow:
    """Warning dialog with a 'Continue' button plus a button that opens *url*
    in the default web browser."""
    def __init__(self,message,button_text,url):
        self.message = message
        self.button_text = button_text
        nulls = '\t\t\t\t\t\t\t'
        parent = Tk()
        self._parent = parent
        parent.title('Attention!!!')
        self.url = url
        filename = 'Config/warning_big.gif'
        fn = filepath(filename)
        img = PhotoImage(file=fn)
        can = Canvas(parent)
        can.pack(side='left',padx = 10)
        can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        continue_button = Button(parent, text='Continue', command=parent.destroy)
        continue_button.pack(side = 'bottom', padx = 5, pady = 5)
        linkout_button = Button(parent, text=self.button_text, command=self.linkout)
        linkout_button.pack(side = 'bottom', padx = 5, pady = 5)
        parent.mainloop()
    def linkout(self):
        webbrowser.open(self.url)
class IndicatorChooseWindow:
    """Dialog asking the user to choose a directory (intended to locate an
    Affymetrix Power Tools installation); the validated path is exposed via
    Folder()."""
    def __init__(self,message,button_text):
        # NOTE(review): "self .button_text" (space before the dot) is valid
        # Python, though unconventional.
        self.message = message; self .button_text = button_text
        parent = Tk(); self._parent = parent; nulls = '\t\t\t\t\t\t\t'; parent.title('Attention!!!')
        filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(parent); can.pack(side='left'); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        #text_button = Button(parent, text=self.button_text, command=parent.destroy); text_button.pack(side = 'bottom', padx = 5, pady = 5)
        option=''
        # The captured `callback` default is never used inside the handler;
        # it simply forwards to chooseDirectory with the (empty) option tag.
        def foldercallback(callback=self.callback,option=option): self.chooseDirectory(option)
        choose_win = Button(self._parent, text=self.button_text,command=foldercallback); choose_win.pack(padx = 3, pady = 3)
        quit_button = Button(parent, text='Quit', command=self.quit); quit_button.pack(padx = 3, pady = 3)
        parent.mainloop()
    def quit(self):
        # destroy() may raise once the window is already closed; exit either way
        try: self._parent.quit(); self._parent.destroy(); sys.exit()
        except Exception: self._parent.quit(); sys.exit()
    def callback(self, tag, option): null = ''  # no-op placeholder
    def chooseDirectory(self,option):
        tag = tkFileDialog.askdirectory(parent=self._parent)
        ### Below is code specific for grabbing the APT location
        import ResultsExport_module
        apt_location = ResultsExport_module.getAPTDir(tag)
        if 'bin' not in apt_location:
            print_out = "WARNING!!! Unable to find a valid Affymetrix Power Tools directory."
            # WarningWindow deliberately raises NameError when the GUI path
            # fails, hence the fallback print (Python-2 syntax).
            try: WarningWindow(print_out,' Continue ')
            except NameError: print print_out
            self._tag = ''
        else: self._tag = apt_location
        self.destroy_win()
    def destroy_win(self):
        try: self._parent.quit(); self._parent.destroy()
        except Exception: self._parent.quit(); sys.exit()
    def Folder(self): return self._tag
class WarningWindow:
    """Show *warning* in a tkMessageBox error dialog titled *window_name*.

    When the messagebox cannot be shown (e.g. no display), the warning is
    printed and the bare name `kill` deliberately raises NameError: call
    sites in this file use `except NameError` to detect that the GUI warning
    path failed, so this must not be "fixed" into a clean exit.
    """
    def __init__(self,warning,window_name):
        try: tkMessageBox.showerror(window_name, warning)
        except Exception:
            print warning
            #print window_name; sys.exit()
            kill  # intentional NameError -- see class docstring
class InfoWindow:
    """Display *dialogue* in an informational tkMessageBox titled *header*;
    falls back to printing the text when no GUI is available."""
    def __init__(self,dialogue,header):
        try: tkMessageBox.showinfo(header, dialogue)
        except Exception:
            print dialogue
            #print 'Attempted to open a GUI that is not accessible...exiting program';sys.exit()
            #print "Analysis finished...exiting AltAnalyze."; sys.exit()
class MainMenu:
    # AltAnalyze introduction window: shows the logo with a 'Begin Analysis'
    # button (destroys the window so the caller proceeds) and an
    # 'About AltAnalyze' button that opens the info Toplevel.
    def __init__(self):
        parent = Tk()
        self._parent = parent
        parent.title('AltAnalyze: Introduction')
        self._user_variables={}
        filename = 'Config/logo.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(parent); can.pack(side='top',fill=BOTH); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        """
        ### Create and pack a horizontal RadioSelect widget.
        def buttoncallback(tag,callback=self.callback):
            callback(tag)
        horiz = PmwFreeze.RadioSelect(parent,
                labelpos = 'w', command = buttoncallback,
                label_text = 'AltAnalyze version 1.155 Main', frame_borderwidth = 2,
                frame_relief = 'ridge'
        ); horiz.pack(fill = 'x', padx = 10, pady = 10)
        for text in ['Continue']: horiz.add(text)
        """
        ### Add some buttons to the horizontal RadioSelect
        continue_to_next_win = Tkinter.Button(text = 'Begin Analysis', command = parent.destroy)
        continue_to_next_win.pack(side = 'bottom', padx = 5, pady = 5);
        info_win = Button(self._parent, text="About AltAnalyze", command=self.info)
        info_win.pack(side = 'bottom', padx = 5, pady = 5)
        # Closing via the window manager exits the program entirely
        parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
        parent.mainloop()
    def info(self):
        # Opens an 'about' Toplevel with version/license text and a clickable
        # link to altanalyze.org; the triple-quoted messagebox variant below
        # is retired code kept for reference.
        """
        ###Display the information using a messagebox
        about = 'AltAnalyze version 2.0.9 beta.\n'
        about+= 'AltAnalyze is an open-source, freely available application covered under the\n'
        about+= 'Apache open-source license. Additional information can be found at:\n'
        about+= 'http://www.altanalyze.org\n'
        about+= '\nDeveloped by:\n\tNathan Salomonis\n\tBruce Conklin\nGladstone Institutes 2008-2011'
        tkMessageBox.showinfo("About AltAnalyze",about,parent=self._parent)
        """
        def showLink(event):
            # The second tag on the clicked region is the index into LINKS
            idx= int(event.widget.tag_names(CURRENT)[1])
            webbrowser.open(LINKS[idx])
        LINKS=('http://www.altanalyze.org','')
        self.LINKS = LINKS
        tl = Toplevel() ### Create a top-level window separate than the parent
        txt=Text(tl)
        #filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
        #can = Canvas(tl); can.pack(side='left'); can.config(width=img.width(), height=img.height())
        #can.create_image(2, 2, image=img, anchor=NW)
        txt.pack(expand=True, fill="both")
        txt.insert(END, 'AltAnalyze version 2.0.9.3 beta.\n')
        txt.insert(END, 'AltAnalyze is an open-source, freely available application covered under the\n')
        txt.insert(END, 'Apache open-source license. Additional information can be found at:\n')
        txt.insert(END, "http://www.altanalyze.org\n", ('link', str(0)))
        txt.insert(END, '\nDeveloped by:\n\tNathan Salomonis\n\tBruce Conklin\nGladstone Institutes 2008-2011')
        txt.tag_config('link', foreground="blue", underline = 1)
        txt.tag_bind('link', '<Button-1>', showLink)
    def deleteWindow(self):
        #tkMessageBox.showwarning("Quit Selected","Use 'Quit' button to end program!",parent=self._parent)
        self._parent.destroy(); sys.exit()
    def callback(self, tag):
        #print 'Button',[option], tag,'was pressed.'
        # Record the pressed tag so callers can read it from _user_variables
        self._user_variables['continue'] = tag
class LinkOutWindow:
    """Text window that displays the lines of *text_list* followed by a
    clickable link.

    ``output`` is a ``(url, text_list)`` tuple; clicking the link text opens
    *url* (index 0 of LINKS) in the default browser.

    Bug fixed: the original created the close button with an undefined
    ``parent`` variable and a never-assigned ``self.button_text``, which
    raised NameError before the window could run; the button is now parented
    to the Toplevel window with a fixed 'Continue' label and the Toplevel's
    own mainloop is used.
    """
    def __init__(self,output):
        ### Text window with link included
        url,text_list = output
        def showLink(event):
            # The second tag on the clicked region is the index into LINKS
            idx= int(event.widget.tag_names(CURRENT)[1])
            webbrowser.open(LINKS[idx])
        LINKS=(url,'')
        self.LINKS = LINKS
        tl = Toplevel() ### Create a top-level window separate than the parent
        txt=Text(tl)
        txt.pack(expand=True, fill="both")
        for str_item in text_list:
            txt.insert(END, str_item+'\n')
        txt.insert(END, "http://www.altanalyze.org\n", ('link', str(0)))
        txt.tag_config('link', foreground="blue", underline = 1)
        txt.tag_bind('link', '<Button-1>', showLink)
        text_button = Button(tl, text='Continue', command=tl.destroy)
        text_button.pack(side = 'bottom', padx = 5, pady = 5)
        tl.mainloop()
def exportCELFileList(cel_files,cel_file_dir):
    """Write a cel_files.txt manifest ('cel_files' header plus one file name
    per line) into *cel_file_dir* and return the path written."""
    fn = cel_file_dir+'/cel_files.txt'
    manifest_lines = ['cel_files'] + list(cel_files)  # header first
    out = open(fn,'w')
    out.write('\n'.join(manifest_lines)+'\n')
    out.close()
    return fn
def predictGroupsAndComps(cel_files,output_dir,exp_name):
    """Guess experimental groups from CEL file names and write template
    groups./comps. files for the experiment.

    Each file name is stripped of .CEL/.cel and split on '-', '_' and '.'.
    When exactly one delimiter style produces two-part names across the set
    and at least two distinct prefixes (groups) result, a groups file
    (array, group number, group name) and an all-vs-all comps file are
    written.  Returns 'yes' when the files were populated; otherwise 'no'
    (the empty files are removed and only a groups template is attempted
    from the expression-file headers).
    """
    fn1=output_dir+'/ExpressionInput/groups.'+exp_name+'.txt'; gdata = export.ExportFile(fn1)
    fn2=output_dir+'/ExpressionInput/comps.'+exp_name+'.txt'; cdata = export.ExportFile(fn2)
    fn3=output_dir+'/ExpressionInput/exp.'+exp_name+'.txt'
    delimited_db={}; delim_type={}; files_exported = 'no'
    for cel_file in cel_files:
        cel_name = cel_file
        cel_file = string.replace(cel_file,'.CEL','')
        cel_file = string.replace(cel_file,'.cel','')
        dashed_delim = string.split(cel_file,'-')
        dot_delim = string.split(cel_file,'.')
        under_delim = string.split(cel_file,'_')
        # Record which delimiter applied; the prefix becomes the group name
        if len(dashed_delim) == 2:
            delim_type[1]=None
            try: delimited_db[dashed_delim[0]].append(cel_name)
            except KeyError: delimited_db[dashed_delim[0]] = [cel_name]
        elif len(under_delim) == 2:
            delim_type[2]=None
            try: delimited_db[under_delim[0]].append(cel_name)
            except KeyError: delimited_db[under_delim[0]] = [cel_name]
        elif len(dot_delim) == 2:
            delim_type[3]=None
            try: delimited_db[dot_delim[0]].append(cel_name)
            except KeyError: delimited_db[dot_delim[0]] = [cel_name]
    if len(delim_type)==1 and len(delimited_db)>1: ###only 1 type of delimiter used and at least 2 groups present
        group_index=0; group_db={}; files_exported = 'yes'
        for group in delimited_db:
            group_index+=1; group_db[str(group_index)]=None
            for array in delimited_db[group]:
                gdata.write(string.join([array,str(group_index),group],'\t')+'\n')
        for index1 in group_db: ### Create a comps file for all possible comps
            for index2 in group_db:
                if index1 != index2:
                    cdata.write(string.join([index1,index2],'\t')+'\n')
    gdata.close(); cdata.close()
    if files_exported == 'no':
        os.remove(fn1); os.remove(fn2)
        try: ExpressionBuilder.checkArrayHeaders(fn3,fn1) ### Create just the groups template file
        except Exception: null=[] ### This error will more likely occur since no expression file has been created
    return files_exported
def formatArrayGroupsForGUI(array_group_list, category = 'GroupArrays'):
    """Build OptionData entries (one simple-entry per array, preset to its
    group name) shaped like the parsed options.txt Config data so the GUI can
    render them; returns (option_db, option_list)."""
    option_db = {}
    option_list = {}
    if category != 'BatchArrays':
        ### Checkbox at the top offering automated group assignment (e.g. single-cell ICGS)
        option = 'PredictGroups'
        displayed_title = 'Run de novo cluster prediction (ICGS) to discover groups, instead'
        od = OptionData(option,displayed_title,'single-checkbox','',['---'],'')
        option_db[option] = od
        option_list[category] = [option]
    for agd in array_group_list:
        option = agd.Array()
        # Title mirrors the array name; the entry is preset to the group name
        od = OptionData(option,option,'simple_entry','',[agd.GroupName()],'')
        option_db[option] = od
        option_list.setdefault(category, []).append(option)
    return option_db,option_list
def importExpressionFiles():
    """Scan ExpressionInput/<array_type> (module-level array_type) for exp.*
    files and pair each with its stats./groups./comps. counterparts.

    Returns {exp filename: ExpressionFileLocationData}.  A file is only
    registered when both its groups. and comps. files are present in the
    directory listing.
    """
    exp_file_location_db={}; exp_files=[]; parent_dir = 'ExpressionInput'+'/'+array_type
    fn =filepath(parent_dir+'/'); dir_files = read_directory('/'+parent_dir)
    stats_file_dir=''
    for file in dir_files:
        if 'exp.' in file: exp_files.append(file)
    for file in exp_files:
        stats_file = string.replace(file,'exp.','stats.')
        groups_file = string.replace(file,'exp.','groups.')
        comps_file = string.replace(file,'exp.','comps.')
        if stats_file in dir_files: stats_file_dir = fn+stats_file
        if groups_file in dir_files and comps_file in dir_files:
            groups_file_dir = fn+groups_file; comps_file_dir = fn+comps_file
        # NOTE(review): if the first exp. file lacks groups/comps companions,
        # groups_file_dir/comps_file_dir are unbound here (NameError) and later
        # files silently reuse the previous pairing -- confirm intended.
        exp_file_dir = fn+file
        fl = ExpressionFileLocationData(exp_file_dir,stats_file_dir,groups_file_dir,comps_file_dir)
        exp_file_location_db[file] = fl
    return exp_file_location_db
class ExpressionFileLocationData:
    """Locations of the exp./stats./groups./comps. files for one expression
    dataset plus the many analysis options attached to it.

    Most attributes follow a plain setX(...)/X() accessor-pair convention;
    only non-obvious accessors are commented individually.

    Bug fixed: PredictGroups() and PredictGroupsObjects() previously
    evaluated the stored attribute without returning it (so callers always
    received None); both now return the stored value.
    """
    def __init__(self, exp_file, stats_file, groups_file, comps_file):
        self._exp_file = exp_file; self._stats_file = stats_file; self._groups_file = groups_file
        self._comps_file = comps_file; self.biotypes='NA'
        import platform; self.architecture = platform.architecture()[0]  # e.g. '64bit'
        self.normalize_feature_exp = 'NA'
        self.normalize_gene_data = 'NA'
        self.runKallisto =''
    ### Primary file locations
    def setExpFile(self, exp_file):self._exp_file=exp_file
    def ExpFile(self): return self._exp_file
    def StatsFile(self): return self._stats_file
    def CountsFile(self):
        # Prefer a non-empty counts. companion of the exp. file; otherwise
        # fall back to the expression file itself.
        import AltAnalyze
        counts_file = string.replace(self.ExpFile(),'exp.','counts.')
        file_length = AltAnalyze.verifyFileLength(counts_file)
        if file_length>0:
            return counts_file
        else:
            return self.ExpFile()
    def GroupsFile(self): return self._groups_file
    def CompsFile(self): return self._comps_file
    ### Platform / external tool configuration setters
    def setArchitecture(self,architecture): self.architecture = architecture
    def setAPTLocation(self,apt_location): self._apt_location = osfilepath(apt_location)
    def setInputCDFFile(self,cdf_file): self._cdf_file = osfilepath(cdf_file)
    def setCLFFile(self,clf_file): self._clf_file = osfilepath(clf_file)
    def setBGPFile(self,bgp_file): self._bgp_file = osfilepath(bgp_file)
    def setCELFileDir(self,cel_file_dir): self._cel_file_dir = osfilepath(cel_file_dir)
    def setFeatureNormalization(self,normalize_feature_exp): self.normalize_feature_exp = normalize_feature_exp
    def setExcludeLowExpressionExons(self, excludeNonExpExons): self.excludeNonExpExons = excludeNonExpExons
    def setNormMatrix(self,normalize_gene_data): self.normalize_gene_data = normalize_gene_data
    def setProbabilityStatistic(self,probability_statistic): self.probability_statistic = probability_statistic
    def setFDRStatistic(self, FDR_statistic): self.FDR_statistic = FDR_statistic
    def setBatchEffectRemoval(self,batch_effects): self.batch_effects = batch_effects
    def setProducePlots(self,visualize_results): self.visualize_results = visualize_results
    def setPerformLineageProfiler(self, run_lineage_profiler): self.run_lineage_profiler = run_lineage_profiler
    def setCompendiumType(self,compendiumType): self.compendiumType = compendiumType
    def setCompendiumPlatform(self,compendiumPlatform): self.compendiumPlatform = compendiumPlatform
    def setMultiThreading(self, multithreading): self.multithreading = multithreading
    def setVendor(self,vendor): self.vendor = vendor
    def setPredictGroups(self, predictGroups): self.predictGroups = predictGroups
    def setPredictGroupsParams(self, predictGroupsObjects): self.predictGroupsObjects = predictGroupsObjects
    def setGraphicLinks(self,graphic_links): self.graphic_links = graphic_links ### file location of image files
    def setSTDOUT(self, stdout): self.stdout = stdout
    ### Threshold setters: coerce to float when possible, otherwise keep as given
    def setExonExpThreshold(self,exon_exp_threshold):
        try: exon_exp_threshold = float(exon_exp_threshold)
        except Exception: exon_exp_threshold = exon_exp_threshold
        self.exon_exp_threshold = exon_exp_threshold
    def setExonRPKMThreshold(self,exon_rpkm_threshold):
        try: exon_rpkm_threshold = float(exon_rpkm_threshold)
        except Exception: exon_rpkm_threshold = exon_rpkm_threshold
        self.exon_rpkm_threshold = exon_rpkm_threshold
    def setGeneExpThreshold(self,gene_exp_threshold):
        try: gene_exp_threshold = float(gene_exp_threshold)
        except Exception: gene_exp_threshold = gene_exp_threshold
        self.gene_exp_threshold = gene_exp_threshold
    def setJunctionExpThreshold(self,junction_exp_threshold):
        try: junction_exp_threshold = float(junction_exp_threshold)
        except Exception: junction_exp_threshold = junction_exp_threshold
        self.junction_exp_threshold = junction_exp_threshold
    def setRPKMThreshold(self,rpkm_threshold):
        try: rpkm_threshold = float(rpkm_threshold)
        except Exception: rpkm_threshold = rpkm_threshold
        self.rpkm_threshold = rpkm_threshold
    def setMarkerFinder(self,marker_finder): self.marker_finder = marker_finder
    ### Accessors
    def FDRStatistic(self): return self.FDR_statistic
    def multiThreading(self): return self.multithreading
    def STDOUT(self): return self.stdout
    def ExonExpThreshold(self): return self.exon_exp_threshold
    def BatchEffectRemoval(self): return self.batch_effects
    def MarkerFinder(self): return self.marker_finder
    def PredictGroups(self): return self.predictGroups ### fixed: was missing return
    def PredictGroupsObjects(self): return self.predictGroupsObjects ### fixed: was missing return
    def ExonRPKMThreshold(self): return self.exon_rpkm_threshold
    def GeneExpThreshold(self): return self.gene_exp_threshold
    def JunctionExpThreshold(self): return self.junction_exp_threshold
    def RPKMThreshold(self): return self.rpkm_threshold
    def ProbabilityStatistic(self): return self.probability_statistic
    def ProducePlots(self): return self.visualize_results
    def PerformLineageProfiler(self): return self.run_lineage_profiler
    def CompendiumType(self): return self.compendiumType
    def CompendiumPlatform(self): return self.compendiumPlatform
    def GraphicLinks(self): return self.graphic_links
    def setArrayType(self,array_type): self._array_type = array_type
    def setOutputDir(self,output_dir): self._output_dir = output_dir
    def setBiotypes(self,biotypes): self.biotypes = biotypes
    def setRootDir(self,parent_dir):
        ### Get directory above ExpressionInput
        split_dirs = string.split(parent_dir,'ExpressionInput')
        root_dir = split_dirs[0]
        self._root_dir = root_dir + '/'
    def setXHybRemoval(self,xhyb): self._xhyb = xhyb
    def XHybRemoval(self): return self._xhyb
    def setExonBedBuildStatus(self,bed_build_status): self.bed_build_status = bed_build_status
    def setRunKallisto(self, runKallisto): self.runKallisto = runKallisto
    def RunKallisto(self): return self.runKallisto
    def setChannelToExtract(self,channel_to_extract): self.channel_to_extract = channel_to_extract
    def ExonBedBuildStatus(self): return self.bed_build_status
    def ChannelToExtract(self): return self.channel_to_extract
    def FeatureNormalization(self): return self.normalize_feature_exp
    def setUseJunctionsForGeneExpression(self, use_junctions_for_geneexpression): self.use_junctions_for_geneexpression = use_junctions_for_geneexpression
    def useJunctionsForGeneExpression(self):
        # Defaults to False when the option was never set
        try: return self.use_junctions_for_geneexpression
        except Exception: return False
    def excludeLowExpressionExons(self): return self.excludeNonExpExons
    def NormMatrix(self): return self.normalize_gene_data
    def RootDir(self): return self._root_dir
    def APTLocation(self): return self._apt_location
    def InputCDFFile(self): return self._cdf_file
    def CLFFile(self): return self._clf_file
    def BGPFile(self): return self._bgp_file
    def CELFileDir(self): return self._cel_file_dir
    def BEDFileDir(self): return self._cel_file_dir+'/'
    def ArrayType(self): return self._array_type
    def OutputDir(self): return self._output_dir
    def Vendor(self): return self.vendor
    def setSpecies(self, species): self.species = species
    def Species(self): return self.species
    def setPlatformType(self, platformType): self.platformType = platformType
    def setAnalysisMode(self, analysis_mode): self.analysis_mode = analysis_mode
    def setMLP(self,mlpr): self.mlp = mlpr
    def setExonMapFile(self, exonMapFile): self.exonMapFile = exonMapFile
    def ExonMapFile(self): return self.exonMapFile
    def setCorrelationDirection(self, correlationDirection): self.correlationDirection = correlationDirection
    def CorrelationDirection(self): return self.correlationDirection
    def MLP(self): return self.mlp
    def PlatformType(self): return self.platformType
    def AnalysisMode(self): return self.analysis_mode
    def DatasetFile(self):
        # Derive the DATASET- summary file path from the exp. file location
        if 'exp.' in self.ExpFile():
            dataset_dir = string.replace(self.ExpFile(),'exp.','DATASET-')
        else:
            parent = export.findParentDir(self.ExpFile())
            file = export.findFilename(self.ExpFile())
            if 'DATASET-' not in file:
                dataset_dir = parent + 'DATASET-'+file
            else:
                dataset_dir = self.ExpFile()
        dataset_dir = string.replace(dataset_dir,'ExpressionInput','ExpressionOutput')
        return dataset_dir
    def Architecture(self): return self.architecture
    def BioTypes(self): return self.biotypes
    def Report(self):
        # Summary: exp path plus the lengths of the companion file paths
        return self.ExpFile()+'|'+str(len(self.StatsFile()))+'|'+str(len(self.GroupsFile()))+'|'+str(len(self.CompsFile()))
    def __repr__(self): return self.Report()
class AdditionalAlgorithms:
    """Container naming an additional splicing algorithm and, once assigned
    via setScore(), its associated score threshold."""
    def __init__(self, additional_algorithm):
        self._additional_algorithm = additional_algorithm
    def Algorithm(self):
        return self._additional_algorithm
    def setScore(self,score):
        self._score = score
    def Score(self):
        return self._score
    def __repr__(self):
        return self.Algorithm()
def getDirectoryFiles():
    """Prompt (GUI) for an input expression file, validate its headers via
    ExpressionBuilder, and derive an output directory when none was chosen.

    NOTE(review): backSelect is read before it is assigned in this scope and
    no `global backSelect` is declared here, which would raise
    UnboundLocalError -- confirm how backSelect/selected_parameters/option_db
    are provided at module level.
    NOTE(review): input_exp_file/output_dir are computed but never returned;
    presumably consumed as globals by the caller -- confirm.
    """
    status = 'repeat'
    while status == 'repeat':
        if backSelect == 'no' or 'InputExpFiles' == selected_parameters[-1]:
            root = Tk(); root.title('AltAnalyze: Select Expression File for Filtering')
            selected_parameters.append('InputExpFiles'); backSelect = 'no'
            gu = GUI(root,option_db,option_list['InputExpFiles'],'')
        else: gu = PreviousResults(old_options)
        try: input_exp_file = gu.Results()['input_exp_file']
        except KeyError: input_exp_file = '' ### Leave this blank so that the default directory is used
        try: input_stats_file = gu.Results()['input_stats_file']
        except KeyError: input_stats_file = '' ### Leave this blank so that the default directory is used
        #if array_type == 'exon':
        if 'steady-state' in input_exp_file or 'steady-state' in input_stats_file:
            print_out = "Do not select steady-state expression files.."
            IndicatorWindow(print_out,'Continue'); output_dir=''
        elif len(input_exp_file)>0:
            try: output_dir = gu.Results()['output_dir']
            except KeyError: output_dir = '' ### Leave this blank so that the default directory is used
            try: cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
            except Exception:
                print_out = "Input Expression file does not have a valid format."
                IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
            if len(cel_files)>0: status = 'continue'
            else:
                print_out = "The expression file:\n"+input_exp_file+"\ndoes not appear to be a valid expression file. Check to see that\nthis is the correct tab-delimited text file."
                IndicatorWindow(print_out,'Continue')
        else:
            print_out = "No input expression file selected."
            IndicatorWindow(print_out,'Continue')
    if len(output_dir)<1:
        ### Set output to the same directory or parent if none selected
        if 'ExpressionInput' in input_exp_file: i = -2
        else: i = -1
        output_dir = string.join(string.split(input_exp_file,'/')[:i],'/')
def getUpdatedParameters(array_type,species,run_from_scratch,file_dirs):
    """Collect GO-Elite / pathway-analysis parameters through the GUI,
    validate numeric entries, optionally download additional resources, then
    run GO_Elite.remoteAnalysis and restart AltAnalyze setup.

    file_dirs is (criterion_input_folder, criterion_denom_folder,
    main_output_folder).  Unexpected failures are reported in an
    IndicatorWindow before exiting.
    """
    ### Get default options for ExpressionBuilder and AltAnalyze
    na = 'NA'; log = 'log'; no = 'no'
    global user_variables; user_variables={}; global selected_parameters; selected_parameters = []
    run_goelite=no; change_threshold=na;pathway_permutations=na;mod=na; ge_ptype = 'rawp';resources_to_analyze = na
    ge_fold_cutoffs=2;ge_pvalue_cutoffs=0.05;filter_method=na;z_threshold=1.96;p_val_threshold=0.05
    returnPathways = 'no'
    option_list,option_db = importUserOptions(array_type)
    global root
    if run_from_scratch != 'Prefiltered': ### This is when AltAnalyze has finished an analysis
        root = Tk()
        root.title('AltAnalyze: Perform Additional Analyses')
        selected_parameters.append('AdditionalOptions'); backSelect = 'no'
        gu = GUI(root,option_db,option_list['AdditionalOptions'],'')
        new_run = gu.Results()['new_run']
    else: new_run = None
    if new_run == 'Change Parameters and Re-Run': AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
    else:
        expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = importDefaults(array_type,species)
        option_db['get_additional'].setArrayOptions(['---']+importResourceList())
        option_db['get_additional'].setDefaultOption('---')
        default_resources = option_db['resources_to_analyze'].ArrayOptions()
        # Augment the selectable resource list with any gene-mapp / gene-go
        # files already downloaded for this species
        import_dir1 = '/AltDatabase/goelite/'+species+'/gene-mapp'
        import_dir2 = '/AltDatabase/goelite/'+species+'/gene-go'
        try:
            gene_mapp_list = read_directory(import_dir1)
            gene_mapp_list.sort()
            for file in gene_mapp_list:
                resource = string.split(file,'-')[-1][:-4]
                if resource != 'MAPP' and resource not in default_resources and '.txt' in file:
                    default_resources.append(resource)
        except Exception: null=[]
        try:
            gene_go_list = read_directory(import_dir2)
            gene_go_list.sort()
            for file in gene_go_list:
                resource = string.split(file,'-')[-1][:-4]
                if resource != 'GeneOntology' and resource not in default_resources and 'version' not in resource and '.txt' in file:
                    default_resources.append(resource)
        except Exception: null=[]
        option_db['resources_to_analyze'].setArrayOptions(default_resources)
        proceed = 'no'
        while proceed == 'no':
            root = Tk(); root.title('AltAnalyze: Pathway Analysis Parameters')
            if 'filtered' in run_from_scratch: ### Not relevant for 'Process AltAnalyze filtered'
                option_list['GOElite'] = option_list['GOElite'][3:]; goelite_defaults = goelite_defaults[3:]
            selected_parameters.append('GOElite'); backSelect = 'no'
            gu = GUI(root,option_db,option_list['GOElite'],goelite_defaults)
            if 'filtered' not in run_from_scratch: ### Not relevant for 'Process AltAnalyze filtered'
                ge_fold_cutoffs = gu.Results()['ge_fold_cutoffs']
                ge_pvalue_cutoffs = gu.Results()['ge_pvalue_cutoffs']
                ge_ptype = gu.Results()['ge_ptype']
            filter_method = gu.Results()['filter_method']
            z_threshold = gu.Results()['z_threshold']
            returnPathways = gu.Results()['returnPathways']
            p_val_threshold = gu.Results()['p_val_threshold']
            change_threshold = gu.Results()['change_threshold']
            resources_to_analyze = gu.Results()['resources_to_analyze']
            pathway_permutations = gu.Results()['pathway_permutations']
            ORA_algorithm = gu.Results()['ORA_algorithm']
            mod = gu.Results()['mod']
            get_additional = gu.Results()['get_additional']
            try:
                # Validate numeric entries; loop back to the dialog on failure
                z_threshold = float(z_threshold)
                change_threshold = float(change_threshold)-1 ### This reflects the > statement in the GO-Elite filtering
                p_val_threshold = float(p_val_threshold)
                pathway_permutations = int(pathway_permutations)
                if run_from_scratch != 'Process AltAnalyze filtered':
                    ge_fold_cutoffs = float(ge_fold_cutoffs)
                    ge_pvalue_cutoffs = float(ge_pvalue_cutoffs)
                proceed = 'yes'
            except Exception:
                print_out = "Invalid numerical entry. Try again."
                IndicatorWindow(print_out,'Continue')
        if get_additional != '---':
            analysis = 'getAdditionalOnlineResources'
            values = species,get_additional
            StatusWindow(values,analysis) ### display an window with download status
        try:
            criterion_input_folder, criterion_denom_folder, main_output_folder = file_dirs
            import GO_Elite
            if run_from_scratch != 'Prefiltered': ### Only applies to AltAnalyze generated GO-Elite input
                ###Export dataset criterion using user-defined filters
                ExpressionBuilder.buildCriterion(ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, main_output_folder, 'goelite')
            #except Exception: null = []; # print 'No expression files to summarize'
            if ORA_algorithm == 'Fisher Exact Test':
                pathway_permutations = 'FisherExactTest'
            goelite_var = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,''
            GO_Elite.remoteAnalysis(goelite_var,'UI',Multi=mlp)
            AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
        except Exception:
            print traceback.format_exc()
            print_out = "Unexpected error encountered. Please see log file."
            IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
def addOnlineSpeciesDatabases(backSelect):
    """Download configuration from the AltAnalyze server, let the user pick a
    database version and up to three species, then fetch the selected species
    databases and update the local species/array configuration files.
    """
    StatusWindow(file_location_defaults,'getOnlineDBConfig')
    #except Exception,e: print [e]; null = []
    importSystemInfo(); exportSystemInfo() ### By re-importing we incorporate new source data from the downloaded file
    existing_species_codes = species_codes
    importSpeciesInfo(); online_species = ['']
    for species in species_codes: online_species.append(species)
    online_species.sort()
    importOnlineDatabaseVersions(); db_version_list=[]
    for version in db_versions: db_version_list.append(version)
    # Newest version first becomes the default selection
    db_version_list.sort(); db_version_list.reverse(); select_version = db_version_list[0]
    db_versions[select_version].sort()
    option_db['selected_species1'].setArrayOptions(['---']+db_versions[select_version])
    option_db['selected_species2'].setArrayOptions(['---']+db_versions[select_version])
    option_db['selected_species3'].setArrayOptions(['---']+db_versions[select_version])
    option_db['selected_version'].setArrayOptions(db_version_list)
    proceed = 'no'
    while proceed == 'no':
        if backSelect == 'no' or 'OnlineDatabases' == selected_parameters[-1]:
            selected_parameters.append('OnlineDatabases'); backSelect = 'no'
            root = Tk(); root.title('AltAnalyze: Species Databases Available for Download')
            gu = GUI(root,option_db,option_list['OnlineDatabases'],'')
        else: gu = PreviousResults(old_options); print 'alpha'
        db_version = gu.Results()['selected_version']
        exportDBversion(db_version)
        try: species1 = gu.Results()['selected_species1']
        except Exception: species1='---'
        try: species2 = gu.Results()['selected_species2']
        except Exception: species2='---'
        try: species3 = gu.Results()['selected_species3']
        except Exception: species3='---'
        try: species_full = gu.Results()['species']
        except Exception: species_full = ''
        try: update_goelite_resources = gu.Results()['update_goelite_resources']
        except Exception: update_goelite_resources = ''
        #if species_full == 'Add Species': AltAnalyze.AltAnalyzeSetup(species_full); sys.exit()
        new_species_list = [species1,species2,species3]; new_species_codes={}
        for species in new_species_list:
            if '---' not in species:
                #try:
                ### Export basic species information
                sc = species_codes[species].SpeciesCode()
                existing_species_codes[species] = species_codes[species]
                new_species_codes[sc]=[]
                #except Exception: sc = None
                if sc != None:
                    # Mark expression/RNASeq arrays from matching vendors as
                    # compatible with the newly added species code
                    for ad in db_versions_vendors[db_version]:
                        if ad.SpeciesCodes() == species:
                            for array_system in array_codes:
                                ac = array_codes[array_system]
                                compatible_species = ac.SpeciesCodes()
                                if ac.Manufacturer() in ad.Manufacturer() and ('expression' in ac.ArrayName() or 'RNASeq' in ac.ArrayName() or 'RNA-seq' in ac.ArrayName()):
                                    if sc not in compatible_species: compatible_species.append(sc)
                                ac.setSpeciesCodes(compatible_species)
                    exportArrayInfo(array_codes)
        if len(new_species_codes) > 0:
            analysis = 'getOnlineEliteDatabase'
            values = file_location_defaults,db_version,new_species_codes,update_goelite_resources ### Download the online databases
            StatusWindow(values,analysis)
            proceed = 'yes'
        else:
            print_out = "Please select a species before continuing."
            IndicatorWindow(print_out,'Try Again')
    #db_versions_vendors
    exportSpeciesInfo(existing_species_codes)
    integrate_online_species = 'no'
def getArraysAndVendors(species,vendor):
    """Return the array names and manufacturers compatible with a species.

    species -- two-letter species code (e.g. 'Hs')
    vendor  -- manufacturer name used to filter the arrays; an empty string
               means accept arrays from any manufacturer
    Returns (array_list2, manufacturer_list): sorted, de-duplicated lists of
    compatible array names and of manufacturers offering arrays for species.
    """
    ### NOTE(review): the return values of getSpeciesList are unused here, but
    ### the call is retained in case it has required side effects - confirm
    compatible_species,manufacturer_list_all = getSpeciesList('')
    matching_arrays=[]
    vendors_for_species=[]
    for name in array_list:
        ad = array_codes[name]
        maker = ad.Manufacturer()
        if species not in ad.SpeciesCodes(): continue
        vendors_for_species.append(maker)
        ### Keep this array when no vendor filter was supplied or it matches
        if len(vendor)==0 or vendor == maker:
            matching_arrays.append(name)
    vendors_for_species = unique.unique(vendors_for_species)
    matching_arrays = unique.unique(matching_arrays) ### Filtered based on compatible species arrays
    matching_arrays.sort()
    vendors_for_species.sort()
    return matching_arrays, vendors_for_species
def getSpeciesForArray(array_type):
    """Return the sorted full species names usable with the given platform.

    array_type -- platform/array code used to key the global array_codes dict
    Returns the full species names (e.g. 'Homo sapiens') whose species code is
    supported by the platform and, when the AltDatabase directory is readable,
    also locally installed.
    """
    ### Bug fix: this lookup was previously performed inside a loop over
    ### array_list, recomputing the same value every iteration and leaving
    ### current_species_codes unbound when array_list was empty (which made the
    ### except-fallback below raise NameError). Unused locals also removed.
    current_species_codes = array_codes[array_type].SpeciesCodes()
    ### Restrict to locally installed species databases when the AltDatabase
    ### directory can be read; otherwise assume all platform-supported species
    try: current_species_dirs = unique.read_directory('/AltDatabase')
    except Exception: current_species_dirs = current_species_codes
    current_species_names=[]
    for species in species_codes:
        species_code = species_codes[species].SpeciesCode()
        if species_code in current_species_codes:
            if species_code in current_species_dirs: current_species_names.append(species)
    current_species_names.sort()
    return current_species_names
def verifyLineageProfilerDatabases(species,run_mode):
    """Verify (and if missing, download) the LineageProfiler marker databases.

    species  -- two-letter species code (e.g. 'Hs', 'Mm')
    run_mode -- 'GUI' shows Tk indicator/status windows; any other value
                prints messages for command-line use
    Returns True when the required tissue-specific marker file is installed.
    Requires gene database version EnsMart62 or later; species other than
    human and mouse fall back to the mouse marker set.
    """
    import AltAnalyze
    installed = False
    download_species = species
    ### Installed gene database version; '00' when it cannot be determined
    try: gene_database = unique.getCurrentGeneDatabaseVersion()
    except Exception: gene_database='00'
    try:
        ### Version string ends in a two-digit Ensembl build number
        if int(gene_database[-2:]) < 62:
            print_out = 'LineageProfiler is not supported in this database version (EnsMart62 and higher required).'
            print print_out
            return False
        else:
            if species == 'Hs':
                source_file = 'AltDatabase/ensembl/'+species+'/'+species+'_exon_tissue-specific_protein_coding.txt'
                download_species = 'Hs'
            elif species == 'Mm':
                source_file = 'AltDatabase/ensembl/'+species+'/'+species+'_gene_tissue-specific_protein_coding.txt'
                download_species = 'Mm'
            else: ### Use the mouse version instead - less variable data
                source_file = 'AltDatabase/ensembl/'+species+'/'+species+'_gene_tissue-specific_protein_coding.txt'
                download_species = 'Mm'
            ### A non-empty marker file indicates the database is installed
            file_length = AltAnalyze.verifyFileLength(source_file)
            if file_length>0:
                installed = True
            else:
                print_out = 'To perform a LineageProfiler analysis AltAnalyze must\nfirst download the appropriate database.'
                if run_mode == 'GUI':
                    IndicatorWindow(print_out,'Download')
                else:
                    print print_out ### Occurs in command-line mode
                filename = 'AltDatabase/ensembl/'+download_species+'_LineageProfiler.zip'
                dir = 'AltDatabase/updated/'+gene_database ### Directory at altanalyze.org
                var_list = filename,dir
                ### Download via a status window (GUI) or directly (CLI/debug)
                if debug_mode == 'no' and run_mode == 'GUI': StatusWindow(var_list,'download')
                else: update.downloadCurrentVersion(filename,dir,None)
                ### Re-check for the marker file after the download finished
                file_length = AltAnalyze.verifyFileLength(source_file)
                if file_length>0: installed = True
                else:
                    ### Best effort: translate the downloaded (mouse) markers to
                    ### this species; failures are deliberately ignored
                    try:
                        import GeneSetDownloader
                        GeneSetDownloader.translateBioMarkersBetweenSpecies('AltDatabase/ensembl/'+download_species,species)
                    except Exception:
                        None
    except Exception: installed = False
    return installed
def checkForLocalArraySupport(species,array_type,specific_arraytype,run_mode):
specific_arraytype = string.lower(specific_arraytype) ### Full array name
if array_type == 'junction' or array_type == 'RNASeq':
try: gene_database = unique.getCurrentGeneDatabaseVersion()
except Exception: gene_database='00'
if int(gene_database[-2:]) < 0:
print_out = 'The AltAnalyze database indicated for '+array_type+' analysis\n is not supported for alternative exon analysis.\nPlease update to EnsMart55 or greater before\nproceeding.'
if run_mode == 'GUI': IndicatorWindow(print_out,'Continue')
else: print print_out ### Occurs in command-line mode
AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
downloaded_junction_db = 'no'; file_problem='no'; wrong_junction_db = 'no'
while downloaded_junction_db == 'no': ### Used as validation in case internet connection is unavailable
try: dirs = read_directory('/AltDatabase/'+species)
except Exception: dirs=[]
if wrong_junction_db == 'yes':
print_out = 'Another junction database is installed. Select "Contine" to overwrite or manually change the name of this folder:\n'+filepath('AltDatabase/'+species+'/'+array_type)
if run_mode == 'GUI': IndicatorWindow(print_out,'Continue')
else: print print_out ### Occurs in command-line mode
if array_type not in dirs or file_problem == 'yes' or wrong_junction_db == 'yes':
if file_problem == 'yes':
print_out = 'Unknown installation error occured.\nPlease try again.'
else:
print_out = 'To perform a '+array_type+' analysis AltAnalyze must\nfirst download the appropriate database.'
if run_mode == 'GUI': IndicatorWindow(print_out,'Download')
else: print print_out ### Occurs in command-line mode
if array_type == 'RNASeq': filename = 'AltDatabase/'+species+'_'+array_type+'.zip'
elif 'glue' in specific_arraytype: filename = 'AltDatabase/'+species+'/'+species+'_'+array_type+'_Glue.zip'
elif 'hta 2.0' in specific_arraytype: filename = 'AltDatabase/'+species+'/'+species+'_'+array_type+'_HTA-2_0.zip'
else: filename = 'AltDatabase/'+species+'/'+species+'_'+array_type+'.zip'
dir = 'AltDatabase/updated/'+gene_database; var_list = filename,dir
if debug_mode == 'no' and run_mode == 'GUI':
StatusWindow(var_list,'download')
else: update.downloadCurrentVersion(filename,dir,None)
try: dirs = read_directory('/AltDatabase/'+species)
except Exception: dirs=[]
if array_type in dirs:
import AltAnalyze
file_length = AltAnalyze.verifyFileLength('AltDatabase/'+species+'/'+array_type+'/probeset-domain-annotations-exoncomp.txt')
if file_length>0: downloaded_junction_db = 'yes'
elif species == 'Mm' or species == 'Hs' or species == 'Rn': file_problem = 'yes'
else: downloaded_junction_db = 'yes' ### Occurs when no alternative exons present for species
if array_type == 'junction':
specific_platform = determinePlatform('AltDatabase/'+species+'/'+array_type+'/platform.txt')
if 'glue' in specific_arraytype and 'Glue' not in specific_platform: wrong_junction_db = 'yes'; downloaded_junction_db = 'no'
elif 'glue' not in specific_arraytype and 'Glue' in specific_platform: wrong_junction_db = 'yes'; downloaded_junction_db = 'no'
elif 'hta 2.0' in specific_arraytype and 'HTA-2_0' not in specific_platform: wrong_junction_db = 'yes'; downloaded_junction_db = 'no'
elif 'hta 2.0' not in specific_arraytype and 'HTA-2_0' in specific_platform: wrong_junction_db = 'yes'; downloaded_junction_db = 'no'
#print [specific_arraytype], [specific_platform], wrong_junction_db, downloaded_junction_db
def exportGeneList(gene_list,outputFolder):
    """Write gene symbols to a GO-Elite input file and return its folder.

    gene_list    -- list of gene symbols/IDs to export (one per line)
    outputFolder -- parent directory; the file is written beneath
                    outputFolder+'/GO-Elite_input/'
    Returns the GO-Elite input directory path.
    """
    ### File named after the first genes in the list (truncated to 25 chars)
    filename = string.join(gene_list,' ')[:25]
    eo = export.ExportFile(outputFolder+'/GO-Elite_input/'+filename+'.txt')
    ### NOTE(review): header typo ('SytemCode') kept as-is in case downstream
    ### parsing depends on the exact text - confirm before correcting
    eo.write('Symbol\tSytemCode\n')
    for i in gene_list:
        eo.write(i+'\tSy\n') ### 'Sy' = gene-symbol system code for GO-Elite
    ### Bug fix: close the export handle (previously leaked)
    eo.close()
    return outputFolder+'/GO-Elite_input'
def getUserParameters(run_parameter,Multi=None):
global AltAnalyze; import AltAnalyze; global mlp; mlp=Multi ### multiprocessing support
if run_parameter == 'yes':
try: MainMenu()
except Exception:
print traceback.format_exc()
print_out = "\nCritical error encountered!!! This machine does not have either:\n"
print_out += "1) Have the required Tcl/Tk components installed.\n"
print_out += "2) Is being run from a compiled version that has critical incompatibilities your OS or hardware or\n"
print_out += "3) Is being run from source-code in the same-directory as executable code resulting in a conflict\n"
print_out += "\nIf any of these apply, we recommend downloading the Python source-code version of AltAnalyze "
print_out += "(installing necessary dependencies - see our Wiki or Documentation)."
print_out += "Otherwise, please contact AltAnalyze support (http://code.google.com/p/altanalyze/wiki/ContactUs).\n\n"
print_out += "Installation Wiki: http://code.google.com/p/altanalyze/wiki/Installation\n\n"
print print_out
try:
### Create a log report of this
try: log_file = filepath('AltAnalyze_error-report.log')
except Exception: log_file = filepath('/AltAnalyze_error-report.log')
log_report = open(log_file,'w');
log_report.write(print_out)
log_report.write(traceback.format_exc())
log_report.close()
### Open this file
if os.name == 'nt':
try: os.startfile('"'+log_file+'"')
except Exception: os.system('open "'+log_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+log_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+log_file+'/"')
except Exception: None
sys.exit()
global species; species=''; global user_variables; user_variables={}; global analysis_method; global array_type; global vendor
global PathDir; global PathFile; global file_location_defaults; global integrate_online_species; integrate_online_species = 'no'
global option_db; global option_list; global analysis_status; analysis_status = 'continue'; global selected_parameters; selected_parameters=[]
global backSelect; global fl; predictGroups = False
### Get default options for ExpressionBuilder and AltAnalyze
na = 'NA'; log = 'log'; no = 'no'
run_from_scratch=na; expression_threshold=na; perform_alt_analysis=na; expression_data_format=log
include_raw_data=na; avg_all_for_ss=no; dabg_p=na; normalize_feature_exp=na; normalize_gene_data = na
analysis_method=na; p_threshold=na; filter_probeset_types=na; alt_exon_fold_cutoff=na
permute_p_threshold=na; perform_permutation_analysis=na; export_splice_index_values=no
run_MiDAS=no; analyze_functional_attributes=no; microRNA_prediction_method=na
gene_expression_cutoff=na; cel_file_dir=na; input_exp_file=na; input_stats_file=na; filter_for_AS=no
remove_intronic_junctions=na; build_exon_bedfile=no; input_cdf_file = na; bgp_file = na
clf_file = na; remove_xhyb = na; multiThreading = True; input_fastq_dir = ''
compendiumType = 'protein_coding'; compendiumPlatform = 'gene'
calculate_splicing_index_p=no; run_goelite=no; ge_ptype = 'rawp'; probability_algorithm = na
ge_fold_cutoffs=2;ge_pvalue_cutoffs=0.05;filter_method=na;z_threshold=1.96;p_val_threshold=0.05
change_threshold=2;pathway_permutations=na;mod=na; analyze_all_conditions=no; resources_to_analyze=na
additional_algorithms = na; rpkm_threshold = na; exon_exp_threshold = na; run_lineage_profiler = no
gene_exp_threshold = na; exon_rpkm_threshold = na; visualize_results = no; returnPathways = 'no'
batch_effects = na; marker_finder = na
try: option_list,option_db = importUserOptions('exon') ##Initially used to just get the info for species and array_type
except IOError:
### Occurs if Config folder is absent or when the source code is run outside AltAnalyze root
print_out = '\nWarning! The Config folder in the AltAnalyze program directory cannot be found. The likely cause is:\n'
print_out +=' A): The AltAnalyze source-code is being run outside the root AltAnalyze directory or \n'
print_out +=' B): AltAnalyze was zip extracted/installed in a weird way (incommpatible zip extractor)\n'
print_out +='\nIf you beleive (B) is possible, unzip with another unzip program (e.g., default Windows unzip program).'
print_out +='\nIf neither applies, we recommend contacting our help desk (http://code.google.com/p/altanalyze/wiki/ContactUs).'
try: IndicatorWindow(print_out,'Exit')
except Exception: print printout
sys.exit()
importSpeciesInfo()
file_location_defaults = importDefaultFileLocations()
importArrayInfo()
try: elite_db_versions = returnDirectoriesNoReplace('/AltDatabase')
except Exception:
try: elite_db_versions=[]; os.mkdir(filepath('AltDatabase'))
except Exception: null=[] ### directory already exists
try: gene_database_dir = unique.getCurrentGeneDatabaseVersion()
except Exception: gene_database_dir=''
if len(elite_db_versions)>0 and gene_database_dir == '':
gene_database_dir = elite_db_versions[-1]; exportDBversion(elite_db_versions[-1])
current_species_names,manufacturer_list_all = getSpeciesList('')
option_db['species'].setArrayOptions(current_species_names)
try: PathDir = file_location_defaults['PathDir'].Location()
except Exception:
try:
### Entry was deleted from Config file - re-create it
fl = FileLocationData('local', '', 'all')
file_location_defaults['PathDir'] = fl
except Exception: null = None
PathDir = ''
try: PathFile = file_location_defaults['PathFile'].Location()
except Exception:
try:
### Entry was deleted from Config file - re-create it
fl = FileLocationData('local', '', 'all')
file_location_defaults['PathFile'] = fl
except Exception: null = None
PathFile = ''
old_options = []
try:
#### Get information from previous loop
if len(run_parameter) == 2 and run_parameter != 'no': ### Occurs when selecting "Back" from Elite parameter window
old_options = run_parameter[1]; selected_parameters = run_parameter[0]
try:
if selected_parameters[-2]==selected_parameters[-1]: selected_parameters = selected_parameters[:-1]
except Exception: selected_parameters = selected_parameters
backSelect = 'yes'
#print selected_parameters
#print old_options,'\n'
for option in old_options: ### Set options to user selected
try: option_db[option].setDefaultOption(old_options[option])
except Exception: null=[]
user_variables[option] = old_options[option]
if 'array_type' in old_options:
specific_array = old_options['array_type']
vendor = old_options['manufacturer_selection']
species_full = old_options['species']
species = species_codes[species_full].SpeciesCode()
if selected_parameters == []: backSelect = 'no'
else: backSelect = 'no'; old_options=[]
###Update this informatin in option_db which will be over-written after the user selects a species and array_type
option_db['species'].setArrayOptions(current_species_names)
if len(current_species_names)==0 and run_parameter != 'Add Species':
print_out = "No species databases found. Select\ncontinue to proceed with species download."
IndicatorWindow(print_out,'Continue')
integrate_online_species = 'yes'
addOnlineSpeciesDatabases(backSelect)
AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
### Set defaults based on avialable species
#default_vendor = 'RNASeq'
#default_specific_array = 'RNA-seq aligned read counts'
default_vendor = 'Affymetrix'
default_specific_array='Affymetrix expression array'
try: ### If the users have already analyzed Affy data, make this the default
affymetrix_library_dir = 'AltDatabase/affymetrix/LibraryFiles'
affy_dir_list = read_directory(filepath(affymetrix_library_dir))
if len(affy_dir_list)>0:
default_vendor = 'Affymetrix'
default_specific_array='Affymetrix expression array'
except Exception:
None ### Occurs if this directory is missing (possible in future versions)
if run_parameter == 'Add Species': species_full = 'Homo sapiens'; species = 'Hs'; vendor = 'Affymetrix'; specific_array = 'Exon 1.0 ST array'
if backSelect == 'yes' and 'array_type' in old_options: null=[]
elif 'Homo sapiens' in current_species_names: species_full = 'Homo sapiens'; species = 'Hs'; vendor = default_vendor; specific_array = default_specific_array
elif 'Mus musculus' in current_species_names: species_full = 'Mus musculus'; species = 'Mm'; vendor = default_vendor; specific_array = default_specific_array
elif 'Rattus norvegicus' in current_species_names: species_full = 'Rattus norvegicus'; species = 'Rn'; vendor = default_vendor; specific_array = default_specific_array
else:
for species_full in current_species_names:
species = species_codes[species_full].SpeciesCode()
for array_name in array_list:
vendor = array_codes[array_name].Manufacturer()
if species in array_codes[array_name].SpeciesCodes(): specific_array = array_name; break
array_list2, manufacturer_list = getArraysAndVendors(species,vendor)
#print [[array_list2]], species, vendor
option_db['species'].setDefaultOption(species_full)
option_db['array_type'].setArrayOptions(array_list2)
option_db['array_type'].setDefaultOption(specific_array)
option_db['manufacturer_selection'].setArrayOptions(manufacturer_list_all)
option_db['manufacturer_selection'].setDefaultOption(vendor)
manufacturer_list_all_possible=[]
for array_name in array_list:
manufacturer = array_codes[array_name].Manufacturer(); manufacturer_list_all_possible.append(manufacturer)
manufacturer_list_all_possible = unique.unique(manufacturer_list_all_possible); manufacturer_list_all_possible.sort()
if len(elite_db_versions)>1:
option_db['dbase_version'].setArrayOptions(elite_db_versions)
option_db['dbase_version'].setDefaultOption(gene_database_dir)
else:
### Otherwise, remove this option
del option_db['dbase_version']
### Get user array and species selections
if run_parameter != 'Add Species':
if backSelect == 'no' or 'ArrayType' == selected_parameters[-1]:
selected_parameters.append('ArrayType'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Select Species and Experimental Platform')
gu = GUI(root,option_db,option_list['ArrayType'],'')
else: gu = PreviousResults(old_options)
species_full = gu.Results()['species']
new_analysis_options=[]
try: update_dbs = gu.Results()['update_dbs']
except Exception: update_dbs = 'no'
try:
selected_parameters[-1]
except Exception:
AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
if update_dbs == 'yes' or species_full == 'Add Species' or 'NewSpecies' == selected_parameters[-1]:
integrate_online_species = 'yes'
addOnlineSpeciesDatabases(backSelect)
AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
elif species_full == 'Add Species' or 'NewSpecies' == selected_parameters[-1]: ### outdated code - bypassed by the above
species_added = 'no'
option_db['new_manufacturer'].setArrayOptions(manufacturer_list_all_possible)
while species_added == 'no':
if backSelect == 'no' or 'NewSpecies' == selected_parameters[-1]:
selected_parameters.append('NewSpecies'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Add New Species Support')
gu = GUI(root,option_db,option_list['NewSpecies'],'')
else: gu = PreviousResults(old_options)
new_species_code = gu.Results()['new_species_code']
new_species_name = gu.Results()['new_species_name']
new_manufacturer = gu.Results()['new_manufacturer']
if len(new_species_code)==2 and len(new_species_name)>0 and len(new_manufacturer)>0:
species_added = 'yes'
sd = SpeciesData(new_species_code,new_species_name,[''])
species_codes[new_species_name] = sd
exportSpeciesInfo(species_codes)
try: os.mkdir(filepath('AltDatabase/'+new_species_code))
except Exception: null=[]
for array_system in array_codes:
ac = array_codes[array_system]
manufacturer=ac.Manufacturer()
compatible_species = ac.SpeciesCodes()
if manufacturer == new_manufacturer and 'expression array' in ac.ArrayName():
if new_species_code not in compatible_species: compatible_species.append(new_species_code)
ac.setSpeciesCodes(compatible_species)
exportArrayInfo(array_codes)
fn = filepath('AltDatabase/affymetrix/'+ new_species_code)
try: os.mkdir(fn)
except OSError: null = [] ### Directory already exists
AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
else:
print_out = "Valid species data was not added. You must\nindicate a two letter species code and full species name."
IndicatorWindow(print_out,'Continue')
else: species = species_codes[species_full].SpeciesCode()
array_full = gu.Results()['array_type']
vendor = gu.Results()['manufacturer_selection']
try: array_type = array_codes[array_full].ArrayCode()
except Exception:
### An error occurs because this is a system name for the Other ID option
array_type = "3'array"
vendor = 'other:'+array_full ### Ensembl linked system name
if array_full == 'Normalized externally':
array_type = "3'array"
vendor = 'other:'+array_full ### Ensembl linked system name
if array_type == 'gene':
try: gene_database = unique.getCurrentGeneDatabaseVersion()
except Exception: gene_database='00'
if int(gene_database[-2:]) < 54:
print_out = 'The AltAnalyze database indicated for Gene 1.0 ST\narray analysis is not supported for alternative exon\nanalysis. Please update to EnsMart54 or greater\nbefore proceeding.'
IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
### Examine the AltDatabase folder for directories required for specific array analyses
checkForLocalArraySupport(species,array_type,array_full,'GUI')
if array_type == 'exon' or array_type == 'AltMouse' or array_type == 'gene' or array_type == 'junction':
try: dirs = read_directory('/AltDatabase/'+species)
except Exception: dirs=[]
if len(dirs)==0:
print_out = 'Valid database directories were not found for this array.\nPlease re-install database.'
IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
option_list,option_db = importUserOptions(array_type,vendor=vendor) ##Initially used to just get the info for species and array_type
if array_type == "3'array":
if species == 'Hs': compendiumPlatform = "3'array"
for i in option_db['run_from_scratch'].ArrayOptions():
if 'AltAnalyze' not in i:
if array_type == "3'array":
if 'CEL' in i and vendor != 'Affymetrix': proceed = 'no'
else: proceed = 'yes'
else: proceed = 'yes'
if proceed == 'yes': new_analysis_options.append(i)
option_db['run_from_scratch'].setArrayOptions(new_analysis_options)
proceed = 'no'
if len(new_analysis_options)!=1:
if backSelect == 'no' or 'AnalysisType' == selected_parameters[-1]:
selected_parameters.append('AnalysisType'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Select Analysis Method')
gu = GUI(root,option_db,option_list['AnalysisType'],'')
else: gu = PreviousResults(old_options)
run_from_scratch = gu.Results()['run_from_scratch']
else: run_from_scratch = 'Process Expression file'
try: vendor = array_codes[array_full].Manufacturer()
except Exception: None ### Key the existing vendor
try: constitutive_source = array_codes[array_full].ConstitutiveSource()
except Exception: constitutive_source = vendor
if backSelect == 'yes':
for option in old_options: ### Set options to user selected
try: option_db[option].setDefaultOption(old_options[option])
except Exception: null=[]
if run_from_scratch == 'Interactive Result Viewer':
AltAnalyze.AltAnalyzeSetup('remoteViewer');sys.exit()
if run_from_scratch == 'Additional Analyses':
if backSelect == 'no' or 'Additional Analyses' == selected_parameters[-1]:
selected_parameters.append('Additional Analyses'); backSelect = 'no'
root = Tk()
root.title('AltAnalyze: Additional Analysis Options')
gu = GUI(root,option_db,option_list['Additional Analyses'],'')
### Venn Diagram Error here with _tkinter.TclError: image "pyimage36" doesn't exist
else: gu = PreviousResults(old_options)
additional_analyses = gu.Results()['additional_analyses']
if 'nrichment' in additional_analyses:
status = 'repeat'
while status == 'repeat':
if backSelect == 'no' or 'InputGOEliteDirs' == selected_parameters[-1]:
root = Tk(); root.title('AltAnalyze: Select Expression File for Filtering')
selected_parameters.append('InputGOEliteDirs'); backSelect = 'no'
gu = GUI(root,option_db,option_list['InputGOEliteDirs'],'')
else: gu = PreviousResults(old_options)
try: criterion_input_folder = gu.Results()['criterion_input_folder']
except KeyError: criterion_input_folder = '' ### Leave this blank so that the default directory is used
try: criterion_denom_folder = gu.Results()['criterion_denom_folder']
except KeyError: criterion_denom_folder = '' ### Leave this blank so that the default directory is used
try:
try: main_output_folder = gu.Results()['main_output_folder']
except KeyError: main_output_folder = 'GO-Elite/input/' ### Leave this blank so that the default directory is
inputIDs = gu.Results()['inputIDs']
if len(inputIDs)>0:
inputIDs = string.replace(inputIDs, '\r',' ')
inputIDs = string.replace(inputIDs, '\n',' ')
inputIDs = string.split(inputIDs, ' ')
criterion_input_folder = exportGeneList(inputIDs,main_output_folder)
except Exception: inputIDs=[]
if len(criterion_input_folder)>0:# and len(criterion_denom_folder)>0:
try: main_output_folder = gu.Results()['main_output_folder']
except KeyError: main_output_folder = '' ### Leave this blank so that the default directory is
if len(main_output_folder)<1:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
main_output_folder = string.join(string.split(criterion_input_folder,'/')[:i],'/')
status = 'continue'
else:
print_out = "No GO-Elite input or denominator folder(s) selected."
IndicatorWindow(print_out,'Continue')
file_dirs = criterion_input_folder, criterion_denom_folder, main_output_folder
#print file_dirs
### Get GO-Elite Input Parameters
getUpdatedParameters(array_type,species,'Prefiltered',file_dirs)
if additional_analyses == 'Pathway Visualization':
root = Tk()
root.title('AltAnalyze: Visualize Data on WikiPathways')
selected_parameters.append('Pathway Visualization')
GUI(root,'ViewWikiPathways',[],'') ### The last is default attributes (should be stored as defaults in the option_db var)
if additional_analyses == 'Identifier Translation':
try:
selected_parameters.append('Identifier Translation')
supported_geneid_types = getSupportedGeneSystems(species,'uid-gene')
option_db['input_source'].setArrayOptions(['None Selected']+supported_geneid_types)
option_db['output_source'].setArrayOptions(['None Selected']+supported_geneid_types)
#option_db['PathwaySelection'].setArrayOptions(supported_genesets)
except Exception,e:
print traceback.format_exc()
status = 'repeat'
while status == 'repeat':
root = Tk()
root.title('AltAnalyze: Translate Input File Identifiers to Another System')
gu = GUI(root,option_db,option_list['IDConverter'],'')
try: input_cluster_file = gu.Results()['input_cluster_file']
except Exception: input_cluster_file = ''
input_data_file = gu.Results()['input_data_file']
input_source = gu.Results()['input_source']
output_source = gu.Results()['output_source']
if len(input_data_file)>0 and input_source != 'None Selected' and output_source != 'None Selected':
analysis = 'IDConverter'
values = input_data_file, species, input_source, output_source
StatusWindow(values,analysis) ### display an window with download status
AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
else:
print_out = "No input expression file selected."
IndicatorWindow(print_out,'Continue')
if additional_analyses == 'Merge Files':
selected_parameters.append('Merge Files')
status = 'repeat'
while status == 'repeat':
root = Tk()
root.title('AltAnalyze: Merge Multiple Text Files Containing Common IDs')
gu = GUI(root,option_db,option_list['MergeFiles'],'')
input_file1 = gu.Results()['input_file1']
input_file2 = gu.Results()['input_file2']
input_file3 = gu.Results()['input_file3']
input_file4 = gu.Results()['input_file4']
join_option = gu.Results()['join_option']
ID_option = gu.Results()['ID_option']
output_merge_dir = gu.Results()['output_merge_dir']
if len(input_file1)>0 and len(input_file2)>0 and len(output_merge_dir)>0:
if ID_option == 'False': ID_option = False
if ID_option == 'True': ID_option = True
analysis = 'MergeFiles'
files_to_merge = [input_file1, input_file2]
if len(input_file3)>0: files_to_merge.append(input_file3)
if len(input_file4)>0: files_to_merge.append(input_file4)
values = files_to_merge, join_option, ID_option, output_merge_dir
StatusWindow(values,analysis) ### display an window with download status
AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
else:
print_out = "No input expression file selected."
IndicatorWindow(print_out,'Continue')
if additional_analyses == 'Venn Diagram':
selected_parameters.append('Venn Diagram')
status = 'repeat'
while status == 'repeat':
root = Tk()
root.title('AltAnalyze: View Venn Diagram from AltAnalyze or Input Files')
gu = GUI(root,option_db,option_list['VennDiagram'],'')
input_file1 = gu.Results()['venn_input_file1']
input_file2 = gu.Results()['venn_input_file2']
input_file3 = gu.Results()['venn_input_file3']
input_file4 = gu.Results()['venn_input_file4']
venn_output_dir = gu.Results()['venn_output_dir']
if len(input_file1)>0 and len(input_file2)>0 and len(venn_output_dir)>0:
analysis = 'VennDiagram'
files_to_merge = [input_file1, input_file2]
if len(input_file3)>0: files_to_merge.append(input_file3)
if len(input_file4)>0: files_to_merge.append(input_file4)
values = files_to_merge, venn_output_dir
StatusWindow(values,analysis) ### display an window with download status
AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
else:
print_out = "No input expression file selected."
IndicatorWindow(print_out,'Continue')
if additional_analyses == 'AltExon Viewer':
selected_parameters.append('AltExon Viewer')
status = 'repeat'
while status == 'repeat':
root = Tk()
root.title('AltAnalyze: Visualize Exon-Level Expression Results')
gu = GUI(root,option_db,option_list['AltExonViewer'],'')
altanalyze_results_folder = gu.Results()['altanalyze_results_folder']
data_type = gu.Results()['data_type']
show_introns = gu.Results()['show_introns']
gene_symbol = gu.Results()['gene_symbol']
altgenes_file = gu.Results()['altgenes_file']
analysisType = gu.Results()['analysisType']
if len(altgenes_file)>0:
gene_symbol = importGeneList(altgenes_file) ### list of gene IDs or symbols
if analysisType == 'Sashimi-Plot':
altanalyze_results_folder = string.split(altanalyze_results_folder,'AltResults')[0]
exp_file = altanalyze_results_folder
gene_symbol = altgenes_file
elif data_type == 'raw expression': ### Switch directories if expression
altanalyze_results_folder = string.replace(altanalyze_results_folder,'AltResults','ExpressionInput')
exp_file = getValidExpFile(altanalyze_results_folder)
else:
altanalyze_results_folder += '/RawSpliceData/'+species
try: exp_file = getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print_out = "No files found in: "+altanalyze_results_folder
IndicatorWindow(print_out,'Continue')
if len(exp_file)>0 and len(gene_symbol)>0:
analysis = 'AltExonViewer'
values = species,array_type,exp_file,gene_symbol,show_introns,analysisType
StatusWindow(values,analysis) ### display an window with download status
if len(altgenes_file)>0 or ' ' in gene_symbol:
### Typically have a Tkinter related error
if os.name == 'posix':
try:
package_path = filepath('python')
mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/AltAnalyze')
kill
os.system(mac_package_path+' --GUI yes');sys.exit()
except Exception:
package_path = filepath('python')
print package_path
mac_package_path = string.replace(package_path,'python','AltAnalyze.py')
mac_package_path = 'python '+mac_package_path
os.system(mac_package_path+' --GUI yes');sys.exit()
else:
AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
else:
AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
else:
print_out = "Either no gene or no AltResults folder selected."
IndicatorWindow(print_out,'Continue')
if additional_analyses == 'Network Visualization':
selected_parameters.append('Network Visualization')
supported_interaction_types = getSupportedGeneSetTypes(species,'gene-interactions')
supported_geneset_types = getSupportedGeneSetTypes(species,'gene-mapp')
supported_geneset_types += getSupportedGeneSetTypes(species,'gene-go')
option_db['GeneSetSelection_network'].setArrayOptions(['None Selected']+supported_geneset_types)
option_db['PathwaySelection_network'].setArrayOptions(['None Selected'])
#option_db['PathwaySelection'].setArrayOptions(supported_genesets)
status = 'repeat'
while status == 'repeat':
### If no databases present download and populate gene-interactions folder
if len(supported_interaction_types)==0:
print_out = 'No interaction databases available.\nPress Continue to download interaction\ndatabases for this species.'
IndicatorWindow(print_out,'Continue')
downloadInteractionDBs(species,'parent')
### Get present interaction databases (including custom added)
updated_list=[]
if 'WikiPathways' in supported_interaction_types: updated_list.append('WikiPathways')
if 'KEGG' in supported_interaction_types: updated_list.append('KEGG')
if 'TFTargets' in supported_interaction_types: updated_list.append('TFTargets')
if 'BioGRID' in supported_interaction_types: updated_list.append('BioGRID')
for db in supported_interaction_types:
if 'microRNATargets' in db:
updated_list.append('common-microRNATargets'); updated_list.append('all-microRNATargets')
elif 'DrugBank' in db:
updated_list.append('common-DrugBank'); updated_list.append('all-DrugBank')
elif db not in updated_list: updated_list.append(db)
option_db['interactionDirs'].setArrayOptions(updated_list)
root = Tk()
root.title('AltAnalyze: Create and Visualize Interaction Networks')
gu = GUI(root,option_db,option_list['network'],'')
Genes_network = gu.Results()['Genes_network']
inputDir_network = gu.Results()['input_ID_file']
GeneSetSelection_network = gu.Results()['GeneSetSelection_network']
inputType_network = gu.Results()['inputType_network']
PathwaySelection_network = gu.Results()['PathwaySelection_network']
OntologyID_network = gu.Results()['OntologyID_network']
interactionDirs = gu.Results()['interactionDirs']
degrees = gu.Results()['degrees']
update_interactions = gu.Results()['update_interactions']
expressionFile_network = gu.Results()['elite_exp_file']
outputDir_network = gu.Results()['output_net_folder']
includeExpIDs_network = gu.Results()['includeExpIDs_network']
### Set the below variables to the appropriate object types
if update_interactions == 'yes': update_interactions = True
else: update_interactions = False
if len(inputDir_network) == 0: inputDir_network = None
if len(expressionFile_network) == 0: expressionFile_network = None
if len(Genes_network) == 0: Genes_network = None
if len(outputDir_network) == 0: outputDir_network = None
if len(GeneSetSelection_network) == 'None Selected': GeneSetSelection_network = None
if includeExpIDs_network=='yes': includeExpIDs_network = True
else: includeExpIDs_network = False
### Save these as instances of GeneSelectionParameters (easier this way to add more object types in the future)
gsp = GeneSelectionParameters(species,array_type,vendor) ### only species currently neeed
gsp.setGeneSet(GeneSetSelection_network)
gsp.setPathwaySelect(PathwaySelection_network)
gsp.setGeneSelection(Genes_network)
gsp.setOntologyID(OntologyID_network)
gsp.setIncludeExpIDs(includeExpIDs_network)
if update_interactions:
downloadInteractionDBs(species,'parent')
if outputDir_network==None:
print_out = "No output directory selected."
IndicatorWindow(print_out,'Continue')
elif inputDir_network != None or GeneSetSelection_network != None or Genes_network != None:
analysis = 'network'
values = inputDir_network,inputType_network,outputDir_network,interactionDirs,degrees,expressionFile_network,gsp
StatusWindow(values,analysis,windowType='parent') ### display an window with download status
AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
else:
print_out = "No input gene IDs, expression file or GeneSet selected."
IndicatorWindow(print_out,'Continue')
    if additional_analyses == 'Hierarchical Clustering':
        ### Heatmap workflow: gather clustering/display options from the GUI, then
        ### dispatch the 'createHeatMap' analysis via StatusWindow.
        selected_parameters.append('Hierarchical Clustering')
        supported_geneset_types = getSupportedGeneSetTypes(species,'gene-mapp')
        supported_geneset_types += getSupportedGeneSetTypes(species,'gene-go')
        option_db['GeneSetSelection'].setArrayOptions(['None Selected']+supported_geneset_types)
        option_db['PathwaySelection'].setArrayOptions(['None Selected'])
        option_db['ClusterGOElite'].setArrayOptions(['None Selected','all']+supported_geneset_types)
        #option_db['PathwaySelection'].setArrayOptions(supported_genesets)
        status = 'repeat'
        while status == 'repeat':
            root = Tk()
            root.title('AltAnalyze: Create a Heatmap from an Expression Matrix')
            gu = GUI(root,option_db,option_list['heatmap'],'')
            try: input_cluster_file = gu.Results()['input_cluster_file']
            except Exception: input_cluster_file = ''
            column_metric = gu.Results()['column_metric']
            column_method = gu.Results()['column_method']
            row_metric = gu.Results()['row_metric']
            row_method = gu.Results()['row_method']
            color_selection = gu.Results()['color_selection']
            cluster_rows = gu.Results()['cluster_rows']
            cluster_columns = gu.Results()['cluster_columns']
            GeneSetSelection = gu.Results()['GeneSetSelection']
            PathwaySelection = gu.Results()['PathwaySelection']
            GeneSelection = gu.Results()['GeneSelection']
            ClusterGOElite = gu.Results()['ClusterGOElite']
            HeatmapAdvanced = gu.Results()['HeatmapAdvanced']
            JustShowTheseIDs = gu.Results()['JustShowTheseIDs']
            geneSetName = gu.Results()['heatmapGeneSets']
            try: CorrelationCutoff = float(gu.Results()['CorrelationCutoff'])
            except Exception: CorrelationCutoff=None
            OntologyID = gu.Results()['OntologyID']
            transpose = gu.Results()['transpose']
            normalization = gu.Results()['normalization']
            contrast = gu.Results()['contrast']
            if transpose == 'yes': transpose = True
            else: transpose = False
            ### Maps user-facing advanced-option labels to internal keyword tokens appended to GeneSelection
            translate={'None Selected':'','Exclude Cell Cycle Effects':'excludeCellCycle','Top Correlated Only':'top','Positive Correlations Only':'positive','Perform Iterative Discovery':'driver', 'Intra-Correlated Only':'IntraCorrelatedOnly', 'Perform Monocle':'monocle'}
            try:
                if 'None Selected' in HeatmapAdvanced: pass
            except Exception: HeatmapAdvanced = ('None Selected')
            if ('None Selected' in HeatmapAdvanced and len(HeatmapAdvanced)==1) or 'None Selected' == HeatmapAdvanced: pass
            else:
                try:
                    GeneSelection += ' '+string.join(list(HeatmapAdvanced),' ')
                    for name in translate:
                        GeneSelection = string.replace(GeneSelection,name,translate[name])
                    GeneSelection = string.replace(GeneSelection,' ',' ')
                    if 'top' in GeneSelection or 'driver' in GeneSelection or 'excludeCellCycle' in GeneSelection or 'positive' in GeneSelection or 'IntraCorrelatedOnly' in GeneSelection:
                        GeneSelection+=' amplify'
                except Exception: pass
            GeneSetSelection = string.replace(GeneSetSelection,'\n',' ')
            GeneSetSelection = string.replace(GeneSetSelection,'\r',' ')
            #print [GeneSetSelection, JustShowTheseIDs, GeneSelection,ClusterGOElite,normalization]
            ### NOTE(review): the final two JustShowTheseIDs comparisons make this condition a
            ### tautology (a value cannot equal both '' and 'None Selected'), so the gsp branch
            ### always runs — 'and' was probably intended; confirm before changing behavior.
            if GeneSetSelection != 'None Selected' or GeneSelection != '' or normalization != 'NA' or JustShowTheseIDs != '' or JustShowTheseIDs != 'None Selected':
                gsp = GeneSelectionParameters(species,array_type,vendor)
                if CorrelationCutoff!=None: #len(GeneSelection)>0 and
                    gsp.setRhoCutoff(CorrelationCutoff)
                    GeneSelection = 'amplify '+GeneSelection
                gsp.setGeneSet(GeneSetSelection)
                gsp.setPathwaySelect(PathwaySelection)
                gsp.setGeneSelection(GeneSelection)
                gsp.setOntologyID(OntologyID)
                gsp.setTranspose(transpose)
                gsp.setNormalize(normalization)
                gsp.setJustShowTheseIDs(JustShowTheseIDs)
                gsp.setClusterGOElite(ClusterGOElite)
                gsp.setStoreGeneSetName(geneSetName)
                transpose = gsp ### this allows methods that don't transmit this object to also work
            if len(input_cluster_file)>0:
                analysis = 'createHeatMap'
                color_selection=string.replace(color_selection, '-','_')
                if cluster_rows == 'no': row_method = None
                if cluster_columns == 'no': column_method = None
                values = input_cluster_file, row_method, row_metric, column_method, column_metric, color_selection, transpose, contrast
                StatusWindow(values,analysis) ### display an window with download status
                AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
            else:
                print_out = "No input expression file selected."
                IndicatorWindow(print_out,'Continue')
    if additional_analyses == 'Principal Components':
        ### PCA workflow: collect plotting options from the GUI and dispatch 'performPCA'.
        selected_parameters.append('Principal Components')
        status = 'repeat'
        while status == 'repeat':
            root = Tk()
            root.title('AltAnalyze: Perform Principal Component Analysis from an Expression Matrix')
            gu = GUI(root,option_db,option_list['PCA'],'')
            try: input_cluster_file = gu.Results()['input_cluster_file']
            except Exception: input_cluster_file = ''
            dimensions = gu.Results()['dimensions']
            pca_labels = gu.Results()['pca_labels']
            pca_algorithm = gu.Results()['pca_algorithm']
            transpose = gu.Results()['transpose']
            geneSetName = gu.Results()['pcaGeneSets']
            ### Normalize empty GUI fields to None before dispatch
            if len(geneSetName)==0:
                geneSetName = None
            if len(input_cluster_file)>0:
                analysis = 'performPCA'
                if transpose == 'yes': transpose = True
                else: transpose = False
                values = input_cluster_file, pca_labels, dimensions, pca_algorithm, transpose, geneSetName, species
                StatusWindow(values,analysis) ### display an window with download status
                AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
            else:
                print_out = "No input expression file selected."
                IndicatorWindow(print_out,'Continue')
    if additional_analyses == 'Lineage Analysis':
        ### LineageProfiler workflow: set species/platform defaults, collect GUI options,
        ### then dispatch 'runLineageProfiler'.
        selected_parameters.append('Lineage Analysis')
        status = 'repeat'
        while status == 'repeat':
            root = Tk()
            if species == 'Mm':
                option_db['compendiumPlatform'].setDefaultOption('gene')
            if species == 'Hs':
                option_db['compendiumPlatform'].setDefaultOption('exon')
            if array_type == "3'array":
                option_db['compendiumType'].setArrayOptions(["protein_coding"])
            root.title('AltAnalyze: Perform Lineage Profiler Analysis from an Expression Matrix')
            gu = GUI(root,option_db,option_list['LineageProfiler'],'')
            input_exp_file = gu.Results()['input_lineage_file']
            compendiumPlatform = gu.Results()['compendiumPlatform']
            compendiumType = gu.Results()['compendiumType']
            markerFinder_file = gu.Results()['markerFinder_file']
            geneModel_file = gu.Results()['geneModel_file']
            modelDiscovery = gu.Results()['modelDiscovery']
            ### Normalize empty GUI fields to None before dispatch
            if len(geneModel_file) == 0: geneModel_file = None
            if len(modelDiscovery) == 0: modelDiscovery = None
            if len(input_exp_file)>0:
                analysis = 'runLineageProfiler'
                fl = ExpressionFileLocationData('','','','') ### Create this object to store additional parameters for LineageProfiler
                fl.setCompendiumType(compendiumType)
                fl.setCompendiumPlatform(compendiumPlatform)
                values = fl, input_exp_file, vendor, markerFinder_file, geneModel_file, modelDiscovery
                StatusWindow(values,analysis) ### display an window with download status
                AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()
            else:
                print_out = "No input expression file selected."
                IndicatorWindow(print_out,'Continue')
    if 'CEL files' in run_from_scratch or 'RNA-seq reads' in run_from_scratch or 'Feature Extraction' in run_from_scratch:
        """Designate CEL, Agilent or BED file directory, Dataset Name and Output Directory"""
        ### NOTE: 'assinged' (sic) is the completion flag used throughout this section
        assinged = 'no'
        while assinged == 'no': ### Assigned indicates whether or not the CEL directory and CDF files are defined
            if species == 'Rn' or array_type == 'RNASeq': del option_list['InputCELFiles'][-1] ### Don't examine xyb
            #print (((backSelect,selected_parameters)))
            if backSelect == 'no' or 'InputCELFiles' == selected_parameters[-1]:
                selected_parameters.append('InputCELFiles'); backSelect = 'no'
                root = Tk()
                if array_type == 'RNASeq': root.title('AltAnalyze: Select Exon and/or Junction files to analyze'); import_file = 'BED, BAM, TAB or TCGA'
                elif vendor == 'Agilent': root.title('AltAnalyze: Select Agilent Feature Extraction text files to analyze'); import_file = '.txt'
                else: root.title('AltAnalyze: Select CEL files for APT'); import_file = '.CEL'
                gu = GUI(root,option_db,option_list['InputCELFiles'],'')
            else: gu = PreviousResults(old_options)
            dataset_name = gu.Results()['dataset_name']
            try: remove_xhyb = gu.Results()['remove_xhyb']
            except KeyError: remove_xhyb = 'no'
            try:
                multiThreading = gu.Results()['multithreading']
                if multiThreading == 'yes': multiThreading = True
                else: multiThreading = False
            except KeyError: multiThreading = True
            try:
                build_exon_bedfile = gu.Results()['build_exon_bedfile']
                try: normalize_feature_exp = 'RPKM'
                except Exception: pass
            except KeyError: build_exon_bedfile = 'no'
            try:
                input_fastq_dir = gu.Results()['input_fastq_dir']
            except Exception: pass
            try: channel_to_extract = gu.Results()['channel_to_extract']
            except Exception: channel_to_extract = 'no'
            if build_exon_bedfile == 'yes':
                print_out = 'Please note: AltAnalyze will exit immediately after\nimporting your junction results to allow you to build\nyour exon count files and reload this data.'
                IndicatorWindowSimple(print_out,'Continue')
                run_from_scratch = 'buildExonExportFiles'
            if len(dataset_name)<1:
                print_out = "Please provide a name for the dataset before proceeding."
                IndicatorWindow(print_out,'Continue')
            elif 'input_cel_dir' in gu.Results() or 'input_fastq_dir' in gu.Results():
                if len(input_fastq_dir)>0:
                    ### FASTQ input path: run Kallisto just to obtain the sample names
                    import RNASeq
                    cel_files = RNASeq.runKallisto(species,'',input_fastq_dir,input_fastq_dir,returnSampleNames=True)
                    try: output_dir = gu.Results()['output_CEL_dir']
                    except KeyError: output_dir = input_fastq_dir
                    option_db['perform_alt_analysis'].setArrayOptions(['NA'])
                    option_db['exon_exp_threshold'].setArrayOptions(['NA'])
                    option_db['exon_rpkm_threshold'].setArrayOptions(['NA'])
                    option_db['expression_threshold'].setArrayOptions(['NA'])
                    option_db['gene_exp_threshold'].setArrayOptions(['NA'])
                    assinged = 'yes'
                else:
                    cel_file_dir = gu.Results()['input_cel_dir']
                    cel_files,cel_files_fn=identifyCELfiles(cel_file_dir,array_type,vendor)
                    try: output_dir = gu.Results()['output_CEL_dir']
                    except KeyError: output_dir = cel_file_dir
                    if len(output_dir)==0: output_dir = cel_file_dir
                    if len(cel_files)>0: assinged = 'yes' ### CEL files are present in this directory
                    else:
                        print_out = "No valid "+import_file+" files were found in the directory\n"+cel_file_dir+"\nPlease verify and try again."
                        IndicatorWindow(print_out,'Continue')
            else:
                print_out = "The directory containing "+import_file+" files has not\nbeen assigned! Select a directory before proceeding."
                IndicatorWindow(print_out,'Continue')
        if array_type != 'RNASeq' and vendor != 'Agilent' and len(input_fastq_dir)==0:
            ### Specific to Affymetrix CEL files
            ### Verify/locate the Affymetrix library (CDF/PGF/CLF/BGP) and CSV annotation
            ### files for the detected array, prompting the user when they are missing.
            cel_file_list_dir = exportCELFileList(cel_files_fn,cel_file_dir)
            """Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
            specific_array_types,specific_array_type = identifyArrayType(cel_files_fn); num_array_types = len(specific_array_types)
            #except Exception: null=[]; num_array_types=1; specific_array_type = None
            importSupportedArrayInfo()
            try:
                sa = supproted_array_db[specific_array_type]; array_species = sa.Species(); cel_array_type = sa.ArrayType()
            except Exception: library_dir=''; array_species=''; annotation_dir=''; cel_array_type=''
            if backSelect == 'no':
                ### Check for issues with arrays or user input options
                if num_array_types>1: ### More than one array type found in the directory
                    print_out = 'Warning!!!!!!!\n\nMultiple array_types found ("'+specific_array_types[0]+'" and "'+specific_array_types[1]+'").\nIt is recommended you restart, otherwise, APT will try\n to process all different array types together as "'+specific_array_types[-1]+'".'
                    IndicatorWindow(print_out,'Continue with Existing')
                if array_species != species and len(array_species)>0:
                    print_out = "The CEL files indicate that the proper\nspecies is "+array_species+", however, you\nindicated "+species+ ". The species indicated by the CEL\nfiles will be used instead."
                    IndicatorWindow(print_out,'Continue')
                    species = array_species
                    try: spdirs = read_directory('/AltDatabase/'+species)
                    except Exception: spdirs = []
                    if len(spdirs)==0:
                        print_out = 'Valid database directories were not found for this species.\nPlease re-install database.'
                        IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
                if cel_array_type != array_type and len(cel_array_type)>0:
                    print_out = "The CEL files indicate that the proper\narray type is "+cel_array_type+", however, you\nindicated "+array_type+ "." #The array type indicated by the CEL\nfiles will be used instead
                    #IndicatorWindow(print_out,'Continue')
                    fw = FeedbackWindow(print_out,'Use AltAnalyze Recommended',"Use Original Selected")
                    choice = fw.ButtonSelection()['button']
                    if choice == 'Use AltAnalyze Recommended':
                        array_type = cel_array_type
                        option_list,option_db = importUserOptions(array_type) ##Initially used to just get the info for species and array_type
                        option_db['array_type'].setArrayOptions(array_list)
                        #user_variables['array_type'] = array_type
            ### See if the library and annotation files are on the server or are local
            else: specific_array_type = ''; annotation_dir=''
            ### Fall back to the canonical array name for each species/platform when detection failed
            if specific_array_type == None:
                if array_type == 'exon':
                    if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
                    if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
                    if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
                elif array_type == 'gene':
                    if species == 'Hs': specific_array_type = 'HuGene-1_0-st-v1'
                    if species == 'Mm': specific_array_type = 'MoGene-1_0-st-v1'
                    if species == 'Rn': specific_array_type = 'RaGene-1_0-st-v1'
                elif array_type == 'AltMouse': specific_array_type = 'altMouseA'
            """ ### Comment this out to allow for different junction sub-types (likely do the above in the future)
            elif array_type == 'junction':
                if species == 'Hs': specific_array_type = 'HJAY_v2'
                if species == 'Mm': specific_array_type = 'MJAY_v2'
            """
            if specific_array_type in supproted_array_db:
                input_cdf_file, annotation_dir, bgp_file, clf_file = getAffyFiles(specific_array_type,species)
            else: input_cdf_file=''; bgp_file = ''; clf_file = ''
            ### Remove the variable names for Library and Annotation file selection if these files are found
            option_list_library=[]
            if len(input_cdf_file)>0:
                for i in option_list['InputLibraryFiles']:
                    if i != 'input_cdf_file': option_list_library.append(i)
            if len(annotation_dir)>0:
                for i in option_list['InputLibraryFiles']:
                    if i != 'input_annotation_file': option_list_library.append(i)
            if len(option_list_library)==0:
                option_list_library = option_list['InputLibraryFiles']
            """Identify and copy over any Libary or Annotation files on the computer"""
            if (len(input_cdf_file)==0 and len(annotation_dir) == 0) and backSelect == 'no':
                ### Note: above line used to be "or" between the input_cdf_file and annotation_dir
                ### this was discontinued in version 2.0.9 since the annotation file is no longer needed
                ### unless the array type is not in the GO-elite database
                assinged = 'no'
                while assinged == 'no': ### Assigned indicates whether or not the CEL directory and CDF files are defined
                    if array_type == "3'array":
                        op = option_db['input_cdf_file']; input_cdf_file_label = op.Display()
                        op.setNotes(' note: the CDF file is apart of the standard library files for this array. ')
                        input_cdf_file_label = string.replace(input_cdf_file_label,'PGF','CDF')
                        op.setDisplay(input_cdf_file_label)
                    if array_type == 'exon':
                        op = option_db['input_annotation_file']
                        new_notes = string.replace(op.Notes(),'this array','the Gene 1.0 array (NOT Exon)')
                        new_notes = string.replace(new_notes,'annotations','transcript cluster annotations')
                        new_display = string.replace(op.Display(),'your array','the Gene 1.0 array')
                        op.setDisplay(new_display)
                        op.setNotes(new_notes)
                    #if backSelect == 'no' or 'Library' == selected_parameters[-1]:
                    selected_parameters.append('Library')#; backSelect = 'no'
                    root = Tk(); root.title('AltAnalyze: Select Affymetrix Library and Annotation files')
                    gu = GUI(root,option_db,option_list_library,'')
                    #else: gu = PreviousResults(old_options)
                    if 'input_cdf_file' in option_list_library: ### Deals with Annotation Files
                        if 'input_cdf_file' in gu.Results():
                            input_cdf_file = gu.Results()['input_cdf_file']; input_cdf_file_lower = string.lower(input_cdf_file)
                            if array_type == "3'array":
                                if '.cdf' in input_cdf_file_lower:
                                    clf_file='';bgp_file=''; assinged = 'yes'
                                    ###Thus the CDF or PDF file was confirmed, so copy it over to AltDatabase
                                    icf_list = string.split(input_cdf_file,'/'); cdf_short = icf_list[-1]
                                    destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
                                    destination_parent = osfilepath(destination_parent+cdf_short)
                                    #print destination_parent
                                    #print input_cdf_file
                                    if destination_parent not in input_cdf_file:
                                        info_list = input_cdf_file,destination_parent; StatusWindow(info_list,'copy')
                                else:
                                    print_out = "The file;\n"+input_cdf_file+"\ndoes not appear to be a valid Affymetix\nlibrary file. If you do not have library files, you must\ngo to the Affymetrix website to download."
                                    IndicatorWindow(print_out,'Continue')
                            else:
                                if '.pgf' in input_cdf_file_lower:
                                    ###Check to see if the clf and bgp files are present in this directory
                                    icf_list = string.split(input_cdf_file,'/'); parent_dir = string.join(icf_list[:-1],'/'); cdf_short = icf_list[-1]
                                    clf_short = string.replace(cdf_short,'.pgf','.clf')
                                    kil_short = string.replace(cdf_short,'.pgf','.kil') ### Only applies to the Glue array
                                    if array_type == 'exon' or array_type == 'junction': bgp_short = string.replace(cdf_short,'.pgf','.antigenomic.bgp')
                                    else: bgp_short = string.replace(cdf_short,'.pgf','.bgp')
                                    dir_list = read_directory(parent_dir)
                                    if clf_short in dir_list and bgp_short in dir_list:
                                        pgf_file = input_cdf_file
                                        clf_file = string.replace(pgf_file,'.pgf','.clf')
                                        kil_file = string.replace(pgf_file,'.pgf','.kil') ### Only applies to the Glue array
                                        if array_type == 'exon' or array_type == 'junction': bgp_file = string.replace(pgf_file,'.pgf','.antigenomic.bgp')
                                        else: bgp_file = string.replace(pgf_file,'.pgf','.bgp')
                                        assinged = 'yes'
                                        ###Thus the CDF or PDF file was confirmed, so copy it over to AltDatabase
                                        destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
                                        #print destination_parent
                                        #print input_cdf_file
                                        if destination_parent not in input_cdf_file:
                                            info_list = input_cdf_file,osfilepath(destination_parent+cdf_short); StatusWindow(info_list,'copy')
                                            info_list = clf_file,osfilepath(destination_parent+clf_short); StatusWindow(info_list,'copy')
                                            info_list = bgp_file,osfilepath(destination_parent+bgp_short); StatusWindow(info_list,'copy')
                                            if 'Glue' in pgf_file:
                                                info_list = kil_file,osfilepath(destination_parent+kil_short); StatusWindow(info_list,'copy')
                                    else:
                                        print_out = "The directory;\n"+parent_dir+"\ndoes not contain either a .clf or antigenomic.bgp\nfile, required for probeset summarization."
                                        IndicatorWindow(print_out,'Continue')
                                else:
                                    print_out = "The file;\n"+input_cdf_file+"\ndoes not appear to be a valid Affymetix\nlibrary file. If you do not have library files, you must\ngo to the Affymetrix website to download."
                                    IndicatorWindow(print_out,'Continue')
                        else:
                            print_out = "No library file has been assigned. Please\nselect a valid library file for this array."
                            IndicatorWindow(print_out,'Continue')
                    if 'input_annotation_file' in option_list_library: ### Deals with Annotation Files
                        assinged = 'yes'
                        if 'input_annotation_file' in gu.Results():
                            input_annotation_file = gu.Results()['input_annotation_file']; input_annotation_lower = string.lower(input_annotation_file)
                            if '.csv' in input_annotation_lower:
                                ###Thus the CDF or PDF file was confirmed, so copy it over to AltDatabase
                                icf_list = string.split(input_annotation_file,'/'); csv_short = icf_list[-1]
                                destination_parent = 'AltDatabase/affymetrix/'+species+'/'
                                #print destination_parent
                                #print input_cdf_file
                                ### NOTE(review): this tests input_cdf_file but the file being copied is
                                ### input_annotation_file — likely a copy/paste bug; confirm intent.
                                if destination_parent not in input_cdf_file:
                                    info_list = input_annotation_file,filepath(destination_parent+csv_short); StatusWindow(info_list,'copy')
                            sd = SupprotedArrays(specific_array_type,cdf_short,csv_short,species,array_type)
                            supproted_array_db[specific_array_type] = sd
                            try: exportSupportedArrayInfo()
                            except Exception: continue ### Occurs if the file is open... not critical to worry about
    if run_from_scratch == 'Process Expression file':
        ### Prompt for an existing expression (and optional stats) file, validate its
        ### headers, choose an output directory and relocate the files if needed.
        status = 'repeat'
        while status == 'repeat':
            if backSelect == 'no' or 'InputExpFiles' == selected_parameters[-1]:
                root = Tk(); root.title('AltAnalyze: Select Expression File for Filtering')
                selected_parameters.append('InputExpFiles'); backSelect = 'no'
                gu = GUI(root,option_db,option_list['InputExpFiles'],'')
            else: gu = PreviousResults(old_options)
            try: input_exp_file = gu.Results()['input_exp_file']
            except KeyError: input_exp_file = '' ### Leave this blank so that the default directory is used
            try: input_stats_file = gu.Results()['input_stats_file']
            except KeyError: input_stats_file = '' ### Leave this blank so that the default directory is used
            #if array_type == 'exon':
            if 'steady-state' in input_exp_file or 'steady-state' in input_stats_file:
                print_out = "Do not select steady-state expression files.."
                IndicatorWindow(print_out,'Continue'); output_dir=''
            elif len(input_exp_file)>0:
                try: output_dir = gu.Results()['output_dir']
                except KeyError: output_dir = '' ### Leave this blank so that the default directory is used
                try: cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
                except Exception:
                    print_out = "Input Expression file does not have a valid format."
                    IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
                if len(cel_files)>0: status = 'continue'
                else:
                    print_out = "The expression file:\n"+input_exp_file+"\ndoes not appear to be a valid expression file. Check to see that\nthis is the correct tab-delimited text file."
                    IndicatorWindow(print_out,'Continue')
            else:
                print_out = "No input expression file selected."
                IndicatorWindow(print_out,'Continue')
        if len(output_dir)<1:
            ### Set output to the same directory or parent if none selected
            if 'ExpressionInput' in input_exp_file: i = -2
            else: i = -1
            output_dir = string.join(string.split(input_exp_file,'/')[:i],'/')
        if array_type == 'RNASeq':
            ### Re-classify as a generic ("3'array") platform when no counts file accompanies the exp. file
            counts_file = string.replace(input_exp_file,'exp.','counts.')
            count = verifyFileLength(counts_file)
            if count == 0 or 'exp.' not in input_exp_file: #No counts file
                systm = getGeneSystem(input_exp_file)
                ### Wrong platform listed
                array_type = "3'array"
                vendor = 'other:'+systm ### Ensembl linked system name
                if old_options==[]: ### If we haven't hit the back button
                    if len(old_options)==0:
                        option_list,option_db = importUserOptions(array_type) ### will re-set the paramater values, so not good for back select
                    user_variables['array_type'] = array_type
        #print array_type, vendor
        if array_type != 'RNASeq':
            ### This is the new option for expression filtering of non-RNASeq classified data
            try:
                #print option_db['rpkm_threshold'].DefaultOption()
                if 'rpkm_threshold' in option_db:
                    option_db['rpkm_threshold'].setArrayOptions('0')
                    if option_db['rpkm_threshold'].DefaultOption() == ['NA']:
                        option_db['rpkm_threshold'].setDefaultOption('0')
                        option_db['rpkm_threshold'].setDisplay('Remove genes expressed below (non-log)')
                else:
                    option_db['rpkm_threshold'].setArrayOptions('0')
                    option_db['rpkm_threshold'].setDefaultOption('0')
                    option_db['rpkm_threshold'].setDisplay('Remove genes expressed below (non-log)')
            except Exception:
                option_db['rpkm_threshold'].setArrayOptions('0')
                option_db['rpkm_threshold'].setDefaultOption('0')
                option_db['rpkm_threshold'].setDisplay('Remove genes expressed below (non-log)')
        if "ExpressionInput" not in output_dir and len(input_exp_file)>1 and "ExpressionInput" not in input_exp_file:
            try:
                ### If the user designates an output directory that doesn't contain ExpressionInput, move the exp-file there and rename
                output_dir = output_dir + '/ExpressionInput' ### Store the result files here so that files don't get mixed up
                try: os.mkdir(output_dir) ### Since this directory doesn't exist we have to make it
                except OSError: null = [] ### Directory already exists
                if 'exp.' not in input_exp_file: exp_prefix = 'exp.'
                else: exp_prefix=''
                moved_exp_dir = output_dir+'/'+exp_prefix+export.findFilename(input_exp_file)
                export.copyFile(input_exp_file, moved_exp_dir)
                input_exp_file = moved_exp_dir
                if len(input_stats_file)>1: ### Do the same for a stats file
                    ### NOTE(review): this tests input_exp_file, not input_stats_file — the 'stats.'
                    ### prefix decision likely meant to inspect input_stats_file; confirm.
                    if 'stats.' not in input_exp_file: stats_prefix = 'stats.'
                    else: stats_prefix=''
                    moved_stats_dir = output_dir+'/'+stats_prefix+export.findFilename(input_stats_file)
                    export.copyFile(input_stats_file, moved_stats_dir)
                    input_stats_file = moved_stats_dir
            except Exception: None
    ### Load per-platform analysis defaults and constrain gene-normalization options by vendor
    if run_from_scratch != 'buildExonExportFiles': ### Update DBs is an option which has been removed from 1.1. Should be a separate menu item soon.
        expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = importDefaults(array_type,species)
        if vendor == 'Affymetrix' or vendor == 'RNASeq':
            option_db['normalize_gene_data'].setArrayOptions(['NA']) ### Only use this option when processing Feature Extraction files or non-Affy non-RNA-Seq data
        if vendor == 'Agilent' and 'Feature Extraction' in run_from_scratch:
            option_db['normalize_gene_data'].setDefaultOption('quantile')
            option_db['normalize_gene_data'].setArrayOptions(['quantile']) ### Only set this as a default when performing Feature Extraction for Agilent data
    if run_from_scratch != 'Process AltAnalyze filtered' and run_from_scratch != 'Annotate External Results':
        ### Collect gene-expression analysis parameters from the GUI and validate the
        ### numeric thresholds; loop until all entries pass.
        proceed = 'no'
        option_db = check_moderated_support(option_db)
        while proceed == 'no':
            if backSelect == 'no' or 'GeneExpression' == selected_parameters[-1]:
                selected_parameters.append('GeneExpression'); backSelect = 'no'
                root = Tk(); root.title('AltAnalyze: Expression Analysis Parameters')
                gu = GUI(root,option_db,option_list['GeneExpression'],expr_defaults)
            else: gu = PreviousResults(old_options)
            ### Missing GUI fields default to 1/0 for RNA-Seq and 'NA' for arrays
            try: rpkm_threshold = float(gu.Results()['rpkm_threshold'])
            except Exception:
                if array_type == 'RNASeq': rpkm_threshold = 1
                else: rpkm_threshold = 'NA'
            if array_type != "3'array":
                try: dabg_p = gu.Results()['dabg_p']
                except Exception:
                    if array_type == 'RNASeq': dabg_p = 1
                    else: dabg_p = 'NA'
                try: gene_exp_threshold = gu.Results()['gene_exp_threshold']
                except Exception:
                    if array_type == 'RNASeq': gene_exp_threshold = 1
                    else: gene_exp_threshold = 'NA'
                try: exon_rpkm_threshold = gu.Results()['exon_rpkm_threshold']
                except Exception:
                    if array_type == 'RNASeq': exon_rpkm_threshold = 1
                    else: exon_rpkm_threshold = 'NA'
                try: exon_exp_threshold = gu.Results()['exon_exp_threshold']
                except Exception:
                    if array_type == 'RNASeq': exon_exp_threshold = 1
                    else: exon_exp_threshold = 'NA'
            run_from_scratch = gu.Results()['run_from_scratch']
            try: expression_threshold = gu.Results()['expression_threshold']
            except Exception:
                if array_type == 'RNASeq': expression_threshold = 0
                else: expression_threshold = 'NA'
            try: perform_alt_analysis = gu.Results()['perform_alt_analysis']
            except Exception: perform_alt_analysis = 'just expression'
            try: analyze_as_groups = gu.Results()['analyze_as_groups']
            except Exception: analyze_as_groups = ''
            ### Any selection other than 'just expression' enables the splicing analyses
            if perform_alt_analysis == 'just expression': perform_alt_analysis = 'expression'
            else: perform_alt_analysis = 'both'
            try: avg_all_for_ss = gu.Results()['avg_all_for_ss']
            except Exception: avg_all_for_ss = 'no'
            excludeNonExpExons = True
            if 'all exon aligning' in avg_all_for_ss or 'known' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
                if 'known exons' in avg_all_for_ss and array_type == 'RNASeq': excludeNonExpExons = False
                if 'known junctions' in avg_all_for_ss and array_type == 'RNASeq':
                    ### NOTE(review): 'fl' does not appear to be defined yet at this point in the
                    ### workflow — this call may raise NameError; confirm where fl is created.
                    fl.setUseJunctionsForGeneExpression(True)
                    excludeNonExpExons = False
                avg_all_for_ss = 'yes'
            else: avg_all_for_ss = 'no'
            expression_data_format = gu.Results()['expression_data_format']
            try: normalize_feature_exp = gu.Results()['normalize_feature_exp']
            except Exception: normalize_feature_exp = 'NA'
            try: normalize_gene_data = gu.Results()['normalize_gene_data']
            except Exception: normalize_gene_data = 'NA'
            include_raw_data = gu.Results()['include_raw_data']
            run_goelite = gu.Results()['run_goelite']
            visualize_results = gu.Results()['visualize_results']
            run_lineage_profiler = gu.Results()['run_lineage_profiler']
            probability_algorithm = gu.Results()['probability_algorithm']
            try: FDR_statistic = gu.Results()['FDR_statistic']
            except Exception: pass
            try: batch_effects = gu.Results()['batch_effects']
            except Exception: batch_effects = 'NA'
            try: marker_finder = gu.Results()['marker_finder']
            except Exception: marker_finder = 'NA'
            if 'immediately' in run_goelite: run_goelite = 'yes'
            else: run_goelite = 'no'
            ### Validate numeric thresholds; any failure re-displays the options window
            passed = 'yes'; print_out = 'Invalid threshold entered for '
            if array_type != "3'array" and array_type !='RNASeq':
                try:
                    dabg_p = float(dabg_p)
                    if dabg_p<=0 or dabg_p>1: passed = 'no'; print_out+= 'DABG p-value cutoff '
                except Exception: passed = 'no'; print_out+= 'DABG p-value cutoff '
            if array_type != "3'array":
                try:
                    try: rpkm_threshold = float(rpkm_threshold)
                    except Exception:
                        expression_threshold = float(expression_threshold)
                        if expression_threshold<1: passed = 'no'; print_out+= 'expression threshold '
                except Exception: passed = 'no'; print_out+= 'expression threshold '
            if array_type == 'RNASeq':
                try:
                    rpkm_threshold = float(rpkm_threshold)
                    if rpkm_threshold<0: passed = 'no'; print_out+= 'RPKM threshold '
                except Exception: passed = 'no'; print_out+= 'RPKM threshold '
                try:
                    exon_exp_threshold = float(exon_exp_threshold)
                    if exon_exp_threshold<0: passed = 'no'; print_out+= 'Exon expression threshold '
                except Exception: passed = 'no'; print_out+= 'Exon expression threshold '
                try:
                    exon_rpkm_threshold = float(exon_rpkm_threshold)
                    if exon_rpkm_threshold<0: passed = 'no'; print_out+= 'Exon RPKM threshold '
                except Exception: passed = 'no'; print_out+= 'Exon RPKM threshold '
                try:
                    gene_exp_threshold = float(gene_exp_threshold)
                    if gene_exp_threshold<0: passed = 'no'; print_out+= 'Gene expression threshold '
                except Exception: passed = 'no'; print_out+= 'Gene expression threshold '
            if visualize_results == 'yes':
                try:
                    ### Tests to make sure these are installed - required for visualization
                    try: import matplotlib as mpl
                    except Exception: from matplotlib import mpl
                    from numpy import array
                    from scipy import rand
                except Exception:
                    passed = 'no'; print_out = 'Support for matplotlib, numpy and scipy must specifically be installed to perform data visualization.\n'
                    print_out += traceback.format_exc() ### useful for seeing a warning window with the actuall error
            if passed == 'no': IndicatorWindow(print_out,'Continue')
            else: proceed = 'yes'
        if run_lineage_profiler == 'yes':
            verifyLineageProfilerDatabases(species,'GUI')
if (perform_alt_analysis == 'both') or (run_from_scratch == 'Process AltAnalyze filtered') or (run_from_scratch == 'Annotate External Results'):
perform_alt_analysis = 'yes'
if run_from_scratch == 'Process AltAnalyze filtered':
input_filtered_dir = ''
while len(input_filtered_dir)<1:
if backSelect == 'no' or 'InputFilteredFiles' == selected_parameters[-1]:
selected_parameters.append('InputFilteredFiles'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Select AltAnalyze Filtered Probe set Files')
gu = GUI(root,option_db,option_list['InputFilteredFiles'],'')
else: gu = PreviousResults(old_options)
try:
input_filtered_dir = gu.Results()['input_filtered_dir']
if 'FullDataset' in input_filtered_dir: alt_exon_defaults[3] = 'all groups'
except Exception: input_filtered_dir = ''
if input_filtered_dir == '':
print_out = "The directory containing filtered probe set text files has not\nbeen assigned! Select a valid directory before proceeding."
IndicatorWindow(print_out,'Continue')
fl = ExpressionFileLocationData('','','',''); dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir,'AltExpression'); parent_dir = dirs[0]
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
if run_from_scratch == 'Annotate External Results':
input_filtered_dir = ''
while len(input_filtered_dir)<1:
if backSelect == 'no' or 'InputExternalFiles' == selected_parameters[-1]:
selected_parameters.append('InputExternalFiles'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Select AltAnalyze Filtered Probe set Files')
gu = GUI(root,option_db,option_list['InputExternalFiles'],'')
else: gu = PreviousResults(old_options)
try: input_filtered_dir = gu.Results()['input_external_dir']
except Exception: input_filtered_dir = ''
if input_filtered_dir == '':
print_out = "The directory containing external probe set text files has not\nbeen assigned! Select a valid directory before proceeding."
IndicatorWindow(print_out,'Continue')
fl = ExpressionFileLocationData('','','',''); dataset_name = 'external-results_dir'
dirs = string.split(input_filtered_dir,'AltExpression'); parent_dir = dirs[0]
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
#print option_list[i:i+len(alt_exon_defaults)+len(functional_analysis_defaults)], alt_exon_defaults+functional_analysis_defaults;kill
option_list,option_db = importUserOptions(array_type) ##Initially used to just get the info for species and array_type
if backSelect == 'yes':
for option in old_options: ### Set options to user selected
try: option_db[option].setDefaultOption(old_options[option])
except Exception: null=[]
if run_from_scratch == 'Process AltAnalyze filtered':
if array_type == 'RNASeq': cs_name = 'known exons'
else: cs_name = 'constitutive probesets'
functional_analysis_defaults.append(cs_name); option_list['AltAnalyze'].append('avg_all_for_ss')
if run_goelite == 'no': ### run_goelite will be set to no by default
functional_analysis_defaults.append('unpaired t-test'); option_list['AltAnalyze'].append('probability_algorithm')
functional_analysis_defaults.append('decide later'); option_list['AltAnalyze'].append('run_goelite')
if run_from_scratch == 'Annotate External Results':
### Remove options relating to expression analysis when importing filtered probeset lists
options_to_exclude = ['analysis_method','p_threshold','gene_expression_cutoff','alt_exon_fold_cutoff','run_MiDAS']
options_to_exclude+= ['export_splice_index_values','probability_algorithm','run_goelite','analyze_all_conditions','calculate_splicing_index_p']
for option in options_to_exclude: del option_db[option]
proceed = 'no'
while proceed == 'no':
if backSelect == 'no' or 'AltAnalyze' == selected_parameters[-1]:
selected_parameters.append('AltAnalyze'); backSelect = 'no'; proceed = 'no'
root = Tk(); root.title('AltAnalyze: Alternative Exon Analysis Parameters')
gu = GUI(root,option_db,option_list['AltAnalyze'],alt_exon_defaults+functional_analysis_defaults); #user_variables = {};
try: analyze_all_conditions = gu.Results()['analyze_all_conditions']
except KeyError: analyze_all_conditions = 'pairwise'
if analyze_all_conditions != 'pairwise':
print_out = 'Please note: When AltAnalyze compares all groups, the\nalternative exon fold to be filtered will be based on the\nlargest alternative exon fold for all possible comparisons.'
IndicatorWindowSimple(print_out,'Continue')
else: gu = PreviousResults(old_options)
try: analysis_method = gu.Results()['analysis_method']
except Exception: analysis_method = analysis_method
try: p_threshold = gu.Results()['p_threshold']
except Exception: p_threshold = 0.05
try: gene_expression_cutoff = gu.Results()['gene_expression_cutoff']
except Exception: gene_expression_cutoff = 3
try: remove_intronic_junctions = gu.Results()['remove_intronic_junctions']
except Exception: remove_intronic_junctions = 'NA'
try: filter_probeset_types = gu.Results()['filter_probe_types']
except Exception: filter_probeset_types = 'core'
try: alt_exon_fold_cutoff = gu.Results()['alt_exon_fold_cutoff']
except KeyError: alt_exon_fold_cutoff = 2
try: permute_p_threshold = gu.Results()['permute_p_threshold']
except KeyError: permute_p_threshold = 0.05 ### Doesn't matter, not used
try:
additional_algorithms = gu.Results()['additional_algorithms']
additional_algorithms = AdditionalAlgorithms(additional_algorithms)
except KeyError: additionalAlgorithms = AdditionalAlgorithms('')
try:
additional_score = gu.Results()['additional_score']
additional_algorithms.setScore(additional_score)
except Exception:
try: additional_algorithms.setScore(2)
except Exception: null=[]
try: perform_permutation_analysis = gu.Results()['perform_permutation_analysis']
except KeyError: perform_permutation_analysis = perform_permutation_analysis
try: export_splice_index_values = gu.Results()['export_splice_index_values']
except KeyError: export_splice_index_values = export_splice_index_values
try: run_MiDAS = gu.Results()['run_MiDAS']
except KeyError: run_MiDAS = run_MiDAS
try: analyze_all_conditions = gu.Results()['analyze_all_conditions']
except KeyError: analyze_all_conditions = analyze_all_conditions
try: run_goelite = gu.Results()['run_goelite']
except KeyError: run_goelite = run_goelite
try: probability_algorithm = gu.Results()['probability_algorithm']
except KeyError: probability_algorithm = probability_algorithm
try:
avg_all_for_ss = gu.Results()['avg_all_for_ss']
if 'all exon aligning' in avg_all_for_ss or 'known' in avg_all_for_ss or 'core' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
avg_all_for_ss = 'yes'
else: avg_all_for_ss = 'no'
except Exception:
try: avg_all_for_ss = avg_all_for_ss
except Exception: avg_all_for_ss = 'no'
if 'immediately' in run_goelite: run_goelite = 'yes'
else: run_goelite = 'no'
try: calculate_splicing_index_p = gu.Results()['calculate_splicing_index_p']
except KeyError: calculate_splicing_index_p = calculate_splicing_index_p
analyze_functional_attributes = gu.Results()['analyze_functional_attributes']
filter_for_AS = gu.Results()['filter_for_AS']
microRNA_prediction_method = gu.Results()['microRNA_prediction_method']
if analysis_method == 'splicing-index': p_threshold = float(p_threshold)
else:
try: p_threshold = float(permute_p_threshold)
except ValueError: permute_p_threshold = permute_p_threshold
if analysis_method == 'linearregres-rlm':
### Test installation of rpy and/or R
x = [5.05, 6.75, 3.21, 2.66]; y = [1.65, 26.5, -5.93, 7.96]
try: s = statistics.LinearRegression(x,y,'no')
except Exception:
print_out = "The local installation of R and rpy is missing or\nis not properly configured. See the AltAnalyze ReadMe\nfor more information (may require loading AltAnalyze from source code)."
IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
passed = 'yes'; print_out = 'Invalid threshold entered for '
try: gene_expression_cutoff = float(gene_expression_cutoff)
except Exception: passed = 'no'; print_out+= 'gene expression cutoff'
try: alt_exon_fold_cutoff = float(alt_exon_fold_cutoff)
except Exception: passed = 'no'; print_out+= 'alternative exon fold change'
try: p_threshold = float(p_threshold)
except Exception: passed = 'no'; print_out+= 'alternative exon p-value'
if gene_expression_cutoff <= 1: passed = 'no'; print_out+= 'gene expression cutoff'
elif alt_exon_fold_cutoff < 1:
if analysis_method == 'splicing-index': passed = 'no'; print_out+= 'splicing-index fold change'
elif alt_exon_fold_cutoff < 0: passed = 'no'; print_out+= 'alternative exon fold change'
elif p_threshold <= 0: passed = 'no'; print_out+= 'alternative exon p-value'
if passed == 'no': IndicatorWindow(print_out,'Continue')
else: proceed = 'yes'
if run_goelite == 'yes':
option_db['get_additional'].setArrayOptions(['---']+importResourceList())
option_db['get_additional'].setDefaultOption('---')
### Populate variables based on the existing imported data
default_resources = option_db['resources_to_analyze'].ArrayOptions() ### Include alternative ontologies and gene-lists
import_dir1 = '/AltDatabase/goelite/'+species+'/gene-mapp'
import_dir2 = '/AltDatabase/goelite/'+species+'/gene-go'
try:
gene_mapp_list = read_directory(import_dir1)
gene_mapp_list.sort()
for file in gene_mapp_list:
resource = string.split(file,'-')[-1][:-4]
if resource != 'MAPP' and resource not in default_resources and '.txt' in file:
default_resources.append(resource)
except Exception: null=[]
try:
gene_go_list = read_directory(import_dir2)
gene_go_list.sort()
for file in gene_go_list:
resource = string.split(file,'-')[-1][:-4]
if resource != 'GeneOntology' and resource not in default_resources and 'version' not in resource and '.txt' in file:
default_resources.append(resource)
except Exception: null=[]
option_db['resources_to_analyze'].setArrayOptions(default_resources)
if run_from_scratch == 'Process AltAnalyze filtered':
### Do not include gene expression analysis filters
option_list['GOElite'] = option_list['GOElite'][3:]; goelite_defaults = goelite_defaults[3:]
if backSelect == 'no' or 'GOElite' == selected_parameters[-1]:
selected_parameters.append('GOElite'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Pathway Analysis Parameters')
gu = GUI(root,option_db,option_list['GOElite'],goelite_defaults)
else: gu = PreviousResults(old_options)
if run_from_scratch != 'Process AltAnalyze filtered':
ge_fold_cutoffs = gu.Results()['ge_fold_cutoffs']
ge_pvalue_cutoffs = gu.Results()['ge_pvalue_cutoffs']
ge_ptype = gu.Results()['ge_ptype']
filter_method = gu.Results()['filter_method']
z_threshold = gu.Results()['z_threshold']
returnPathways = gu.Results()['returnPathways']
p_val_threshold = gu.Results()['p_val_threshold']
change_threshold = gu.Results()['change_threshold']
resources_to_analyze = gu.Results()['resources_to_analyze']
pathway_permutations = gu.Results()['pathway_permutations']
get_additional = gu.Results()['get_additional']
ORA_algorithm = gu.Results()['ORA_algorithm']
mod = gu.Results()['mod']
ge_fold_cutoffs = float(ge_fold_cutoffs)
change_threshold = float(change_threshold) - 1 ### This reflects the > statement in the GO-Elite filtering
if ORA_algorithm == 'Fisher Exact Test':
pathway_permutations = 'FisherExactTest'
if get_additional != '---':
analysis = 'getAdditionalOnlineResources'
values = species,get_additional
StatusWindow(values,analysis) ### display an window with download status
except OSError:
null=[]; sys.exit()
"""In this next section, create a set of GUI windows NOT defined by the options.txt file.
These are the groups and comps files"""
original_comp_group_list=[]; array_group_list=[]; group_name_list=[]
if run_from_scratch != 'Process AltAnalyze filtered' and run_from_scratch != 'Annotate External Results': ### Groups and Comps already defined
if run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name+'.txt'
groups_name = string.replace(dataset_name,'exp.','groups.')
comps_name = string.replace(dataset_name,'exp.','comps.')
batch_name = string.replace(groups_name,'groups.','batch.') ### may not apply
if "ExpressionInput" not in output_dir:
output_dir = output_dir + '/ExpressionInput' ### Store the result files here so that files don't get mixed up
try: os.mkdir(output_dir) ### Since this directory doesn't exist we have to make it
except OSError: null = [] ### Directory already exists
exp_file_dir = output_dir+'/'+dataset_name
### store file locations (also use these later when running APT)
stats_file_dir = string.replace(exp_file_dir,'exp.','stats.')
groups_file_dir = string.replace(exp_file_dir,'exp.','groups.')
comps_file_dir = string.replace(exp_file_dir,'exp.','comps.')
batch_file_dir = string.replace(groups_file_dir, 'groups.','batch.')
fl = ExpressionFileLocationData(exp_file_dir,stats_file_dir,groups_file_dir,comps_file_dir)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
parent_dir = output_dir ### interchangable terms (parent_dir used with expression file import)
print groups_file_dir
if run_from_scratch == 'Process Expression file':
if len(input_exp_file)>0:
if len(input_stats_file)>1: ###Make sure the files have the same arrays and order first
try: cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(input_stats_file)
except Exception:
print_out = "Input Expression file does not have a valid format."
IndicatorWindow(print_out,'Continue'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
if cel_files2 != cel_files:
print_out = "The probe set p-value file:\n"+input_stats_file+"\ndoes not have the same array order as the\nexpression file. Correct before proceeding."
IndicatorWindow(print_out,'Continue')
### Check to see if a groups/comps file already exists and add file locations to 'exp_file_location_db'
ief_list = string.split(input_exp_file,'/'); parent_dir = string.join(ief_list[:-1],'/'); exp_name = ief_list[-1]
dataset_name = string.replace(exp_name,'exp.','')
groups_name = 'groups.'+dataset_name; comps_name = 'comps.'+dataset_name
batch_name = string.replace(groups_name,'groups.','batch.') ### may not apply
groups_file_dir = parent_dir+'/'+groups_name; comps_file_dir = parent_dir+'/'+comps_name
batch_file_dir = string.replace(groups_file_dir, 'groups.','batch.')
fl = ExpressionFileLocationData(input_exp_file,input_stats_file,groups_file_dir,comps_file_dir)
dataset_name = exp_name
exp_file_location_db={}; exp_file_location_db[exp_name]=fl
else:
### This occurs if running files in the ExpressionInput folder. However, if so, we won't allow for GUI based creation of groups and comps files (too complicated and confusing for user).
### Grab all expression file locations, where the expression, groups and comps file exist for a dataset
exp_file_location_db = importExpressionFiles() ###Don't create 'array_group_list', but pass the 'exp_file_location_db' onto ExpressionBuilder
### Import array-group and group comparisons. Only time relevant for probesetSummarization is when an error is encountered and re-running
try: dir_files = read_directory(parent_dir)
except Exception: dir_files=[]
array_group_list=[]
array_batch_list=[]
if backSelect == 'yes':
for cel_file in cel_files:
if cel_file in user_variables:
group_name = user_variables[cel_file]; group = ''
else:
group = ''; group_name = ''
agd = ArrayGroupData(cel_file,group,group_name); array_group_list.append(agd)
if batch_effects == 'yes': ### Used during backselect (must include a 'batch' variable in the stored var name)
if (cel_file,'batch') in user_variables:
batch_name = user_variables[cel_file,'batch']; batch = ''
else:
batch = ''; batch_name = ''
agd = ArrayGroupData(cel_file,batch,batch_name); array_batch_list.append(agd); batch_db=[]
elif run_from_scratch == 'buildExonExportFiles':
fl = ExpressionFileLocationData('','','',''); fl.setExonBedBuildStatus('yes'); fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type); fl.setOutputDir(output_dir); fl.setMultiThreading(multiThreading)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl; parent_dir = output_dir
perform_alt_analysis = 'expression'
elif groups_name in dir_files:
try:
### Try to import any current annotations and verify that the samples indicated in the input directory are in the corresponding groups file
array_group_list,group_db = importArrayGroupsSimple(groups_file_dir,cel_files) #agd = ArrayGroupData(array_header,group,group_name)
except Exception,e:
### Over-write these annotations if theres is a problem
for cel_file in cel_files:
group = ''; group_name = ''
agd = ArrayGroupData(cel_file,group,group_name); array_group_list.append(agd); group_db=[]
if batch_effects == 'yes':
if batch_name in dir_files: ### Almost identical format and output files (import existing if present here)
try:
array_batch_list,batch_db = importArrayGroupsSimple(batch_file_dir,cel_files) #agd = ArrayGroupData(array_header,group,group_name)
except Exception,e:
for cel_file in cel_files:
batch = ''; batch_name = ''
agd = ArrayGroupData(cel_file,batch,batch_name); array_batch_list.append(agd); batch_db=[]
else:
for cel_file in cel_files:
batch = ''; batch_name = ''
agd = ArrayGroupData(cel_file,batch,batch_name); array_batch_list.append(agd); batch_db=[]
if comps_name in dir_files and len(group_db)>0:
comp_group_list, null = ExpressionBuilder.importComparisonGroups(comps_file_dir)
for group1,group2 in comp_group_list:
try: group_name1 = group_db[int(group1)]; group_name2 = group_db[int(group2)]
except KeyError:
print_out = 'The "comps." file for this dataset has group numbers\nnot listed in the "groups." file.'
WarningWindow(print_out,'Exit'); AltAnalyze.AltAnalyzeSetup('no'); sys.exit()
original_comp_group_list.append((group_name1,group_name2)) ### If comparisons already exist, default to these
else:
for cel_file in cel_files:
group = ''; group_name = ''
agd = ArrayGroupData(cel_file,group,group_name); array_group_list.append(agd)
if len(array_group_list)>0: ### Thus we are not analyzing the default (ExpressionInput) directory of expression, group and comp data.
original_option_db,original_option_list = option_db,option_list
option_db,option_list = formatArrayGroupsForGUI(array_group_list)
###Force this GUI to repeat until the user fills in each entry, but record what they did add
user_variables_long={}
while len(user_variables_long) != len(option_db):
if backSelect == 'no' or 'GroupArrays' == selected_parameters[-1]:
selected_parameters.append('GroupArrays'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Assign files to a Group Annotation'); user_variables_long={}
#import copy; user_variables_original = copy.deepcopy(user_variables); user_variables={}
gu = GUI(root,option_db,option_list['GroupArrays'],'groups')
else: gu = PreviousResults(old_options)
try: predictGroups = gu.Results()['PredictGroups']
except Exception: predictGroups = False
for option in user_variables: ### By default, all arrays will be assigned a group of ''
try:
if len(user_variables[option])>0 and 'batch' not in option:
if option in option_db: user_variables_long[option]=[]
except Exception: null=[]
###Store the group names and assign group numbers
group_name_db={}; group_name_list = []; group_number = 1
for cel_file in option_list['GroupArrays']: ### start we these CEL files, since they are ordered according to their order in the expression dataset
group_name = gu.Results()[cel_file]
if group_name not in group_name_db:
if group_name != 'yes' and group_name !='no': ### Results for PredictGroups
group_name_db[group_name]=group_number; group_number+=1
group_name_list.append(group_name)
if len(group_name_db)==2: analyze_all_conditions = 'pairwise' ### Don't allow multiple comparison analysis if only two conditions present
###Store the group names and numbers with each array_id in memory
for agd in array_group_list:
cel_file = agd.Array()
group_name = gu.Results()[cel_file] ###Lookup the new group assignment entered by the user
group_number = group_name_db[group_name]
agd.setGroupName(group_name); agd.setGroup(group_number)
if predictGroups == 'yes':
predictGroups = True; break
elif predictGroups == 'no': predictGroups = False
elif (len(user_variables_long) != len(option_db)) or len(group_name_db)<2:
if len(group_name_db)<2:
print_out = "At least two array groups must be established\nbefore proceeding."
else:
print_out = "Not all arrays have been assigned a group. Please\nassign to a group before proceeding (required)."
IndicatorWindow(print_out,'Continue')
option_db,option_list = formatArrayGroupsForGUI(array_group_list) ### array_group_list at this point will be updated with any changes made in the GUI by the user
if predictGroups == False:
exported = 0 ### Export Groups file
while exported == 0:
try:
fl = exp_file_location_db[dataset_name]; groups_file = fl.GroupsFile()
exportGroups(exp_file_location_db,array_group_list)
exported = 1
except Exception:
print_out = "The file:\n"+groups_file+"\nis still open. This file must be closed before proceeding"
IndicatorWindow(print_out,'Continue')
exported = 0
if batch_effects == 'yes':
option_db,option_list = formatArrayGroupsForGUI(array_batch_list, category = 'BatchArrays')
###Force this GUI to repeat until the user fills in each entry, but record what they did add
user_variables_long={}
while len(user_variables_long) != len(option_db):
if backSelect == 'no' or 'BatchArrays' == selected_parameters[-1]:
selected_parameters.append('BatchArrays'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Indicate Which Batch a File is From'); user_variables_long={}
#import copy; user_variables_original = copy.deepcopy(user_variables); user_variables={}
gu = GUI(root,option_db,option_list['BatchArrays'],'batch')
else: gu = PreviousResults(old_options)
for option in user_variables: ### By default, all arrays will be assigned a batch of ''
try:
if len(user_variables[option])>0 and 'batch' in option:
if option[0] in option_db: user_variables_long[option]=[]
except Exception: null=[]
###Store the batch names and assign batch numbers
batch_name_db={}; batch_name_list = []; batch_number = 1
print option_list['BatchArrays']
for cel_file in option_list['BatchArrays']: ### start we these CEL files, since they are ordered according to their order in the expression dataset
batch_name = gu.Results()[cel_file,'batch']
if batch_name not in batch_name_db:
batch_name_db[batch_name]=batch_number; batch_number+=1
batch_name_list.append(batch_name)
if len(batch_name_db)==2: analyze_all_conditions = 'pairwise' ### Don't allow multiple comparison analysis if only two conditions present
###Store the batch names and numbers with each array_id in memory
for agd in array_batch_list:
cel_file = agd.Array()
batch_name = gu.Results()[cel_file,'batch'] ###Lookup the new batch assignment entered by the user
batch_number = batch_name_db[batch_name]
agd.setGroupName(batch_name); agd.setGroup(batch_number)
if (len(user_variables_long) != len(option_db)) or len(batch_name_db)<2:
if len(batch_name_db)<2:
print_out = "At least two sample batchs must be established\nbefore proceeding."
else:
print_out = "Not all arrays have been assigned a batch. Please\nassign to a batch before proceeding (required)."
IndicatorWindow(print_out,'Continue')
option_db,option_list = formatArrayGroupsForGUI(array_batch_list, category = 'BatchArrays') ### array_batch_list at this point will be updated with any changes made in the GUI by the user
exported = 0 ### Export Batch file
while exported == 0:
try:
fl = exp_file_location_db[dataset_name]
exportGroups(exp_file_location_db,array_batch_list,filetype='Batch')
exported = 1
except Exception:
print_out = "The file:\n"+batch_file_dir+"\nis still open. This file must be closed before proceeding"
IndicatorWindow(print_out,'Continue')
exported = 0
i=2; px=0 ###Determine the number of possible comparisons based on the number of groups
while i<=len(group_name_list): px = px + i - 1; i+=1
group_name_list.reverse(); group_name_list.append(''); group_name_list.reverse() ### add a null entry first
if px > 150: px = 150 ### With very large datasets, AltAnalyze stalls
possible_comps = px
### Format input for GUI like the imported options.txt Config file, except allow for custom fields in the GUI class
category = 'SetupComps'; option_db={}; option_list={}; cn = 0 #; user_variables={}
while cn < px:
try: group1,group2 = original_comp_group_list[cn]
except IndexError: group1='';group2=''
cn+=1; option = 'comparison '+str(cn); array_options = group_name_list; displayed_title=option; display_object='pulldown_comps'; notes=[group1,group2]
od = OptionData(option,displayed_title,display_object,notes,array_options,'')
option_db[option] = od
try: option_list[category].append(option) ###group is the name of the GUI menu group
except KeyError: option_list[category] = [option]
proceed = 'no'
while proceed == 'no' and analyze_all_conditions != 'all groups':
identical_groups = 'no'; comp_groups_db={}; proceed = 'no'
if (backSelect == 'no' or 'SetupComps' == selected_parameters[-1]):
selected_parameters.append('SetupComps'); backSelect = 'no'
root = Tk(); root.title('AltAnalyze: Establish All Pairwise Comparisons')
gu = GUI(root,option_db,option_list['SetupComps'],'comps')
else: gu = PreviousResults(old_options)
### Sort comparisons from user for export
for comparison in gu.Results():
try:
group_name = gu.Results()[comparison]
if len(group_name)>0 and 'comparison' in comparison: ### Group_names are by default blank
cn_main,cn_minor = string.split(comparison[11:],'-') ### e.g. 1-1 and 1-2
try:
null = int(cn_main); null = int(cn_minor)
try: comp_groups_db[cn_main].append([cn_minor,group_name])
except KeyError: comp_groups_db[cn_main]=[[cn_minor,group_name]]
except Exception: null=[]
except Exception: null=[]
print_out = "You must pick at least one comparison group before proceeding."
if len(comp_groups_db)>0:
try:
comp_group_list=[]
for cn_main in comp_groups_db:
cg = comp_groups_db[cn_main]
cg.sort()
comp_group_list.append([cn_main,[group_name_db[cg[0][1]],group_name_db[cg[1][1]]]])
if cg[0][1] == cg[1][1]: identical_groups = 'yes' ### Thus the two groups in the comparisons are identical, flag
comp_group_list.sort()
proceed = 'yes'
except Exception:
print traceback.format_exc()
print_out = "You must pick at least two groups for each comparison."
if identical_groups == 'yes': proceed = 'no'; print_out = "The same group is listed as both the experimental and\ncontrol group in a comparison. Fix before proceeding."
if proceed == 'no': IndicatorWindow(print_out,'Continue')
### Export user modified comps files
while exported == 0:
try:
fl = exp_file_location_db[dataset_name]; comps_file = fl.CompsFile()
if analyze_all_conditions != 'all groups': exportComps(exp_file_location_db,comp_group_list)
exported = 1
except Exception:
print_out = "The file:\n"+comps_file+"\nis still open. This file must be closed before proceeding"
IndicatorWindow(print_out,'Continue')
### See if there are any Affymetrix annotation files for this species
import_dir = '/AltDatabase/affymetrix/'+species
try: dir_list = read_directory(import_dir); fn_dir = filepath(import_dir[1:]); species_dir_found = 'yes'
except Exception: fn_dir = filepath(import_dir); dir_list = []; species_dir_found = 'no'
### Used to check if the user has an Affymetrix CSV file around... no longer needed
"""
if (len(dir_list)<1 or species_dir_found == 'no') and array_type != 'exon':
print_out = 'No Affymetrix annnotations file found in the directory:\n'+fn_dir
print_out += '\n\nTo download, click on the below button, find your array and download the annotation CSV file'
print_out += '\nlisted under "Current NetAffx Annotation Files". Extract the compressed zip archive to the'
print_out += '\nabove listed directory and hit continue to include these annotations in your results file.'
button_text = 'Download Annotations'; url = 'http://www.affymetrix.com/support/technical/byproduct.affx?cat=arrays'
IndicatorLinkOutWindow(print_out,button_text,url)
"""
if len(input_fastq_dir)>0:
array_type = "3'array"
vendor = 'other:Ensembl' ### Ensembl linked system name
if microRNA_prediction_method == 'two or more': microRNA_prediction_method = 'multiple'
else: microRNA_prediction_method = 'any'
try: permute_p_threshold = float(permute_p_threshold)
except ValueError: permute_p_threshold = permute_p_threshold
try: dabg_p = float(dabg_p)
except ValueError: dabg_p = dabg_p
try: expression_threshold = float(expression_threshold)
except ValueError: expression_threshold = expression_threshold
try: alt_exon_fold_cutoff = float(alt_exon_fold_cutoff)
except ValueError: alt_exon_fold_cutoff = alt_exon_fold_cutoff
try: gene_expression_cutoff = float(gene_expression_cutoff)
except ValueError: gene_expression_cutoff = gene_expression_cutoff
### Find the current verison of APT (if user deletes location in Config file) and set APT file locations
apt_location = getAPTLocations(file_location_defaults,run_from_scratch,run_MiDAS)
### Set the primary parent directory for ExpressionBuilder and AltAnalyze (one level above the ExpressionInput directory, if present)
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
fl.setInputCDFFile(input_cdf_file); fl.setCLFFile(clf_file); fl.setBGPFile(bgp_file); fl.setXHybRemoval(remove_xhyb)
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type); fl.setOutputDir(output_dir)
fl.setChannelToExtract(channel_to_extract)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir); fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setRunKallisto(input_fastq_dir);
if array_type != 'gene' and array_type != "3'array":
compendiumPlatform = 'exon'
fl = exp_file_location_db[dataset]; fl.setRootDir(parent_dir)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_algorithm)
fl.setBatchEffectRemoval(batch_effects)
fl.setMarkerFinder(marker_finder)
fl.setProducePlots(visualize_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(vendor)
try: fl.setFDRStatistic(FDR_statistic)
except Exception: pass
try: fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception: fl.setExcludeLowExpressionExons(True)
try: fl.setPredictGroups(predictGroups)
except Exception: fl.setPredictGroups(False)
try: fl.setPredictGroupsParams(gsp)
except Exception: pass
fl.setMultiThreading(multiThreading)
if run_from_scratch == 'Process Expression file':
fl.setRootDir(output_dir) ### When the data is not primary array data files, allow for option selection of the output directory
fl.setOutputDir(output_dir)
try: fl.setRPKMThreshold(rpkm_threshold)
except Exception: pass
try: fl.setGeneExpThreshold(gene_exp_threshold)
except Exception: pass
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
try: fl.setMLP(mlp)
except Exception: pass
if predictGroups:
### Single-Cell Analysis Parameters
try: option_db,option_list=original_option_db,original_option_list ### was re-set above... needed to get the propper data from the last loop
except Exception: option_list,option_db = importUserOptions(array_type,vendor=vendor)
selected_parameters.append('PredictGroups')
supported_geneset_types = getSupportedGeneSetTypes(species,'gene-mapp')
supported_geneset_types += getSupportedGeneSetTypes(species,'gene-go')
option_db['GeneSetSelectionPredict'].setArrayOptions(['None Selected']+supported_geneset_types)
option_db['PathwaySelectionPredict'].setArrayOptions(['None Selected'])
#option_db['PathwaySelection'].setArrayOptions(supported_genesets)
status = 'repeat'
while status == 'repeat':
root = Tk()
root.title('AltAnalyze: Predict Sample Groups')
### Run in GUI and wait to be executed
gu = GUI(root,option_db,option_list['PredictGroups'],'')
### Permission to run full analsyis is granted, proceed
gsp = gu.Results()['gsp']
status = 'continue'
import RNASeq
expFile = fl.ExpFile()
mlp_instance = fl.MLP()
global logfile
root_dir = export.findParentDir(expFile)
root_dir = string.replace(root_dir,'/ExpressionInput','')
time_stamp = AltAnalyze.timestamp()
logfile = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
if count>1:
expFile = expFile[:-4]+'-steady-state.txt'
elif array_type=='RNASeq' or len(input_fastq_dir)>0:
### Indicates that the steady-state file doesn't exist. The exp. may exist, be could be junction only so need to re-build from bed files here
values = species,exp_file_location_db,dataset,mlp_instance
StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
if array_type=='RNASeq':
expFile = expFile[:-4]+'-steady-state.txt'
"""
else:
print_out = 'WARNING... Prior to running ICGS, you must first run AltAnalyze\nusing assigned groups for this array type.'
IndicatorWindow(print_out,'Continue')
AltAnalyze.AltAnalyzeSetup((selected_parameters[:-1],user_variables)); sys.exit()"""
values = expFile, mlp_instance, gsp, False
StatusWindow(values,'predictGroups') ### proceed to run the full discovery analysis here!!!
if len(graphic_links)>0:
root = Tk()
root.title('AltAnalyze: Evaluate Sampled Groupings')
### Review results in custom GUI for predicting groups
gu = GUI(root,'PredictGroups',[],'')
nextStep = gu.Results()['next']
group_selected = gu.Results()['group_select']
if nextStep == 'UseSelected':
group_selected = group_selected[:-4]+'.txt'
RNASeq.exportGroupsFromClusters(group_selected,fl.ExpFile(),array_type)
run_from_scratch = 'Process Expression file'
else:
#print 're-initializing window'
AltAnalyze.AltAnalyzeSetup((selected_parameters,user_variables)); sys.exit()
else:
AltAnalyze.AltAnalyzeSetup((selected_parameters,user_variables)); sys.exit()
expr_var = species,array_type,vendor,constitutive_source,dabg_p,expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data, run_from_scratch, perform_alt_analysis
alt_var = analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_cutoff,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold, perform_permutation_analysis, export_splice_index_values, analyze_all_conditions
additional_var = calculate_splicing_index_p, run_MiDAS, analyze_functional_attributes, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,pathway_permutations,mod,returnPathways
return expr_var, alt_var, additional_var, goelite_var, exp_file_location_db
def getAPTLocations(file_location_defaults,run_from_scratch,run_MiDAS):
    # Return the stored Affymetrix Power Tools (APT) directory, prompting the
    # user to choose one when no location has been saved yet.
    # NOTE(review): if 'APT' is missing from file_location_defaults, fl and
    # apt_location are never bound and len(apt_location) raises NameError;
    # likewise print_out is unbound when neither prompt condition applies --
    # presumably callers guarantee these cases never occur; verify.
    import ResultsExport_module
    if 'APT' in file_location_defaults:
        fl = file_location_defaults['APT']
        apt_location = fl.Location() ###Only one entry for all species
        if len(apt_location)<1: ###If no APT version is designated, prompt the user to find the directory
            if run_from_scratch == 'CEL_summarize':
                print_out = 'To proceed with probeset summarization from CEL files,\nyou must select a valid Affymetrix Power Tools Directory.'
            elif run_MiDAS == 'yes':
                print_out = "To proceed with running MiDAS, you must select\na valid Affymetrix Power Tools Directory."
            win_info = IndicatorChooseWindow(print_out,'Continue') ### Prompt the user to locate the APT directory
            apt_location = win_info.Folder()
            fl.SetLocation(apt_location)
            # Persist the newly chosen location so the prompt is not repeated
            exportDefaultFileLocations(file_location_defaults)
    return apt_location
def check_moderated_support(option_db):
    """ Excludes moderated t-test support when module import fails... shouldn't fail """
    # mpmath is needed by the moderated statistics; when the import fails the
    # moderated options are stripped from the 'probability_algorithm' menu.
    # NOTE: 'except Exception,e' is Python-2-only syntax.
    try:
        import mpmath
    except Exception,e:
        a = traceback.format_exc()
        GUIcriticalError(a)
        keep=[]
        od = option_db['probability_algorithm']
        # Keep every option whose label does not contain 'oderate'
        # (matches both 'Moderated' and 'moderated' spellings)
        for i in od.ArrayOptions():
            if 'oderate' not in i: keep.append(i)
        od.setArrayOptions(keep) ### remove any moderated stats
        od.setDefaultOption(keep[0]) ### Change the default value to one of those in keep
    return option_db
def GUIcriticalError(log_report):
    # Persist a critical GUI error report (a formatted traceback string) to
    # 'GUIerror.log' so it can be inspected/submitted after a failure.
    log_file = filepath('GUIerror.log')
    # Use a context manager so the handle is closed even if write() raises
    # (the original open/write/close sequence could leak the handle).
    with open(log_file,'w') as data:
        data.write(log_report)
    """
    if os.name == 'nt':
        try: os.startfile('"'+log_file+'"')
        except Exception: os.system('open "'+log_file+'"')
    elif 'darwin' in sys.platform: os.system('open "'+log_file+'"')
    elif 'linux' in sys.platform: os.system('xdg-open "'+log_file+'/"')
    """
class GeneSelectionParameters:
    ### This class specifies parameters for filtering a large dataset for downstream gene or pathway analysis/visualization
    def __init__(self, species, platform, vendor):
        self._species = species; self._platform = platform; self._vendor = vendor
        self._PathwaySelect = False; self._gene = False; self._GeneSet = False
        self._Normalize = False
    # --- dataset context accessors ---
    def Species(self): return self._species
    def Platform(self): return self._platform
    def Vendor(self): return self._vendor
    def setGeneSet(self,gene_set): self._GeneSet = gene_set
    def GeneSet(self):
        # Lists/tuples are normalized to an immutable tuple; scalars pass through
        if isinstance(self._GeneSet, tuple) or isinstance(self._GeneSet, list):
            return tuple(self._GeneSet)
        else:
            return self._GeneSet
    def setPathwaySelect(self,pathway): self._PathwaySelect = pathway
    def setJustShowTheseIDs(self, justShowTheseIDs): self.justShowTheseIDs = justShowTheseIDs
    def setClusterGOElite(self, clusterGOElite): self.clusterGOElite = clusterGOElite
    def setStoreGeneSetName(self, geneSetName): self.geneSetName = geneSetName
    def StoreGeneSetName(self): return self.geneSetName
    def ClusterGOElite(self):
        # Return the GO-Elite resources as a list with the 'None Selected'
        # placeholder removed (string input is wrapped in a one-element list)
        if isinstance(self.clusterGOElite, tuple) or isinstance(self.clusterGOElite, list):
            self.clusterGOElite = list(self.clusterGOElite)
            if 'None Selected' in self.clusterGOElite: self.clusterGOElite.remove('None Selected')
            return self.clusterGOElite
        else:
            self.clusterGOElite = string.replace(self.clusterGOElite,'None Selected','')
            return [self.clusterGOElite]
    def PathwaySelect(self):
        if isinstance(self._PathwaySelect, tuple) or isinstance(self._PathwaySelect, list):
            return tuple(self._PathwaySelect)
        else:
            return self._PathwaySelect
    def setGeneSelection(self,gene): self._gene = gene
    def GeneSelection(self):
        # Normalize user-entered newlines to spaces; non-string values
        # (e.g. the False default) fall through unchanged via the except path
        try:
            genes = self._gene
            genes = string.replace(genes,'\r', ' ')
            genes = string.replace(genes,'\n', ' ')
        except Exception:
            genes = self._gene
        return genes
    def JustShowTheseIDs(self):
        # Split the user-entered ID text into a list of IDs ('' when unset)
        if 'None Selected' in self.justShowTheseIDs:
            return ''
        else:
            justShowTheseIDs = string.replace(self.justShowTheseIDs,'\r',' ')
            justShowTheseIDs = string.replace(justShowTheseIDs,'\n',' ')
            justShowTheseIDs = string.split(justShowTheseIDs,' ')
            try: justShowTheseIDs.remove('')
            except Exception: pass
            return justShowTheseIDs
    def GetGeneCorrelations(self):
        if len(self._gene)>0: return True
        else: return False
    def FilterByPathways(self):
        if self._GeneSet != 'None Selected': return True
        else: return False
    def setTranspose(self,transpose): self._transpose = transpose
    def Transpose(self):
        try: return self._transpose
        except Exception: return False
    def setOntologyID(self,OntologyID): self._OntologyID = OntologyID
    def OntologyID(self):
        try:
            return self._OntologyID
        except Exception: return ''
    def setIncludeExpIDs(self,IncludeExpIDs): self._IncludeExpIDs = IncludeExpIDs
    def IncludeExpIDs(self): return self._IncludeExpIDs
    def setNormalize(self,Normalize):
        # 'NA' from the GUI means no normalization
        if Normalize == 'NA': Normalize = False
        self._Normalize = Normalize
    def Normalize(self): return self._Normalize
    def setSampleDiscoveryParameters(self,ExpressionCutoff,CountsCutoff,FoldDiff,SamplesDiffering,
            removeOutliers,featurestoEvaluate,restrictBy,excludeCellCycle,column_metric,column_method,rho_cutoff):
        ### For single-cell RNA-Seq data
        self.expressionCutoff = ExpressionCutoff
        self.countsCutoff = CountsCutoff
        self.rho_cutoff = rho_cutoff
        self.foldDiff = FoldDiff
        self.samplesDiffering = SamplesDiffering
        self.featurestoEvaluate = featurestoEvaluate
        self.restrictBy = restrictBy
        self.excludeCellCycle = excludeCellCycle
        self.column_metric = column_metric
        self.column_method = column_method
        self.removeOutliers = removeOutliers
        if len(self._gene)>0:
            self._gene = self._gene + ' amplify' ### always amplify the selected genes if any
    def setExpressionCutoff(self,expressionCutoff):self.expressionCutoff = expressionCutoff
    def setCountsCutoff(self,countsCutoff):self.countsCutoff = countsCutoff
    def ExpressionCutoff(self):
        # Numeric cutoff, or False when unset/non-numeric
        try: return float(self.expressionCutoff)
        except Exception: return False
    def setRhoCutoff(self,rho):
        self.rho_cutoff = rho
    def RhoCutoff(self):
        return float(self.rho_cutoff)
    def CountsCutoff(self):
        try: return int(float(self.countsCutoff))
        except Exception: return False
    def FoldDiff(self):
        try: return float(self.foldDiff)
        except Exception: return False
    def SamplesDiffering(self):
        try: return int(float(self.samplesDiffering))
        except Exception: return False
    def amplifyGenes(self):
        # True when either a gene-set filter or an explicit gene selection
        # (beyond the bare ' amplify' suffix) is in effect
        if (self.FilterByPathways() != '' and self.FilterByPathways() !=False) or (self.GeneSelection() != '' and self.GeneSelection() != ' amplify'):
            return True
        else: return False
    def FeaturestoEvaluate(self): return self.featurestoEvaluate
    def RestrictBy(self):
        if self.restrictBy == True or self.restrictBy == 'yes':
            return 'protein_coding'
        else:
            return None
    def RemoveOutliers(self):
        if self.removeOutliers == True or self.removeOutliers == 'yes':
            return True
        else:
            return False
    def ExcludeCellCycle(self):
        # Note: any value other than False/'no' (e.g. 'yes') is treated as True
        if self.excludeCellCycle == 'stringent' or self.excludeCellCycle == 'strict':
            return 'strict' ### Also includes removing drivers correlated to any cell cycle genes, not just in the training set
        elif self.excludeCellCycle == False:
            return False
        elif self.excludeCellCycle == True or self.excludeCellCycle != 'no':
            return True
        else:
            return False
    def ColumnMetric(self): return self.column_metric
    def ColumnMethod(self): return self.column_method
    def MinEvents(self):
        return self.SamplesDiffering()-1
    def MedEvents(self):
        return (self.SamplesDiffering()-1)*2
def getSupportedGeneSetTypes(species,directory):
    # Scan AltDatabase/goelite/<species>/<directory> and return the distinct
    # gene-set type names found there; returns [] if the directory is
    # missing/unreadable (any exception).
    try:
        geneset_types=[]
        current_geneset_dirs = unique.read_directory('/AltDatabase/goelite/'+species+'/'+directory)
        for geneset_dir in current_geneset_dirs:
            # Drop the '<system>-' prefix and the 4-char extension
            # (presumably '.txt' -- filenames look like 'Ensembl-MAPP.txt')
            geneset_dir = string.join(string.split(geneset_dir,'-')[1:],'-')[:-4] ### remove the prefix gene system
            if geneset_dir == 'MAPP': geneset_dir = 'WikiPathways'
            if geneset_dir not in geneset_types:
                if len(geneset_dir)>1:
                    geneset_types.append(geneset_dir)
    except Exception:
        return []
    return geneset_types
def getSupportedGeneSystems(species,directory):
    # Return a sorted, de-duplicated list of gene-system names present in
    # AltDatabase/goelite/<species>/<directory>, always including 'Ensembl'
    # and 'HMDB'.
    system_names=[]
    current_system_dirs = unique.read_directory('/AltDatabase/goelite/'+species+'/'+directory)
    for system_dir in current_system_dirs:
        try:
            # Take the token after the first '-' and drop the 4-char extension
            system_dir = string.split(system_dir,'-')[1][:-4] ### remove the prefix gene system
            if len(system_dir)>1:
                if system_dir not in system_names:
                    system_names.append(system_dir)
        except Exception: None
    system_names.append('Ensembl')
    system_names.append('HMDB')
    system_names = unique.unique(system_names)
    system_names.sort()
    return system_names
def listAllGeneSetCategories(species,geneset_type,directory):
    # Return the sorted pathway/category names for one gene-set resource.
    # 'gene-go' reads the ontology annotation build (category in column 1);
    # otherwise the per-species relationship file is read (category in the
    # last column). NOTE: xreadlines() is Python-2-only.
    geneset_categories=[]
    if directory == 'gene-go':
        if geneset_type == 'GeneOntology': geneset_type = 'go'
        filename = 'AltDatabase/goelite/OBO/builds/'+geneset_type+'_annotations.txt'
        index = 1
    else:
        if geneset_type == 'WikiPathways': geneset_type = 'MAPP'
        filename = 'AltDatabase/goelite/'+species+'/'+directory+'/'+'Ensembl-'+geneset_type+'.txt'
        index = -1
    fn=filepath(filename)
    ### Imports a geneset category and stores pathway-level names
    i=0
    for line in open(fn,'rU').xreadlines():
        if i==0: i=1 ### Skip the header
        else:
            data = cleanUpLine(line)
            geneset_category = string.split(data,'\t')[index]
            if geneset_category not in geneset_categories:
                geneset_categories.append(geneset_category)
    geneset_categories.sort()
    return geneset_categories
def getValidExpFile(altanalyze_rawexp_dir):
    # Return the path of the first primary expression file ('exp.' in the
    # name) in the directory, skipping steady-state and highExp derivatives;
    # '' when no such file exists.
    for filename in read_directory(altanalyze_rawexp_dir):
        if 'exp.' in filename and 'state.txt' not in filename and 'highExp' not in filename:
            return altanalyze_rawexp_dir+'/'+filename
    return ''
def getValidSplicingScoreFile(altanalyze_rawsplice_dir):
    # Locate a splicing-score results file: pick the first recognized
    # method folder present, then prefer an all-groups file (no '_vs_'
    # in its name) over pairwise comparison files; '' when nothing found.
    top_level = read_directory(altanalyze_rawsplice_dir)
    method_folder = None
    for candidate in ('splicing-index','FIRMA','ASPIRE','linearregres'):
        if candidate in top_level:
            method_folder = candidate
            break
    if method_folder is None:
        return ''
    child_dir = altanalyze_rawsplice_dir+'/'+method_folder
    chosen = ''
    primary = ''
    for filename in read_directory(child_dir):
        if '.txt' in filename:
            chosen = child_dir+'/'+filename
            if '_vs_' not in filename: ### You can have a folder with pairwise comps and all groups
                primary = child_dir+'/'+filename
    if primary != '':
        chosen = primary
    return chosen
def downloadInteractionDBs(species,windowType):
    # Download interaction resources (WikiPathways, KEGG, BioGRID, DrugBank,
    # miRNA targets, TF targets) for the species via the same StatusWindow
    # pipeline used for gene-set updates. GUI side effect only; no return.
    analysis = 'getAdditionalOnlineResources' ### same option as updating gene-sets
    additional_resources=['Latest WikiPathways','KEGG','BioGRID','DrugBank','miRNA Targets','Transcription Factor Targets']
    get_additional = 'customSet',additional_resources
    values = species,get_additional
    StatusWindow(values,analysis,windowType=windowType) ### open in a TopLevel TK window (don't close current option selection menu)
if __name__ == '__main__':
    # Developer entry point (Python 2): the hard-coded path below is a local
    # test artifact and is unused unless the commented lines are re-enabled.
    dir = '/Users/saljh8/Desktop/dataAnalysis/FuKun/AltResults/Clustering/Combined-junction-exon-evidence.txt'
    #a = exportJunctionList(dir,limit=50)
    #print a;sys.exit()
    try:
        import multiprocessing as mlp
        mlp.freeze_support()
    except Exception:
        # NOTE(review): 'verison' typo in this user-facing message
        print 'Note: Multiprocessing not supported for this verison python.'
        mlp=None
    #getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
    a = getUserParameters('yes')
| kdaily/altanalyze | UI.py | Python | apache-2.0 | 382,733 | [
"Cytoscape"
] | dcc010ab148e0e1c01b269c3abbdc34ea17d07dd84c252d1ff02a38ef8574f9d |
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import logging
from stoqlib.gui.events import PrintReportEvent
from stoqlib.gui.utils.printing import print_report
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.workorder import WorkOrderQuoteReport
from .bikeshopreport import BikeShopWorkOrderQuoteReport
_ = stoqlib_gettext
log = logging.getLogger(__name__)
class BikeShopUI(object):
    # Plugin UI hook: substitutes the bike-shop variant of the work-order
    # quote report whenever the stock report is about to be printed.

    def __init__(self):
        PrintReportEvent.connect(self._on_PrintReportEvent)

    #
    # Events
    #

    def _on_PrintReportEvent(self, report_class, *args, **kwargs):
        # Only intercept the stock quote report; anything else falls through.
        if report_class is not WorkOrderQuoteReport:
            return None
        print_report(BikeShopWorkOrderQuoteReport, *args)
        return True
| andrebellafronte/stoq | plugins/bikeshop/bikeshopui.py | Python | gpl-2.0 | 1,591 | [
"VisIt"
] | fcd66eeaddf6326724f81546957542ada19c549c279f87dfedfdbee410558532 |
'''
Functions and classes for generating 1D random fields.
The functions **randn1d** and **multirandn1d** are similar to the
**numpy.random.randn** and **np.random.multivariate_normal** functions.
If a large number of random fields are required (e.g. for RFT validations)
it may be more efficient to use the **Generator1D** and **GeneratorMulti1D** classes.
'''
# Copyright (C) 2016 Todd Pataky
from math import sqrt,log
import numpy as np
from scipy.ndimage import gaussian_filter1d
eps = np.finfo(float).eps #smallest float
class Generator1D(object):
    '''
    Generator of smooth Gaussian random fields.

    :Parameters:

        *nResponses* -- number of fields (int)

        *nodes* -- number of field nodes (int) OR a binary field (boolean array)

        *FWHM* -- field smoothness (float)

        *pad* -- pad prior to smoothing (bool)

    :Returns:

        A Generator1D object

    :Notes:

        1. Generator1D is faster than randn1d for iteratively generating many random samples.

    :Examples:

        >>> g = rft1d.random.Generator1D(8, 101, 15.0)
        >>> y = g.generate_sample()
    '''
    def __init__(self, nResponses=1, nodes=101, FWHM=10, pad=False):
        super(Generator1D, self).__init__()
        self.FWHM = float(FWHM)
        self.SCALE = None  #scale factor to return smoothed data to unit variance
        self.SD = None  #standard deviation of the Gaussian kernel
        self.i0 = None  #first node, post-smoothed data
        self.i1 = None  #last node, post-smoothed data (i1-i0 = nodes)
        self.mask = None  #boolean array of excluded nodes (None for an unbroken field)
        self.nResponses = int(nResponses)
        self.nNodes = None
        self.pad = bool(pad)
        self.q = None  #number of nodes used for pre-smoothed data
        self._parse_nodes_argument(nodes)
        self.shape = self.nResponses, self.nNodes
        self.set_fwhm(self.FWHM)

    def __repr__(self):
        s = ''
        s += 'RFT1D Generator1D:\n'
        s += ' nResponses : %d\n' %self.nResponses
        s += ' nNodes : %d\n' %self.nNodes
        s += ' FWHM : %.1f\n' %self.FWHM
        s += ' pad : %s\n' %self.pad
        return s

    def _parse_nodes_argument(self, nodes):
        # "nodes" is either an int (field length) or a 1D boolean field;
        # for a boolean field, False nodes are masked out (set NaN on output).
        if isinstance(nodes, int):
            self.nNodes = nodes
        elif np.ma.is_mask(nodes):
            if nodes.ndim!=1:
                # BUG FIX: the message previously referenced an undefined name
                # ("arg"), so this path raised NameError instead of ValueError.
                raise( ValueError('RFT1D Error: the "nodes" argument must be a 1D boolean array. Received a %dD array'%nodes.ndim) )
            self.nNodes = nodes.size
            self.mask = np.logical_not(nodes)
        else:
            raise( ValueError('RFT1D Error: the "nodes" argument must be an integer or a 1D boolean array') )

    def _set_scale(self):
        '''
        Compute the scaling factor for restoring a smoothed curve to unit variance.
        This code is modified from "randomtalk.m" by Matthew Brett (Oct 1999)
        Downloaded from http://www.fil.ion.ucl.ac.uk/~wpenny/mbi/index.html on 1 Aug 2014
        '''
        if np.isinf(self.FWHM):
            self.SCALE = None
        else:
            t = np.arange( -0.5*(self.nNodes-1) , 0.5*(self.nNodes-1)+1 )
            gf = np.exp(-(t**2) / (2*self.SD**2 + eps))
            gf /= gf.sum()
            # expected variance for this kernel (via the kernel's power spectrum)
            AG = np.fft.fft(gf)
            Pag = AG * np.conj(AG) #power of the noise
            COV = np.real( np.fft.ifft(Pag) )
            svar = COV[0]
            self.SCALE = sqrt(1.0/svar)

    def _set_qi0i1(self, w):
        # Choose the pre-smoothing field length q and the [i0,i1) window that
        # is cut out after smoothing; padding grows q with the kernel width.
        if np.isinf(w):
            self.q = self.i0 = self.i1 = None
        elif self.pad:
            n = self.nNodes
            if w<3:
                q = 2*n
            else:
                q = 10*n
                if w>50:
                    q += n*(w-50)
            self.q = int(q)
            self.i0 = self.q/2 - n/2
            self.i1 = self.i0 + n
        else:
            self.q = self.nNodes
            self.i0 = 0
            self.i1 = self.nNodes
        self.i0 = int(self.i0)
        self.i1 = int(self.i1)

    def _smooth(self, y):
        # Gaussian smoothing along the node axis, rescaled to unit variance
        return self.SCALE*gaussian_filter1d(y, self.SD, axis=1, mode='wrap')

    def generate_sample(self):
        # Returns an (nResponses, nNodes) array of smooth Gaussian fields;
        # masked nodes (if any) are set to NaN.
        if self.FWHM==0:
            y = np.random.randn(*self.shape)
        elif np.isinf(self.FWHM):
            # Infinitely smooth: each response is a constant field
            y = np.random.randn(self.nResponses)
            y = ( y*np.ones( tuple(self.shape) ).T ).T
        else:
            y = np.random.randn(self.nResponses, self.q)
            y = self._smooth(y)
            y = y[:,self.i0:self.i1]
        if self.mask is not None:
            y[:,self.mask] = np.nan
        return y

    def set_fwhm(self, fwhm):
        # Update smoothness and all derived quantities (SD, scale, window)
        self.FWHM = float(fwhm)
        self.SD = self.FWHM / sqrt(8*log(2))
        self._set_scale()
        self._set_qi0i1(self.FWHM)
class GeneratorMulti1D(Generator1D):
    '''
    Generator of smooth multivariate Gaussian random fields.

    :Parameters:

        *nResponses* -- number of fields (int)

        *nodes* -- number of field nodes (int) OR a binary field (boolean array)

        *nComponents* -- number of vector components (int)

        *FWHM* -- field smoothness (float)

        *W* -- covariance matrix (*nComponents* x *nComponents* array)

        *pad* -- pad prior to smoothing (bool)

    :Returns:

        A GeneratorMulti1D object

    :Notes:

        1. GeneratorMulti1D is faster than multirandn1d for iteratively generating many random samples.

    :Examples:

        >>> g = rft1d.random.GeneratorMulti1D(8, 101, 3, 15.0)
        >>> y = g.generate_sample()
    '''
    def __init__(self, nResponses=1, nodes=101, nComponents=2, FWHM=10, W=None, pad=False):
        super(GeneratorMulti1D, self).__init__(nResponses, nodes, FWHM, pad)
        self.nComponents = int(nComponents)
        # Default covariance is the identity (independent unit-variance components)
        if W is None:
            self.W = np.eye(self.nComponents)
        else:
            self.W = np.asarray(W, dtype=float)
        self.shape = self.nResponses, self.nNodes, self.nComponents
        self.mu = np.array([0]*self.nComponents)

    def __repr__(self):
        # NOTE(review): header string says "Generator1D" (inherited wording);
        # left unchanged because it is runtime output.
        s = ''
        s += 'RFT1D Generator1D:\n'
        s += ' nResponses : %d\n' %self.nResponses
        s += ' nNodes : %d\n' %self.nNodes
        s += ' nComponents : %d\n' %self.nComponents
        s += ' FWHM : %.1f\n' %self.FWHM
        s += ' W : (%dx%d array)\n' %self.W.shape
        s += ' pad : %s\n' %self.pad
        return s

    def generate_sample(self):
        # Returns an (nResponses, nNodes, nComponents) array; masked nodes NaN.
        if self.FWHM==0:
            # BUG FIX: previously sampled self.q nodes here; with pad=True,
            # q > nNodes and this branch performs no post-smoothing slice, so
            # the returned field had the wrong length. Sample exactly nNodes
            # (consistent with Generator1D's FWHM==0 branch).
            y = np.random.multivariate_normal(self.mu, self.W, (self.nResponses,self.nNodes))
        elif np.isinf(self.FWHM):
            # Infinitely smooth: constant field per response/component
            y = np.random.multivariate_normal(self.mu, self.W, (self.nResponses,))
            y = np.dstack( [ (yy*np.ones( (self.nResponses,self.nNodes) ).T).T for yy in y.T] )
        else:
            y = np.random.multivariate_normal(self.mu, self.W, (self.nResponses,self.q))
            y = self._smooth(y)
            y = y[:,self.i0:self.i1,:]
        if self.mask is not None:
            y[:,self.mask,:] = np.nan
        return y
def multirandn1d(nResponses, nodes, nComponents, FWHM=10.0, W=None, pad=False):
    '''
    Generate smooth multivariate Gaussian random fields.

    One-shot convenience wrapper around GeneratorMulti1D: builds a generator
    for the requested geometry, smoothness and covariance, then returns a
    single sample.

    :Parameters:

        *nResponses* -- number of fields (int)

        *nodes* -- number of field nodes (int) OR a binary field (boolean array)

        *nComponents* -- number of vector components (int)

        *FWHM* -- field smoothness (float)

        *W* -- covariance matrix (*nComponents* x *nComponents* array); identity when None

        *pad* -- pad prior to smoothing (bool); slow but necessary when 2*FWHM > nodes

    :Returns:

        A 3D numpy array with shape: (*nResponses*, *nodes*, *nComponents*)
    '''
    generator = GeneratorMulti1D(nResponses, nodes, nComponents, FWHM, W, pad)
    return generator.generate_sample()
def randn1d(nResponses, nodes, FWHM=10.0, pad=False):
    '''
    Generate smooth Gaussian random fields.

    One-shot convenience wrapper around Generator1D; the (nResponses, nodes)
    sample is flattened to a 1D array when a single response is requested.

    :Parameters:

        *nResponses* -- number of fields (int)

        *nodes* -- number of field nodes (int) OR a binary field (boolean array)

        *FWHM* -- field smoothness (float)

        *pad* -- pad prior to smoothing (bool); slow but necessary when 2*FWHM > nodes

    :Returns:

        A 2D numpy array with shape: (*nResponses*, *nodes*), or 1D when nResponses==1
    '''
    sample = Generator1D(nResponses, nodes, FWHM, pad).generate_sample()
    if nResponses==1:
        return sample.flatten()
    return sample
| 0todd0000/spm1d | spm1d/rft1d/random.py | Python | gpl-3.0 | 8,036 | [
"Gaussian"
] | b5d4fc840a5f7c8e57de180a38ec6480dff034e2620bf2e43f7f69f618e9541f |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SQRL Server (Python) documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 28 05:29:34 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.append(os.path.abspath('..'))
from sqrlserver import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this build; napoleon parses Google-style
# docstrings (configured just below).
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]

napoleon_google_docstring = True
napoleon_include_init_with_doc = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'SQRL Server'
copyright = '2017, Aaron Dalton'
author = 'Aaron Dalton'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both track the package's __version__ (imported at the top of this file).
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SQRLServerPythondoc'
# -- Options for LaTeX output ---------------------------------------------
# NOTE(review): this is a Sphinx conf.py fragment; `master_doc` and `author`
# are presumably assigned earlier in the file — confirm before reuse.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SQRLServerPython.tex', 'SQRL Server (Python) Documentation',
'Aaron Dalton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sqrlserverpython', 'SQRL Server (Python) Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SQRLServerPython', 'SQRL Server (Python) Documentation',
author, 'SQRLServerPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| Perlkonig/sqrlserver-python | docs/conf.py | Python | mit | 9,574 | [
"Dalton"
] | 82340f4c21121fc046b3329d8ec63fea3e38a0900776e89e853114dedd501da7 |
"""This module contains the "Viz" objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import hashlib
import logging
import traceback
import uuid
import zlib
from collections import OrderedDict, defaultdict
from itertools import product
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from flask import request
from flask_babel import lazy_gettext as _
from markdown import markdown
import simplejson as json
from six import string_types, PY3
from dateutil import relativedelta as rdelta
from superset import app, utils, cache, get_manifest_file
from superset.utils import DTTM_ALIAS
# Flask app config; read throughout this module (row limits, cache timeouts)
config = app.config
# Pluggable stats client (presumably a StatsD-style object — see the
# STATS_LOGGER config key); only `.incr(name)` is used here.
stats_logger = config.get('STATS_LOGGER')
class BaseViz(object):

    """All visualizations derive this base class"""

    viz_type = None
    verbose_name = "Base Viz"
    credits = ""
    is_timeseries = False

    def __init__(self, datasource, form_data):
        """Bind a datasource and the slice's ``form_data`` to this viz."""
        if not datasource:
            raise Exception(_("Viz is missing a datasource"))
        self.datasource = datasource
        self.request = request
        self.viz_type = form_data.get("viz_type")
        self.form_data = form_data
        self.query = ""
        # Token used by the js layer to identify this chart instance
        self.token = self.form_data.get(
            'token', 'token_' + uuid.uuid4().hex[:8])
        self.metrics = self.form_data.get('metrics') or []
        self.groupby = self.form_data.get('groupby') or []
        self.status = None
        self.error_message = None

    def get_df(self, query_obj=None):
        """Returns a pandas dataframe based on the query object"""
        if not query_obj:
            query_obj = self.query_obj()

        self.error_msg = ""
        self.results = None

        timestamp_format = None
        if self.datasource.type == 'table':
            dttm_col = self.datasource.get_col(query_obj['granularity'])
            if dttm_col:
                timestamp_format = dttm_col.python_date_format

        # The datasource here can be different backend but the interface is common
        self.results = self.datasource.query(query_obj)
        self.query = self.results.query
        self.status = self.results.status
        self.error_message = self.results.error_message

        df = self.results.df
        # Transform the timestamp we received from database to pandas supported
        # datetime format. If no python_date_format is specified, the pattern will
        # be considered as the default ISO date format
        # If the datetime format is unix, the parse will use the corresponding
        # parsing logic.
        if df is None or df.empty:
            self.status = utils.QueryStatus.FAILED
            if not self.error_message:
                self.error_message = "No data."
            return pd.DataFrame()
        else:
            if DTTM_ALIAS in df.columns:
                if timestamp_format in ("epoch_s", "epoch_ms"):
                    df[DTTM_ALIAS] = pd.to_datetime(df[DTTM_ALIAS], utc=False)
                else:
                    df[DTTM_ALIAS] = pd.to_datetime(
                        df[DTTM_ALIAS], utc=False, format=timestamp_format)
                if self.datasource.offset:
                    df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
        # BUGFIX: DataFrame.replace is not in-place; the original call
        # discarded its return value, so +/-inf survived into the payload.
        df = df.replace([np.inf, -np.inf], np.nan)
        df = df.fillna(0)
        return df

    def get_extra_filters(self):
        """Map dashboard-level "extra" filters to a {column: values} dict."""
        extra_filters = self.form_data.get('extra_filters', [])
        return {f['col']: f['val'] for f in extra_filters}

    def query_obj(self):
        """Building a query object"""
        form_data = self.form_data
        gb = form_data.get("groupby") or []
        metrics = form_data.get("metrics") or []
        columns = form_data.get("columns") or []
        # merge groupby + columns, preserving order, without duplicates
        groupby = []
        for o in gb + columns:
            if o not in groupby:
                groupby.append(o)

        is_timeseries = self.is_timeseries
        if DTTM_ALIAS in groupby:
            groupby.remove(DTTM_ALIAS)
            is_timeseries = True

        # extra_filters are temporary/contextual filters that are external
        # to the slice definition. We use those for dynamic interactive
        # filters like the ones emitted by the "Filter Box" visualization
        extra_filters = self.get_extra_filters()
        granularity = (
            form_data.get("granularity") or form_data.get("granularity_sqla")
        )
        limit = int(form_data.get("limit") or 0)
        timeseries_limit_metric = form_data.get("timeseries_limit_metric")
        row_limit = int(
            form_data.get("row_limit") or config.get("ROW_LIMIT"))

        # __form and __to are special extra_filters that target time
        # boundaries. The rest of extra_filters are simple
        # [column_name in list_of_values]. `__` prefix is there to avoid
        # potential conflicts with column that would be named `from` or `to`
        since = (
            extra_filters.get('__from') or
            form_data.get("since") or
            config.get("SUPERSET_DEFAULT_SINCE", "1 year ago")
        )
        from_dttm = utils.parse_human_datetime(since)
        now = datetime.now()
        if from_dttm > now:
            # mirror a future "from" into the past by the same offset
            from_dttm = now - (from_dttm - now)
        until = extra_filters.get('__to') or form_data.get("until", "now")
        to_dttm = utils.parse_human_datetime(until)
        if from_dttm > to_dttm:
            raise Exception(_("From date cannot be larger than to date"))

        # extras are used to query elements specific to a datasource type
        # for instance the extra where clause that applies only to Tables
        extras = {
            'where': form_data.get("where", ''),
            'having': form_data.get("having", ''),
            'having_druid': form_data.get('having_filters') \
                if 'having_filters' in form_data else [],
            'time_grain_sqla': form_data.get("time_grain_sqla", ''),
            'druid_time_origin': form_data.get("druid_time_origin", ''),
        }
        filters = form_data['filters'] if 'filters' in form_data \
                else []
        for col, vals in self.get_extra_filters().items():
            # skip empty filters and the special __from/__to time filters
            if not (col and vals) or col.startswith('__'):
                continue
            elif col in self.datasource.filterable_column_names:
                # Quote values with comma to avoid conflict
                filters += [{
                    'col': col,
                    'op': 'in',
                    'val': vals,
                }]
        d = {
            'granularity': granularity,
            'from_dttm': from_dttm,
            'to_dttm': to_dttm,
            'is_timeseries': is_timeseries,
            'groupby': groupby,
            'metrics': metrics,
            'row_limit': row_limit,
            'filter': filters,
            'timeseries_limit': limit,
            'extras': extras,
            'timeseries_limit_metric': timeseries_limit_metric,
            'form_data': form_data,
        }
        return d

    @property
    def cache_timeout(self):
        """Cache TTL in seconds: slice override > datasource > database > default."""
        if self.form_data.get('cache_timeout'):
            return int(self.form_data.get('cache_timeout'))
        if self.datasource.cache_timeout:
            return self.datasource.cache_timeout
        if (
                hasattr(self.datasource, 'database') and
                self.datasource.database.cache_timeout):
            return self.datasource.database.cache_timeout
        return config.get("CACHE_DEFAULT_TIMEOUT")

    def get_json(self, force=False):
        """Serialized payload; NaN-safe, datetimes as epoch ints."""
        return json.dumps(
            self.get_payload(force),
            default=utils.json_int_dttm_ser, ignore_nan=True)

    @property
    def cache_key(self):
        # Sorting the items makes the key independent of dict ordering
        s = str([(k, self.form_data[k]) for k in sorted(self.form_data.keys())])
        return hashlib.md5(s.encode('utf-8')).hexdigest()

    def get_payload(self, force=False):
        """Handles caching around the json payload retrieval"""
        cache_key = self.cache_key
        payload = None
        force = force if force else self.form_data.get('force') == 'true'
        if not force and cache:
            payload = cache.get(cache_key)

        if payload:
            # BUGFIX: this counter and 'loaded_from_source' below were
            # swapped — a cache hit was counted as a source load and
            # vice versa.
            stats_logger.incr('loaded_from_cache')
            is_cached = True
            try:
                cached_data = zlib.decompress(payload)
                if PY3:
                    cached_data = cached_data.decode('utf-8')
                payload = json.loads(cached_data)
            except Exception as e:
                logging.error("Error reading cache: " +
                              utils.error_msg_from_exception(e))
                payload = None
            logging.info("Serving from cache")

        if not payload:
            stats_logger.incr('loaded_from_source')
            data = None
            is_cached = False
            cache_timeout = self.cache_timeout
            stacktrace = None
            try:
                df = self.get_df()
                if not self.error_message:
                    data = self.get_data(df)
            except Exception as e:
                logging.exception(e)
                if not self.error_message:
                    self.error_message = str(e)
                self.status = utils.QueryStatus.FAILED
                data = None
                stacktrace = traceback.format_exc()
            payload = {
                'cache_key': cache_key,
                'cache_timeout': cache_timeout,
                'data': data,
                'error': self.error_message,
                'form_data': self.form_data,
                'query': self.query,
                'status': self.status,
                'stacktrace': stacktrace,
            }
            payload['cached_dttm'] = datetime.utcnow().isoformat().split('.')[0]
            logging.info("Caching for the next {} seconds".format(
                cache_timeout))
            data = self.json_dumps(payload)
            if PY3:
                data = bytes(data, 'utf-8')
            if cache and self.status != utils.QueryStatus.FAILED:
                try:
                    cache.set(
                        cache_key,
                        zlib.compress(data),
                        timeout=cache_timeout)
                except Exception as e:
                    # cache.set call can fail if the backend is down or if
                    # the key is too large or whatever other reasons
                    logging.warning("Could not cache key {}".format(cache_key))
                    logging.exception(e)
                    cache.delete(cache_key)
        payload['is_cached'] = is_cached
        return payload

    def json_dumps(self, obj):
        return json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True)

    @property
    def data(self):
        """This is the data object serialized to the js layer"""
        content = {
            'form_data': self.form_data,
            'token': self.token,
            'viz_name': self.viz_type,
            'filter_select_enabled': self.datasource.filter_select_enabled,
        }
        return content

    def get_csv(self):
        df = self.get_df()
        # only emit the index when it carries information (non-trivial index)
        include_index = not isinstance(df.index, pd.RangeIndex)
        return df.to_csv(index=include_index, encoding="utf-8")

    def get_data(self, df):
        # Subclasses shape the dataframe into their chart payload here
        return []

    @property
    def json_data(self):
        return json.dumps(self.data)
class TableViz(BaseViz):

    """A basic html table that is sortable and searchable"""

    viz_type = "table"
    verbose_name = _("Table View")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    def should_be_timeseries(self):
        """Whether the time column belongs in the query and the result."""
        fd = self.form_data
        # TODO handle datasource-type-specific code in datasource
        has_granularity = (
            (fd.get('granularity') and fd.get('granularity') != 'all') or
            (fd.get('granularity_sqla') and fd.get('time_grain_sqla'))
        )
        if fd.get('include_time') and not has_granularity:
            raise Exception(_(
                "Pick a granularity in the Time section or "
                "uncheck 'Include Time'"))
        return fd.get('include_time')

    def query_obj(self):
        query = super(TableViz, self).query_obj()
        fd = self.form_data
        if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
            raise Exception(_(
                "Choose either fields to [Group By] and [Metrics] or "
                "[Columns], not both"))
        if fd.get('all_columns'):
            # raw-record mode: plain column listing, no aggregation
            query['columns'] = fd.get('all_columns')
            query['groupby'] = []
        query['orderby'] = [
            json.loads(t) for t in fd.get('order_by_cols') or []]
        query['is_timeseries'] = self.should_be_timeseries()
        return query

    def get_data(self, df):
        if not self.should_be_timeseries() and DTTM_ALIAS in df:
            del df[DTTM_ALIAS]
        return dict(
            columns=list(df.columns),
            records=df.to_dict(orient="records"),
        )

    def json_dumps(self, obj):
        # raw-column mode may include datetimes; render them as ISO strings
        if self.form_data.get('all_columns'):
            return json.dumps(obj, default=utils.json_iso_dttm_ser)
        return super(TableViz, self).json_dumps(obj)
class PivotTableViz(BaseViz):

    """A pivot table view, define your rows, columns and metrics"""

    viz_type = "pivot_table"
    verbose_name = _("Pivot Table")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    def query_obj(self):
        d = super(PivotTableViz, self).query_obj()
        groupby = self.form_data.get('groupby') or []
        columns = self.form_data.get('columns') or []
        metrics = self.form_data.get('metrics')
        if not groupby:
            raise Exception(_("Please choose at least one \"Group by\" field "))
        if not metrics:
            raise Exception(_("Please choose at least one metric"))
        # a column cannot be both a row group and a column group
        if set(groupby) & set(columns):
            raise Exception(_("'Group By' and 'Columns' can't overlap"))
        return d

    def get_data(self, df):
        fd = self.form_data
        # drop the time column when no real granularity was selected
        if fd.get("granularity") == "all" and DTTM_ALIAS in df:
            del df[DTTM_ALIAS]
        df = df.pivot_table(
            index=fd.get('groupby'),
            columns=fd.get('columns'),
            values=fd.get('metrics'),
            aggfunc=fd.get('pandas_aggfunc'),
            margins=fd.get('pivot_margins'),
        )
        if fd.get('combine_metric'):
            # Display metrics side by side within each column group
            df = df.stack(0).unstack()
        return dict(
            columns=list(df.columns),
            html=df.to_html(
                na_rep='',
                classes=(
                    "dataframe table table-striped table-bordered "
                    "table-condensed table-hover").split(" ")),
        )
class MarkupViz(BaseViz):

    """Use html or markdown to create a free form widget"""

    viz_type = "markup"
    verbose_name = _("Markup")
    is_timeseries = False

    def get_df(self):
        # static content: no datasource query is needed
        return True

    def get_data(self, df):
        code = self.form_data.get("code", '')
        if self.form_data.get("markup_type") == "markdown":
            code = markdown(code)
        return dict(html=code, theme_css=get_manifest_file('theme.css'))
class SeparatorViz(MarkupViz):
    """Use to create section headers in a dashboard, similar to `Markup`"""
    # Rendering is fully inherited from MarkupViz; only the identity differs.
    viz_type = "separator"
    verbose_name = _("Separator")
class WordCloudViz(BaseViz):

    """Build a colorful word cloud

    Uses the nice library at:
    https://github.com/jasondavies/d3-cloud
    """

    viz_type = "word_cloud"
    verbose_name = _("Word Cloud")
    is_timeseries = False

    def query_obj(self):
        query = super(WordCloudViz, self).query_obj()
        fd = self.form_data
        # a single metric drives word size; a single series drives the words
        query['metrics'] = [fd.get('metric')]
        query['groupby'] = [fd.get('series')]
        return query

    def get_data(self, df):
        fd = self.form_data
        # Order and rename the columns to the schema d3-cloud expects
        df = df[[fd.get('series'), fd.get('metric')]]
        df.columns = ['text', 'size']
        return df.to_dict(orient="records")
class TreemapViz(BaseViz):

    """Tree map visualisation for hierarchical data."""

    viz_type = "treemap"
    verbose_name = _("Treemap")
    credits = '<a href="https://d3js.org">d3.js</a>'
    is_timeseries = False

    def _nest(self, metric, df):
        # Recursively fold a (multi-)indexed frame into d3's
        # {"name": ..., "children"/"value": ...} nesting.
        if df.index.nlevels == 1:
            return [{"name": leaf, "value": val}
                    for leaf, val in zip(df.index, df[metric])]
        return [{"name": branch, "children": self._nest(metric, df.loc[branch])}
                for branch in df.index.levels[0]]

    def get_data(self, df):
        df = df.set_index(self.form_data.get("groupby"))
        # one root node per metric column
        return [{"name": metric, "children": self._nest(metric, df)}
                for metric in df.columns]
class CalHeatmapViz(BaseViz):
    """Calendar heatmap."""
    viz_type = "cal_heatmap"
    verbose_name = _("Calendar Heatmap")
    credits = (
        '<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>')
    is_timeseries = True
    def get_data(self, df):
        """Shape the (timestamp, metric) result into cal-heatmap's payload."""
        form_data = self.form_data
        df.columns = ["timestamp", "metric"]
        # cal-heatmap keys the values by unix epoch seconds
        # NOTE(review): with true division (__future__ division is imported)
        # the keys are floats rendered like "1500000000.0" — confirm the js
        # side tolerates the ".0" suffix.
        timestamps = {str(obj["timestamp"].value / 10**9):
                      obj.get("metric") for obj in df.to_dict("records")}
        start = utils.parse_human_datetime(form_data.get("since"))
        end = utils.parse_human_datetime(form_data.get("until"))
        domain = form_data.get("domain_granularity")
        diff_delta = rdelta.relativedelta(end, start)
        diff_secs = (end - start).total_seconds()
        # number of domain units to render, inclusive of the starting one
        if domain == "year":
            range_ = diff_delta.years + 1
        elif domain == "month":
            range_ = diff_delta.years * 12 + diff_delta.months + 1
        elif domain == "week":
            range_ = diff_delta.years * 53 + diff_delta.weeks + 1
        elif domain == "day":
            range_ = diff_secs // (24*60*60) + 1
        else:
            # fall back to hourly domains
            range_ = diff_secs // (60*60) + 1
        return {
            "timestamps": timestamps,
            "start": start,
            "domain": domain,
            "subdomain": form_data.get("subdomain_granularity"),
            "range": range_,
        }
    def query_obj(self):
        """Query a single metric against the time column."""
        qry = super(CalHeatmapViz, self).query_obj()
        qry["metrics"] = [self.form_data["metric"]]
        return qry
class NVD3Viz(BaseViz):
    """Base class for all nvd3 vizs"""
    # Abstract-ish base: concrete NVD3 charts override viz_type/verbose_name
    credits = '<a href="http://nvd3.org/">NVD3.org</a>'
    viz_type = None
    verbose_name = "Base NVD3 Viz"
    is_timeseries = False
class BoxPlotViz(NVD3Viz):
    """Box plot viz from ND3"""
    viz_type = "box_plot"
    verbose_name = _("Box Plot")
    sort_series = False
    is_timeseries = True
    def to_series(self, df, classed='', title_suffix=''):
        """Fold the aggregated frame into NVD3 box-plot series.

        Each row is one group; its (metric, stat) column pairs become a
        {Q1, Q2, Q3, whisker_high, whisker_low, outliers} box per metric.
        """
        label_sep = " - "
        chart_data = []
        for index_value, row in zip(df.index, df.to_dict(orient="records")):
            if isinstance(index_value, tuple):
                # multi-level groupby: join the parts into one label
                index_value = label_sep.join(index_value)
            boxes = defaultdict(dict)
            for (label, key), value in row.items():
                if key == "median":
                    # NVD3 expects the median under the name "Q2"
                    key = "Q2"
                boxes[label][key] = value
            for label, box in boxes.items():
                if len(self.form_data.get("metrics")) > 1:
                    # need to render data labels with metrics
                    chart_label = label_sep.join([index_value, label])
                else:
                    chart_label = index_value
                chart_data.append({
                    "label": chart_label,
                    "values": box,
                })
        return chart_data
    def get_data(self, df):
        """Aggregate each group into box statistics per the selected
        whisker option ("Tukey", "Min/max (no outliers)", or "p/q percentiles").
        """
        form_data = self.form_data
        df = df.fillna(0)
        # conform to NVD3 names
        def Q1(series): # need to be named functions - can't use lambdas
            return np.percentile(series, 25)
        def Q3(series):
            return np.percentile(series, 75)
        whisker_type = form_data.get('whisker_options')
        if whisker_type == "Tukey":
            def whisker_high(series):
                upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
                # highest observation still inside the upper Tukey fence
                series = series[series <= upper_outer_lim]
                return series[np.abs(series - upper_outer_lim).argmin()]
            def whisker_low(series):
                lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
                # find the closest value above the lower outer limit
                series = series[series >= lower_outer_lim]
                return series[np.abs(series - lower_outer_lim).argmin()]
        elif whisker_type == "Min/max (no outliers)":
            def whisker_high(series):
                return series.max()
            def whisker_low(series):
                return series.min()
        elif " percentiles" in whisker_type:
            # e.g. "5/95 percentiles" -> low=5, high=95
            low, high = whisker_type.replace(" percentiles", "").split("/")
            def whisker_high(series):
                return np.percentile(series, int(high))
            def whisker_low(series):
                return np.percentile(series, int(low))
        else:
            raise ValueError("Unknown whisker type: {}".format(whisker_type))
        def outliers(series):
            above = series[series > whisker_high(series)]
            below = series[series < whisker_low(series)]
            # pandas sometimes doesn't like getting lists back here
            return set(above.tolist() + below.tolist())
        aggregate = [Q1, np.median, Q3, whisker_high, whisker_low, outliers]
        df = df.groupby(form_data.get('groupby')).agg(aggregate)
        chart_data = self.to_series(df)
        return chart_data
class BubbleViz(NVD3Viz):

    """Based on the NVD3 bubble chart"""

    viz_type = "bubble"
    verbose_name = _("Bubble Chart")
    is_timeseries = False

    def query_obj(self):
        fd = self.form_data
        d = super(BubbleViz, self).query_obj()
        d['groupby'] = [fd.get('entity')]
        if fd.get('series'):
            d['groupby'].append(fd.get('series'))
        # remember which metric drives each visual dimension
        self.x_metric = fd.get('x')
        self.y_metric = fd.get('y')
        self.z_metric = fd.get('size')
        self.entity = fd.get('entity')
        # series defaults to the entity when none is chosen
        self.series = fd.get('series') or self.entity
        d['row_limit'] = fd.get('limit')
        d['metrics'] = [self.z_metric, self.x_metric, self.y_metric]
        if not all(d['metrics'] + [self.entity]):
            raise Exception(_("Pick a metric for x, y and size"))
        return d

    def get_data(self, df):
        # project the metrics onto the names the js layer expects
        df['x'] = df[[self.x_metric]]
        df['y'] = df[[self.y_metric]]
        df['size'] = df[[self.z_metric]]
        df['shape'] = 'circle'
        df['group'] = df[[self.series]]

        grouped = defaultdict(list)
        for record in df.to_dict(orient='records'):
            grouped[record['group']].append(record)
        return [{'key': name, 'values': rows} for name, rows in grouped.items()]
class BulletViz(NVD3Viz):

    """Based on the NVD3 bullet chart"""

    viz_type = "bullet"
    verbose_name = _("Bullet Chart")
    is_timeseries = False

    def query_obj(self):
        form_data = self.form_data
        d = super(BulletViz, self).query_obj()
        self.metric = form_data.get('metric')

        def split_field(name):
            # comma-separated form field -> list of strings
            raw = form_data.get(name)
            return raw.split(',') if raw else []

        def float_field(name):
            return [float(v) for v in split_field(name)]

        self.ranges = float_field('ranges')
        self.range_labels = split_field('range_labels')
        self.markers = float_field('markers')
        self.marker_labels = split_field('marker_labels')
        self.marker_lines = float_field('marker_lines')
        self.marker_line_labels = split_field('marker_line_labels')

        d['metrics'] = [self.metric]
        if not self.metric:
            raise Exception(_("Pick a metric to display"))
        return d

    def get_data(self, df):
        df = df.fillna(0)
        df['metric'] = df[[self.metric]]
        values = df['metric'].values
        return {
            'measures': values.tolist(),
            # default range spans zero to 110% of the max measure
            'ranges': self.ranges or [0, values.max() * 1.1],
            'rangeLabels': self.range_labels or None,
            'markers': self.markers or None,
            'markerLabels': self.marker_labels or None,
            'markerLines': self.marker_lines or None,
            'markerLineLabels': self.marker_line_labels or None,
        }
class BigNumberViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number"
    verbose_name = _("Big Number with Trendline")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = True

    def query_obj(self):
        d = super(BigNumberViz, self).query_obj()
        metric = self.form_data.get('metric')
        if not metric:
            raise Exception(_("Pick a metric!"))
        d['metrics'] = [metric]
        self.form_data['metric'] = metric
        return d

    def get_data(self, df):
        fd = self.form_data
        # order by the time column (first column) so the trendline is sorted
        df.sort_values(by=df.columns[0], inplace=True)
        return {
            'data': df.values.tolist(),
            'compare_lag': fd.get("compare_lag"),
            'compare_suffix': fd.get('compare_suffix', ''),
        }
class BigNumberTotalViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number_total"
    verbose_name = _("Big Number")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    def query_obj(self):
        d = super(BigNumberTotalViz, self).query_obj()
        metric = self.form_data.get('metric')
        if not metric:
            raise Exception(_("Pick a metric!"))
        d['metrics'] = [metric]
        self.form_data['metric'] = metric
        return d

    def get_data(self, df):
        fd = self.form_data
        # keep rows ordered by the first column for a deterministic payload
        df.sort_values(by=df.columns[0], inplace=True)
        return {
            'data': df.values.tolist(),
            'subheader': fd.get('subheader', ''),
        }
class NVD3TimeSeriesViz(NVD3Viz):
    """A rich line chart component with tons of options"""
    viz_type = "line"
    verbose_name = _("Time Series - Line Chart")
    sort_series = False
    is_timeseries = True
    def to_series(self, df, classed='', title_suffix=''):
        """Convert a time-indexed, one-column-per-series frame into NVD3
        series dicts; `title_suffix` tags the time-shifted comparison set.
        """
        cols = []
        for col in df.columns:
            # give empty/None column labels printable names
            if col == '':
                cols.append('N/A')
            elif col is None:
                cols.append('NULL')
            else:
                cols.append(col)
        df.columns = cols
        series = df.to_dict('series')
        chart_data = []
        for name in df.T.index.tolist():
            ys = series[name]
            # skip non-numeric columns
            if df[name].dtype.kind not in "biufc":
                continue
            if isinstance(name, string_types):
                series_title = name
            else:
                # tuple label from the pivot: (metric, groupby values...)
                name = ["{}".format(s) for s in name]
                if len(self.form_data.get('metrics')) > 1:
                    series_title = ", ".join(name)
                else:
                    # single metric: drop the redundant metric name
                    series_title = ", ".join(name[1:])
            if title_suffix:
                series_title += title_suffix
            d = {
                "key": series_title,
                "classed": classed,
                "values": [
                    {'x': ds, 'y': ys[ds] if ds in ys else None}
                    for ds in df.index
                ],
            }
            chart_data.append(d)
        return chart_data
    def process_data(self, df):
        """Pivot, resample, and smooth the raw frame per the form options."""
        fd = self.form_data
        df = df.fillna(0)
        if fd.get("granularity") == "all":
            raise Exception(_("Pick a time granularity for your time series"))
        df = df.pivot_table(
            index=DTTM_ALIAS,
            columns=fd.get('groupby'),
            values=fd.get('metrics'))
        fm = fd.get("resample_fillmethod")
        if not fm:
            fm = None
        how = fd.get("resample_how")
        rule = fd.get("resample_rule")
        if how and rule:
            # NOTE(review): resample(how=..., fill_method=...) and the
            # pd.rolling_* helpers below were removed in pandas 0.18+ —
            # confirm the pinned pandas version still supports them.
            df = df.resample(rule, how=how, fill_method=fm)
            if not fm:
                df = df.fillna(0)
        if self.sort_series:
            # order columns by descending series total
            dfs = df.sum()
            dfs.sort_values(ascending=False, inplace=True)
            df = df[dfs.index]
        if fd.get("contribution"):
            # express each point as a share of its row's total
            dft = df.T
            df = (dft / dft.sum()).T
        rolling_periods = fd.get("rolling_periods")
        rolling_type = fd.get("rolling_type")
        if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
            if rolling_type == 'mean':
                df = pd.rolling_mean(df, int(rolling_periods), min_periods=0)
            elif rolling_type == 'std':
                df = pd.rolling_std(df, int(rolling_periods), min_periods=0)
            elif rolling_type == 'sum':
                df = pd.rolling_sum(df, int(rolling_periods), min_periods=0)
        elif rolling_type == 'cumsum':
            df = df.cumsum()
        num_period_compare = fd.get("num_period_compare")
        if num_period_compare:
            num_period_compare = int(num_period_compare)
            prt = fd.get('period_ratio_type')
            if prt and prt == 'growth':
                df = (df / df.shift(num_period_compare)) - 1
            elif prt and prt == 'value':
                df = df - df.shift(num_period_compare)
            else:
                # default: plain period-over-period ratio
                df = df / df.shift(num_period_compare)
            # drop the leading rows that have no comparison value
            df = df[num_period_compare:]
        return df
    def get_data(self, df):
        fd = self.form_data
        df = self.process_data(df)
        chart_data = self.to_series(df)
        time_compare = fd.get('time_compare')
        if time_compare:
            # run the same query shifted back in time and overlay the result
            query_object = self.query_obj()
            delta = utils.parse_human_timedelta(time_compare)
            query_object['inner_from_dttm'] = query_object['from_dttm']
            query_object['inner_to_dttm'] = query_object['to_dttm']
            query_object['from_dttm'] -= delta
            query_object['to_dttm'] -= delta
            df2 = self.get_df(query_object)
            # re-align the shifted series onto the primary time axis
            df2[DTTM_ALIAS] += delta
            df2 = self.process_data(df2)
            chart_data += self.to_series(
                df2, classed='superset', title_suffix="---")
            chart_data = sorted(chart_data, key=lambda x: x['key'])
        return chart_data
class NVD3DualLineViz(NVD3Viz):
    """A rich line chart with dual axis"""
    viz_type = "dual_line"
    verbose_name = _("Time Series - Dual Axis Line Chart")
    sort_series = False
    is_timeseries = True
    def query_obj(self):
        """Require two distinct metrics, one per y-axis."""
        d = super(NVD3DualLineViz, self).query_obj()
        m1 = self.form_data.get('metric')
        m2 = self.form_data.get('metric_2')
        d['metrics'] = [m1, m2]
        if not m1:
            raise Exception(_("Pick a metric for left axis!"))
        if not m2:
            raise Exception(_("Pick a metric for right axis!"))
        if m1 == m2:
            raise Exception(_("Please choose different metrics"
                              " on left and right axis"))
        return d
    def to_series(self, df, classed=''):
        """Build one NVD3 line series per metric, tagged with its y-axis."""
        cols = []
        for col in df.columns:
            # give empty/None column labels printable names
            if col == '':
                cols.append('N/A')
            elif col is None:
                cols.append('NULL')
            else:
                cols.append(col)
        df.columns = cols
        series = df.to_dict('series')
        chart_data = []
        metrics = [
            self.form_data.get('metric'),
            self.form_data.get('metric_2')
        ]
        for i, m in enumerate(metrics):
            ys = series[m]
            # skip non-numeric columns
            if df[m].dtype.kind not in "biufc":
                continue
            series_title = m
            d = {
                "key": series_title,
                "classed": classed,
                "values": [
                    {'x': ds, 'y': ys[ds] if ds in ys else None}
                    for ds in df.index
                ],
                # yAxis is 1-based: metric -> left axis, metric_2 -> right
                "yAxis": i+1,
                "type": "line"
            }
            chart_data.append(d)
        return chart_data
    def get_data(self, df):
        fd = self.form_data
        df = df.fillna(0)
        if self.form_data.get("granularity") == "all":
            raise Exception(_("Pick a time granularity for your time series"))
        metric = fd.get('metric')
        metric_2 = fd.get('metric_2')
        # one row per timestamp, one column per metric
        df = df.pivot_table(
            index=DTTM_ALIAS,
            values=[metric, metric_2])
        chart_data = self.to_series(df)
        return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
    """A bar chart where the x axis is time"""
    # all query/transform logic is inherited from NVD3TimeSeriesViz
    viz_type = "bar"
    # order series by descending total (see NVD3TimeSeriesViz.process_data)
    sort_series = True
    verbose_name = _("Time Series - Bar Chart")
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
    """A line chart component where you can compare the % change over time"""
    # all query/transform logic is inherited from NVD3TimeSeriesViz
    viz_type = 'compare'
    verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
    """A rich stack area chart"""
    # all query/transform logic is inherited from NVD3TimeSeriesViz
    viz_type = "area"
    verbose_name = _("Time Series - Stacked")
    # order series by descending total for a stable stacking order
    sort_series = True
class DistributionPieViz(NVD3Viz):
    """Annoy visualization snobs with this controversial pie chart"""
    viz_type = "pie"
    verbose_name = _("Distribution - NVD3 - Pie Chart")
    is_timeseries = False
    def get_data(self, df):
        """Return [{'x': group, 'y': value}, ...] sorted by value, descending."""
        groupName = df.columns[0]
        polymerization = df.columns[1]
        # Re-align self.groupby / self.metrics with the column names the
        # backend actually returned.
        # NOTE(review): this case-insensitive substring match is heuristic —
        # confirm it cannot mis-match similarly named columns.
        if groupName.lower() in self.groupby[0].lower():
            self.groupby[0] = groupName
        if polymerization.lower() in self.metrics[0].lower():
            self.metrics[0] = polymerization
        df = df.pivot_table(
            index=self.groupby,
            values=[self.metrics[0]])
        df.sort_values(by=self.metrics[0], ascending=False, inplace=True)
        df = df.reset_index()
        df.columns = ['x', 'y']
        return df.to_dict(orient="records")
class HistogramViz(BaseViz):

    """Histogram"""

    viz_type = "histogram"
    verbose_name = _("Histogram")
    is_timeseries = False

    def query_obj(self):
        """Returns the query object for this visualization"""
        d = super(HistogramViz, self).query_obj()
        d['row_limit'] = self.form_data.get(
            'row_limit', int(config.get('VIZ_ROW_LIMIT')))
        column = self.form_data.get('all_columns_x')
        if column is None:
            raise Exception(_("Must have one numeric column specified"))
        # the single selected column provides the raw values to bin
        d['columns'] = [column]
        return d

    def get_data(self, df):
        """Returns the chart data"""
        # the js layer does the actual binning; ship the raw values
        return df[df.columns[0]].values.tolist()
class DistributionBarViz(DistributionPieViz):
    """A good old bar chart"""
    viz_type = "dist_bar"
    verbose_name = _("Distribution - Bar Chart")
    is_timeseries = False
    def query_obj(self):
        """Validate the form: series/breakdown overlap is not allowed."""
        d = super(DistributionBarViz, self).query_obj()  # noqa
        fd = self.form_data
        # the merged groupby list is deduped, so a shorter list means overlap
        if (
            len(d['groupby']) <
            len(fd.get('groupby') or []) + len(fd.get('columns') or [])
        ):
            raise Exception(
                _("Can't have overlap between Series and Breakdowns"))
        if not fd.get('metrics'):
            raise Exception(_("Pick at least one metric"))
        if not fd.get('groupby'):
            raise Exception(_("Pick at least one field for [Series]"))
        return d
    def get_data(self, df):
        fd = self.form_data
        # per-series totals, used to order the bars by descending sum
        row = df.groupby(self.groupby).sum()[self.metrics[0]].copy()
        row.sort_values(ascending=False, inplace=True)
        columns = fd.get('columns') or []
        pt = df.pivot_table(
            index=self.groupby,
            columns=columns,
            values=self.metrics)
        if fd.get("contribution"):
            # normalize each series so values express a share of the total
            pt = pt.fillna(0)
            pt = pt.T
            pt = (pt / pt.sum()).T
        pt = pt.reindex(row.index)
        chart_data = []
        for name, ys in pt.iteritems():
            # skip non-numeric columns and the groupby columns themselves
            if pt[name].dtype.kind not in "biufc" or name in self.groupby:
                continue
            if isinstance(name, string_types):
                series_title = name
            elif len(self.metrics) > 1:
                series_title = ", ".join(name)
            else:
                # single metric: drop the metric name from the label
                l = [str(s) for s in name[1:]]
                series_title = ", ".join(l)
            values = []
            for i, v in ys.iteritems():
                x = i
                if isinstance(x, (tuple, list)):
                    x = ', '.join([str(s) for s in x])
                else:
                    x = str(x)
                values.append({
                    'x': x,
                    'y': v,
                })
            d = {
                "key": series_title,
                "values": values,
            }
            chart_data.append(d)
        return chart_data
class SunburstViz(BaseViz):
    """A multi level sunburst chart"""
    viz_type = "sunburst"
    verbose_name = _("Sunburst")
    is_timeseries = False
    credits = (
        'Kerry Rodden '
        '@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>')
    def get_data(self, df):
        """Return rows of [level1, ..., levelN, m1, m2] for the js layer."""
        # if m1 == m2 duplicate the metric column
        cols = self.form_data.get('groupby')
        metric = self.form_data.get('metric')
        secondary_metric = self.form_data.get('secondary_metric')
        if metric == secondary_metric:
            ndf = df
            # NOTE(review): this assigns a one-element list *containing* a
            # list as the columns — looks like it should be
            # `cols + ['m1', 'm2']` without the outer brackets; confirm.
            ndf.columns = [cols + ['m1', 'm2']]
        else:
            # NOTE(review): `cols +=` mutates the list stored in form_data
            cols += [
                self.form_data['metric'], self.form_data['secondary_metric']]
            ndf = df[cols]
        return json.loads(ndf.to_json(orient="values")) # TODO fix this nonsense
    def query_obj(self):
        """Query both metrics so the arcs can be sized and colored."""
        qry = super(SunburstViz, self).query_obj()
        qry['metrics'] = [
            self.form_data['metric'], self.form_data['secondary_metric']]
        return qry
class SankeyViz(BaseViz):
    """A Sankey diagram that requires a parent-child dataset"""
    viz_type = "sankey"
    verbose_name = _("Sankey")
    is_timeseries = False
    credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
    def query_obj(self):
        """Exactly two groupby columns (source, target) plus one metric."""
        qry = super(SankeyViz, self).query_obj()
        if len(qry['groupby']) != 2:
            raise Exception(_("Pick exactly 2 columns as [Source / Target]"))
        qry['metrics'] = [
            self.form_data['metric']]
        return qry
    def get_data(self, df):
        df.columns = ['source', 'target', 'value']
        recs = df.to_dict(orient='records')
        # adjacency map (source -> set of targets) for cycle detection
        hierarchy = defaultdict(set)
        for row in recs:
            hierarchy[row['source']].add(row['target'])
        def find_cycle(g):
            """Whether there's a cycle in a directed graph"""
            # `path` holds the vertices on the current DFS path
            path = set()
            def visit(vertex):
                path.add(vertex)
                for neighbour in g.get(vertex, ()):
                    if neighbour in path or visit(neighbour):
                        # return the offending edge (truthy); else fall
                        # through to the implicit falsy None below
                        return (vertex, neighbour)
                path.remove(vertex)
            for v in g:
                cycle = visit(v)
                if cycle:
                    return cycle
        cycle = find_cycle(hierarchy)
        if cycle:
            raise Exception(_(
                "There's a loop in your Sankey, please provide a tree. "
                "Here's a faulty link: {}").format(cycle))
        return recs
class DirectedForceViz(BaseViz):

    """An animated directed force layout graph visualization"""

    viz_type = "directed_force"
    verbose_name = _("Directed Force Layout")
    credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
    is_timeseries = False

    def query_obj(self):
        qry = super(DirectedForceViz, self).query_obj()
        fd = self.form_data
        if len(fd['groupby']) != 2:
            raise Exception(_("Pick exactly 2 columns to 'Group By'"))
        qry['metrics'] = [fd['metric']]
        return qry

    def get_data(self, df):
        # the two groupby columns become graph edges; the metric is the weight
        df.columns = ['source', 'target', 'value']
        return df.to_dict(orient='records')
class ChordViz(BaseViz):

    """A Chord diagram"""

    viz_type = "chord"
    # Fixed: was mislabeled "Directed Force Layout", a copy/paste error
    # from DirectedForceViz above.
    verbose_name = _("Chord Diagram")
    credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
    is_timeseries = False

    def query_obj(self):
        """Query one metric grouped by the (groupby, columns) pair."""
        qry = super(ChordViz, self).query_obj()
        fd = self.form_data
        qry['groupby'] = [fd.get('groupby'), fd.get('columns')]
        qry['metrics'] = [fd.get('metric')]
        return qry

    def get_data(self, df):
        """Build the symmetrical adjacency matrix that d3.chord calls for."""
        df.columns = ['source', 'target', 'value']
        nodes = list(set(df['source']) | set(df['target']))
        # Dense zero-filled matrix, then overwrite with observed values.
        matrix = {(source, target): 0 for source, target in product(nodes, nodes)}
        for source, target, value in df.to_records(index=False):
            matrix[(source, target)] = value
        m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
        return {
            'nodes': list(nodes),
            'matrix': m,
        }
class CountryMapViz(BaseViz):

    """A country centric"""

    viz_type = "country_map"
    verbose_name = _("Country Map")
    is_timeseries = False
    credits = 'From bl.ocks.org By john-guerra'

    def query_obj(self):
        """Query a single metric grouped by the entity (region id) column."""
        qry = super(CountryMapViz, self).query_obj()
        qry['metrics'] = [
            self.form_data['metric']]
        qry['groupby'] = [self.form_data['entity']]
        return qry

    def get_data(self, df):
        """Return [{'country_id': ..., 'metric': ...}, ...] records.

        The previously unused ``from superset.data import countries``
        import and the redundant ndf/df reassignment dance were removed.
        """
        fd = self.form_data
        ndf = df[[fd.get('entity'), fd.get('metric')]]
        ndf.columns = ['country_id', 'metric']
        return ndf.to_dict(orient='records')
class WorldMapViz(BaseViz):

    """A country centric world map"""

    viz_type = "world_map"
    verbose_name = _("World Map")
    is_timeseries = False
    credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'

    def query_obj(self):
        # Two metrics (e.g. bubble size / color) grouped by the country
        # entity column.
        qry = super(WorldMapViz, self).query_obj()
        qry['metrics'] = [
            self.form_data['metric'], self.form_data['secondary_metric']]
        qry['groupby'] = [self.form_data['entity']]
        return qry

    def get_data(self, df):
        from superset.data import countries
        fd = self.form_data
        cols = [fd.get('entity')]
        metric = fd.get('metric')
        secondary_metric = fd.get('secondary_metric')
        if metric == secondary_metric:
            ndf = df[cols]
            # df[metric] will be a DataFrame
            # because there are duplicate column names
            ndf['m1'] = df[metric].iloc[:, 0]
            ndf['m2'] = ndf['m1']
        else:
            cols += [metric, secondary_metric]
            ndf = df[cols]
        df = ndf
        df.columns = ['country', 'm1', 'm2']
        d = df.to_dict(orient='records')
        for row in d:
            country = None
            if isinstance(row['country'], string_types):
                # Resolve the user-supplied country identifier (of the type
                # selected in 'country_fieldtype') to the datamaps record.
                country = countries.get(
                    fd.get('country_fieldtype'), row['country'])
            if country:
                row['country'] = country['cca3']
                row['latitude'] = country['lat']
                row['longitude'] = country['lng']
                row['name'] = country['name']
            else:
                # Sentinel code for countries that could not be resolved.
                row['country'] = "XXX"
        return d
class FilterBoxViz(BaseViz):

    """A multi filter, multi-choice filter box to make dashboards interactive"""

    viz_type = "filter_box"
    verbose_name = _("Filters")
    is_timeseries = False
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'

    def query_obj(self):
        """Require at least one filter field (or a date filter)."""
        query = super(FilterBoxViz, self).query_obj()
        groupby = self.form_data.get('groupby')
        if len(groupby) < 1 and not self.form_data.get('date_filter'):
            raise Exception(_("Pick at least one filter field"))
        query['metrics'] = [
            self.form_data['metric']]
        return query

    def get_data(self, df):
        """Run one query per filter column and collect its choice list."""
        query = self.query_obj()
        d = {}
        for flt in list(self.form_data['groupby']):
            query['groupby'] = [flt]
            df = super(FilterBoxViz, self).get_df(query)
            d[flt] = [
                {
                    'id': row[0],
                    'text': row[0],
                    'filter': flt,
                    'metric': row[1],
                }
                for row in df.itertuples(index=False)
            ]
        return d
class IFrameViz(BaseViz):

    """You can squeeze just about anything in this iFrame component"""

    viz_type = "iframe"
    verbose_name = _("iFrame")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    def get_df(self):
        # The iframe renders external content client-side; no query is issued.
        return None
class ParallelCoordinatesViz(BaseViz):

    """Interactive parallel coordinate implementation

    Uses this amazing javascript library
    https://github.com/syntagmatic/parallel-coordinates
    """

    viz_type = "para"
    verbose_name = _("Parallel Coordinates")
    credits = (
        '<a href="https://syntagmatic.github.io/parallel-coordinates/">'
        'Syntagmatic\'s library</a>')
    is_timeseries = False

    def query_obj(self):
        """Query the selected metrics (plus the optional secondary metric)
        grouped by the series column."""
        d = super(ParallelCoordinatesViz, self).query_obj()
        fd = self.form_data
        d['metrics'] = copy.copy(fd.get('metrics'))
        second = fd.get('secondary_metric')
        # Only append a secondary metric when one is actually set;
        # previously a missing secondary metric appended None to the list.
        if second and second not in d['metrics']:
            d['metrics'] += [second]
        d['groupby'] = [fd.get('series')]
        return d

    def get_data(self, df):
        return df.to_dict(orient="records")
class HeatmapViz(BaseViz):

    """A nice heatmap visualization that support high density through canvas"""

    viz_type = "heatmap"
    verbose_name = _("Heatmap")
    is_timeseries = False
    credits = (
        'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
        'bl.ocks.org</a>')

    def query_obj(self):
        # One metric over the (x, y) column pair.
        d = super(HeatmapViz, self).query_obj()
        fd = self.form_data
        d['metrics'] = [fd.get('metric')]
        d['groupby'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
        return d

    def get_data(self, df):
        fd = self.form_data
        x = fd.get('all_columns_x')
        y = fd.get('all_columns_y')
        v = fd.get('metric')
        if x == y:
            # Same column on both axes: the frame carries duplicate names,
            # so relabel positionally instead of selecting by name.
            df.columns = ['x', 'y', 'v']
        else:
            df = df[[x, y, v]]
            df.columns = ['x', 'y', 'v']
        # 'normalize_across' selects min-max scaling scope: the whole
        # heatmap, or within each x (or y) group.
        norm = fd.get('normalize_across')
        overall = False
        if norm == 'heatmap':
            overall = True
        else:
            gb = df.groupby(norm, group_keys=False)
            if len(gb) <= 1:
                # A single group degenerates to whole-heatmap scaling.
                overall = True
            else:
                df['perc'] = (
                    gb.apply(
                        lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min()))
                )
        if overall:
            # Min-max scale the metric across the entire heatmap.
            v = df.v
            min_ = v.min()
            df['perc'] = (v - min_) / (v.max() - min_)
        return df.to_dict(orient="records")
class HorizonViz(NVD3TimeSeriesViz):

    """Horizon chart

    https://www.npmjs.com/package/d3-horizon-chart
    """

    # Reuses the NVD3 time-series query/data pipeline unchanged; only the
    # front-end rendering differs.
    viz_type = "horizon"
    verbose_name = _("Horizon Charts")
    credits = (
        '<a href="https://www.npmjs.com/package/d3-horizon-chart">'
        'd3-horizon-chart</a>')
class MapboxViz(BaseViz):

    """Rich maps made with Mapbox"""

    viz_type = "mapbox"
    verbose_name = _("Mapbox")
    is_timeseries = False
    credits = (
        '<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>')

    def query_obj(self):
        # Validates the (longitude, latitude, label, radius) column choices
        # against the optional [Group By] setting before building the query.
        d = super(MapboxViz, self).query_obj()
        fd = self.form_data
        label_col = fd.get('mapbox_label')
        if not fd.get('groupby'):
            d['columns'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
            if label_col and len(label_col) >= 1:
                if label_col[0] == "count":
                    raise Exception(_(
                        "Must have a [Group By] column to have 'count' as the [Label]"))
                d['columns'].append(label_col[0])
            if fd.get('point_radius') != 'Auto':
                d['columns'].append(fd.get('point_radius'))
            # De-duplicate in case label/radius repeat the coordinate columns.
            d['columns'] = list(set(d['columns']))
        else:
            # Ensuring columns chosen are all in group by
            if (label_col and len(label_col) >= 1 and
                    label_col[0] != "count" and
                    label_col[0] not in fd.get('groupby')):
                raise Exception(_(
                    "Choice of [Label] must be present in [Group By]"))
            if (fd.get("point_radius") != "Auto" and
                    fd.get("point_radius") not in fd.get('groupby')):
                raise Exception(_(
                    "Choice of [Point Radius] must be present in [Group By]"))
            if (fd.get('all_columns_x') not in fd.get('groupby') or
                    fd.get('all_columns_y') not in fd.get('groupby')):
                raise Exception(_(
                    "[Longitude] and [Latitude] columns must be present in [Group By]"))
        return d

    def get_data(self, df):
        fd = self.form_data
        label_col = fd.get('mapbox_label')
        custom_metric = label_col and len(label_col) >= 1
        # Per-point label values; defaults to no label on every point.
        metric_col = [None] * len(df.index)
        if custom_metric:
            # When the label repeats a coordinate column, read it from that
            # column directly.
            if label_col[0] == fd.get('all_columns_x'):
                metric_col = df[fd.get('all_columns_x')]
            elif label_col[0] == fd.get('all_columns_y'):
                metric_col = df[fd.get('all_columns_y')]
            else:
                metric_col = df[label_col[0]]
        point_radius_col = (
            [None] * len(df.index)
            if fd.get("point_radius") == "Auto"
            else df[fd.get("point_radius")])

        # using geoJSON formatting
        geo_json = {
            "type": "FeatureCollection",
            "features": [
                {
                    "type": "Feature",
                    "properties": {
                        "metric": metric,
                        "radius": point_radius,
                    },
                    "geometry": {
                        "type": "Point",
                        "coordinates": [lon, lat],
                    }
                }
                for lon, lat, metric, point_radius
                in zip(
                    df[fd.get('all_columns_x')],
                    df[fd.get('all_columns_y')],
                    metric_col, point_radius_col)
            ]
        }

        return {
            "geoJSON": geo_json,
            "customMetric": custom_metric,
            "mapboxApiKey": config.get('MAPBOX_API_KEY'),
            "mapStyle": fd.get("mapbox_style"),
            "aggregatorName": fd.get("pandas_aggfunc"),
            "clusteringRadius": fd.get("clustering_radius"),
            "pointRadiusUnit": fd.get("point_radius_unit"),
            "globalOpacity": fd.get("global_opacity"),
            "viewportLongitude": fd.get("viewport_longitude"),
            "viewportLatitude": fd.get("viewport_latitude"),
            "viewportZoom": fd.get("viewport_zoom"),
            "renderWhileDragging": fd.get("render_while_dragging"),
            "tooltip": fd.get("rich_tooltip"),
            "color": fd.get("mapbox_color"),
        }
class EventFlowViz(BaseViz):

    """A visualization to explore patterns in event sequences"""

    viz_type = "event_flow"
    verbose_name = _("Event flow")
    credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
    is_timeseries = True

    def query_obj(self):
        """Select the event and entity columns plus any extra metadata
        columns, optionally ordered by entity."""
        query = super(EventFlowViz, self).query_obj()
        form_data = self.form_data
        event_key = form_data.get('all_columns_x')
        entity_key = form_data.get('entity')
        # Guard against a missing 'all_columns' entry (was a TypeError).
        meta_keys = [
            col for col in form_data.get('all_columns') or []
            if col != event_key and col != entity_key
        ]
        query['columns'] = [event_key, entity_key] + meta_keys
        # .get() avoids a KeyError when the control is absent, consistent
        # with every other form_data lookup in this method.
        if form_data.get('order_by_entity'):
            query['orderby'] = [(entity_key, True)]
        return query

    def get_data(self, df):
        return df.to_dict(orient="records")
# All visualization classes shipped with this module; list order determines
# the default ordering presented to users.
viz_types_list = [
    TableViz,
    PivotTableViz,
    NVD3TimeSeriesViz,
    NVD3DualLineViz,
    NVD3CompareTimeSeriesViz,
    NVD3TimeSeriesStackedViz,
    NVD3TimeSeriesBarViz,
    DistributionBarViz,
    DistributionPieViz,
    BubbleViz,
    BulletViz,
    MarkupViz,
    WordCloudViz,
    BigNumberViz,
    BigNumberTotalViz,
    SunburstViz,
    DirectedForceViz,
    SankeyViz,
    CountryMapViz,
    ChordViz,
    WorldMapViz,
    FilterBoxViz,
    IFrameViz,
    ParallelCoordinatesViz,
    HeatmapViz,
    BoxPlotViz,
    TreemapViz,
    CalHeatmapViz,
    HorizonViz,
    MapboxViz,
    HistogramViz,
    SeparatorViz,
    EventFlowViz,
]

# viz_type -> class registry, excluding anything blacklisted in the app config.
viz_types = OrderedDict([(v.viz_type, v) for v in viz_types_list
                         if v.viz_type not in config.get('VIZ_TYPE_BLACKLIST')])
| lina9527/easybi | viz.py | Python | mit | 54,580 | [
"VisIt"
] | 2cdcb39f79ec70e41e65226893558f5faf5b55184ce9f267ee06de658b3f75c1 |
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.test import TestCase, tag
from ..visit import WindowPeriod
from ..visit import Visit, VisitCodeError, VisitDateError
class TestVisit(TestCase):
    """Tests for Visit, its code validation and WindowPeriod date windows."""

    @staticmethod
    def _default_visit(code='1000'):
        # Shared fixture: zero-day base with a 0..6 day window, used by
        # nearly every test below (was repeated inline in each test).
        return Visit(
            code=code,
            rbase=relativedelta(days=0),
            rlower=relativedelta(days=0),
            rupper=relativedelta(days=6))

    def test_repr(self):
        """repr() is implemented and returns something."""
        self.assertIsNotNone(repr(self._default_visit()))

    def test_name(self):
        """The visit name defaults to its code."""
        self.assertEqual(self._default_visit().name, '1000')

    def test_visit_title(self):
        """Both title and str() render as 'Visit <code>'."""
        visit = self._default_visit()
        self.assertEqual(visit.title, 'Visit 1000')
        self.assertEqual(str(visit), 'Visit 1000')

    def test_visit_datetime(self):
        """timepoint_datetime round-trips through the setter."""
        visit = self._default_visit()
        visit.timepoint_datetime = datetime(2001, 12, 1)
        self.assertEqual(visit.timepoint_datetime, datetime(2001, 12, 1))

    def test_visit_lower_upper_no_datetime(self):
        """Accessing window dates before setting timepoint_datetime raises.

        Fixed: the original try/except/pass would also pass silently if no
        VisitDateError was raised at all.
        """
        visit = self._default_visit()
        self.assertRaises(VisitDateError, lambda: visit.dates.lower)
        self.assertRaises(VisitDateError, lambda: visit.dates.upper)

    def test_visit_lower_upper(self):
        """Window bounds are offset from the timepoint datetime."""
        visit = self._default_visit()
        visit.timepoint_datetime = datetime(2001, 12, 1)
        self.assertEqual(visit.dates.lower, datetime(2001, 12, 1))
        self.assertEqual(visit.dates.upper, datetime(2001, 12, 7))

    def test_window_period_days(self):
        """get_window supports both tuple indexing and lower/upper attrs."""
        wp = WindowPeriod(
            rlower=relativedelta(days=0),
            rupper=relativedelta(days=6))
        dt = datetime(2001, 12, 1)
        self.assertEqual(wp.get_window(dt)[0], dt)
        self.assertEqual(wp.get_window(dt).lower, dt)
        self.assertEqual(wp.get_window(dt)[1], datetime(2001, 12, 7))
        self.assertEqual(wp.get_window(dt).upper, datetime(2001, 12, 7))

    def test_window_period_weeks(self):
        """get_window handles week-based deltas."""
        wp = WindowPeriod(
            rlower=relativedelta(weeks=1),
            rupper=relativedelta(weeks=6))
        dt = datetime(2001, 12, 8)
        self.assertEqual(wp.get_window(dt).lower, datetime(2001, 12, 1))
        self.assertEqual(wp.get_window(dt).upper, datetime(2002, 1, 19))

    def test_good_codes(self):
        """A well-formed code does not raise.

        Fixed: the original ran two byte-identical try blocks; the
        duplicate was removed.
        """
        try:
            self._default_visit()
        except VisitCodeError as e:
            self.fail(f'VisitError unexpectedly raised. Got {e}')

    def test_no_code(self):
        """A missing code raises VisitCodeError."""
        self.assertRaises(
            VisitCodeError, Visit, code=None,
            rbase=relativedelta(days=0),
            rlower=relativedelta(days=0),
            rupper=relativedelta(days=6))

    def test_bad_code_not_string(self):
        """A non-string code raises VisitCodeError."""
        self.assertRaises(
            VisitCodeError, Visit, code=1,
            rbase=relativedelta(days=0),
            rlower=relativedelta(days=0),
            rupper=relativedelta(days=6))

    def test_bad_code_format(self):
        """A malformed code raises VisitCodeError."""
        self.assertRaises(
            VisitCodeError, Visit, code='Aa-1',
            rbase=relativedelta(days=0),
            rlower=relativedelta(days=0),
            rupper=relativedelta(days=6))
| botswana-harvard/edc-visit-schedule | edc_visit_schedule/tests/test_visit.py | Python | gpl-2.0 | 4,371 | [
"VisIt"
] | 39473f6c9bedfa193043a31cb28c4b7514502d8a846c81d19d1eda1f4e54e9fb |
from django.test import SimpleTestCase
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import TestXmlMixin, patch_get_xform_resource_overrides
from corehq.util.test_utils import flag_enabled
@patch_get_xform_resource_overrides()
class GridMenuSuiteTests(SimpleTestCase, TestXmlMixin):
    # Tests for the style="grid" attribute emitted into the app suite
    # <menu> elements, across the use_grid_menus / grid_form_menus settings.

    def test_that_grid_style_is_added(self, *args):
        """
        Confirms that style="grid" is added to the root menu
        """
        factory = AppFactory(build_version='2.24.0')
        factory.app.use_grid_menus = True
        factory.new_basic_module('registration', 'patient registration')
        factory.app.get_module(0).put_in_root = True
        factory.new_basic_module('visit', 'patient visit')
        factory.app.get_module(1).put_in_root = True

        suite = factory.app.create_suite()
        root_xpath = './menu[@id="root"]'
        self.assertXmlHasXpath(suite, root_xpath)
        self.assertXmlPartialEqual(
            """
            <partial>
                <menu id="root" style="grid">
                    <text><locale id="modules.m0"/></text>
                    <command id="m0-f0"/>
                </menu>
                <menu id="root" style="grid">
                    <text><locale id="modules.m1"/></text>
                    <command id="m1-f0"/>
                </menu>
            </partial>
            """,
            suite,
            root_xpath
        )

    def test_that_root_menu_added(self, *args):
        """
        Confirms that a menu is added with id="root" and style="grid"
        when the app normally wouldn't have a menu with id="root".
        """
        factory = AppFactory(build_version='2.24.0')
        factory.app.use_grid_menus = True
        factory.new_basic_module('registration', 'patient')

        suite = factory.app.create_suite()
        root_xpath = './menu[@id="root"]'
        self.assertXmlHasXpath(suite, root_xpath)
        self.assertXmlPartialEqual(
            '<partial><menu id="root" style="grid"><text/></menu></partial>',
            suite,
            root_xpath
        )

    def test_use_grid_menus_is_false(self, *args):
        """
        Confirms that style="grid" is not added to any menus when use_grid_menus is False.
        """
        factory = AppFactory(build_version='2.24.0')
        factory.app.use_grid_menus = False
        factory.new_basic_module('registration', 'patient')
        suite = factory.app.create_suite()
        style_xpath = './menu[@style="grid"]'
        self.assertXmlDoesNotHaveXpath(suite, style_xpath)

    def test_grid_menu_for_none(self, *args):
        # grid_form_menus == 'none': per-module display_style='grid' is ignored.
        factory = AppFactory(build_version='2.24.3')
        factory.app.create_profile()
        factory.app.grid_form_menus = 'none'
        factory.new_basic_module('registration', 'patient')
        factory.app.get_module(0).display_style = 'grid'
        root_xpath = './menu[@id="root"]'
        m0_xpath = './menu[@id="m0"]'

        # with Modules Menu to be list should not render root menu and render module w/o style=grid
        factory.app.use_grid_menus = False
        suite = factory.app.create_suite()
        self.assertXmlDoesNotHaveXpath(suite, root_xpath)
        self.assertXmlPartialEqual(
            '<partial><menu id="m0"><text><locale id="modules.m0"/></text><command id="m0-f0"/></menu></partial>',
            suite,
            m0_xpath
        )

        # with Modules Menu to be grid should render root menu w/ style=grid and render module w/o style=grid
        factory.app.use_grid_menus = True
        suite = factory.app.create_suite()
        self.assertXmlPartialEqual(
            '<partial><menu id="root" style="grid"><text/></menu></partial>',
            suite,
            root_xpath
        )
        self.assertXmlPartialEqual(
            '<partial><menu id="m0"><text><locale id="modules.m0"/></text><command id="m0-f0"/></menu></partial>',
            suite,
            m0_xpath
        )

    def test_grid_menu_for_some(self, *args):
        # grid_form_menus == 'some': only modules with display_style='grid'
        # get the grid style.
        factory = AppFactory(build_version='2.24.3')
        factory.app.create_profile()
        factory.app.grid_form_menus = 'some'
        factory.new_basic_module('registration', 'patient')
        factory.new_basic_module('visit', 'patient visit')
        factory.app.get_module(1).display_style = 'grid'
        root_xpath = './menu[@id="root"]'
        grid_module_xpath = './menu[@id="m1"]'

        # with Modules Menu to be list should not render root menu and render module w/ style=grid
        factory.app.use_grid_menus = False
        suite = factory.app.create_suite()
        self.assertXmlDoesNotHaveXpath(suite, root_xpath)
        self.assertXmlHasXpath(suite, grid_module_xpath)
        self.assertXmlPartialEqual(
            '<partial><menu id="m1" style="grid"><text><locale id="modules.m1"/></text>\
            <command id="m1-f0"/></menu></partial>',
            suite,
            grid_module_xpath
        )

        # with Modules Menu to be grid should render both root menu and module w/ style=grid
        factory.app.use_grid_menus = True
        suite = factory.app.create_suite()
        self.assertXmlHasXpath(suite, root_xpath)
        self.assertXmlPartialEqual(
            '<partial><menu id="root" style="grid"><text/></menu></partial>',
            suite,
            root_xpath
        )
        self.assertXmlPartialEqual(
            '<partial><menu id="m1" style="grid"><text><locale id="modules.m1"/></text>\
            <command id="m1-f0"/></menu></partial>',
            suite,
            grid_module_xpath
        )

        # with module itself being the root should render root menu style=grid with module content
        factory.app.get_module(1).put_in_root = True
        suite = factory.app.create_suite()
        self.assertXmlPartialEqual(
            '<partial><menu id="root" style="grid"><text><locale id="modules.m1"/></text>\
            <command id="m1-f0"/></menu></partial>',
            suite,
            root_xpath
        )

    def test_grid_menu_for_all(self, *args):
        # grid_form_menus == 'all': every module gets the grid style.
        factory = AppFactory(build_version='2.24.3')
        factory.app.create_profile()
        factory.app.grid_form_menus = 'all'
        factory.new_basic_module('registration', 'patient')
        suite = factory.app.create_suite()
        root_xpath = './menu[@id="root"]'
        grid_module_xpath = './menu[@id="m0"]'

        # with Modules Menu to be list should not render root menu and render module w/ style=grid
        factory.app.use_grid_menus = False
        self.assertXmlDoesNotHaveXpath(suite, root_xpath)
        self.assertXmlPartialEqual(
            '<partial><menu id="m0" style="grid"><text><locale id="modules.m0"/></text>\
            <command id="m0-f0"/></menu></partial>',
            suite,
            grid_module_xpath
        )

        # with Modules Menu to be grid should render root menu and module w/ style=grid
        factory.app.use_grid_menus = True
        suite = factory.app.create_suite()
        self.assertXmlPartialEqual(
            '<partial><menu id="root" style="grid"><text/></menu></partial>',
            suite,
            root_xpath
        )
        self.assertXmlPartialEqual(
            '<partial><menu id="m0" style="grid"><text><locale id="modules.m0"/></text>\
            <command id="m0-f0"/></menu></partial>',
            suite,
            grid_module_xpath
        )

        # with Modules Menu to be list and module itself being the root should render root w/o style=grid with
        # module content
        factory.app.use_grid_menus = False
        factory.app.get_module(0).put_in_root = True
        suite = factory.app.create_suite()
        self.assertXmlPartialEqual(
            '<partial><menu id="root"><text><locale id="modules.m0"/></text>\
            <command id="m0-f0"/></menu></partial>',
            suite,
            root_xpath
        )

        # with Modules Menu to be grid and module itself being the root should render root w/ style=grid with
        # module content
        factory.app.get_module(0).put_in_root = True
        factory.app.use_grid_menus = True
        suite = factory.app.create_suite()
        self.assertXmlPartialEqual(
            '<partial><menu id="root" style="grid"><text><locale id="modules.m0"/></text>\
            <command id="m0-f0"/></menu></partial>',
            suite,
            root_xpath
        )
| dimagi/commcare-hq | corehq/apps/app_manager/tests/test_grid_menus.py | Python | bsd-3-clause | 8,419 | [
"VisIt"
] | d66a0979bb1ff0302b79860cf1c7e6bc2f9632ffac337d229d75df3183032b0e |
#!/opt/local/bin/python
"""
Calculate the total energy as a function of lattice constant,
by altering the lattice constant in in.pw file.
And if possible, calculate equilibrium lattice size and
bulk modulus, too.
Assume using Quantum ESPRESSO (QE).
PREPARE mode creates directories and in.pw files to be computed by QE.
ANALYZE mode reads the data obtained via QE and computes some values.
Usage:
etot_vs_size.py prepare [options] POSCAR
etot_vs_size.py analyze
Options:
-h, --help Shows this message and exit.
-n NITER Number of points to be calculated. [default: 11]
In case of even number, original size is not used.
--no-LS Do not perform least square fitting. [default: False]
-s STRAIN Maximum strain value in a direction. [default: 0.05]
-x Change in x-direction. [default: False]
-y Change in y-direction. [default: False]
-z Change in z-direction. [default: False]
--calc=CALC
Calculation type. [default: scf]
-o OUTFNAME
Output file name. [default: in.pw]
-p,--pitch=PITCH
Pitch of k-points. [default: 0.0968]
"""
import sys,os,commands,copy
import numpy as np
from docopt import docopt
from ase.io import read
from scipy.optimize import leastsq
import json
sys.path.append(os.path.dirname(__file__))
from poscar2qein import write_espresso_in
__author__ = 'Ryo KOBAYASHI'
__version__ = '160922'
__licence__ = 'MIT'
_confname = 'conf.etot_vs_size.json'
def read_POSCAR(fname='POSCAR'):
    """Read a VASP POSCAR file and return (al, hmat, natm).

    al -- lattice scaling constant (first value on the 2nd line)
    hmat -- 3x3 cell matrix read from lines 3-5
    natm -- total number of atoms, summed over the species-count line

    The 6th line is either the species-count line itself (VASP 4 style)
    or a line of element symbols followed by the counts (VASP 5 style).
    Fixed: the file handle is now closed even if parsing raises.
    """
    with open(fname, 'r') as f:
        f.readline()  # 1st line: comment, discarded
        al = float(f.readline().split()[0])
        hmat = np.zeros((3, 3))
        for i in range(3):
            hmat[i] = [float(x) for x in f.readline().split()]
        buf = f.readline().split()
        if not buf[0].isdigit():
            # Symbol line (VASP 5): the counts are on the next line.
            buf = f.readline().split()
        natm = sum(int(b) for b in buf)
    return (al, hmat, natm)
def get_vol(al, hmat):
    """Return the cell volume spanned by the three lattice vectors,
    taken as the *columns* of hmat scaled by al."""
    vec_a, vec_b, vec_c = (al * hmat[0:3, i] for i in (0, 1, 2))
    return np.dot(vec_a, np.cross(vec_b, vec_c))
def replace_lattice_constant(x, infname='POSCAR', outfname='POSCAR'):
    """Copy *infname* to *outfname*, replacing the 2nd line (the lattice
    constant) with *x*; all other lines are copied verbatim."""
    with open(infname, 'r') as src:
        lines = src.readlines()
    with open(outfname, 'w') as dst:
        for idx, line in enumerate(lines):
            if idx == 1:
                dst.write(' {0:10.4f}\n'.format(x))
            else:
                dst.write(line)
def replace_hmat(hmat, infname='POSCAR', outfname='POSCAR'):
    """Copy *infname* to *outfname*, replacing lines 3-5 (the cell matrix)
    with the rows of *hmat*; all other lines are copied verbatim."""
    with open(infname, 'r') as src:
        lines = src.readlines()
    with open(outfname, 'w') as dst:
        for idx, line in enumerate(lines):
            if 2 <= idx <= 4:
                row = hmat[idx - 2]
                dst.write(' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(
                    row[0], row[1], row[2]))
            else:
                dst.write(line)
def residuals(p, y, x):
    """Murnaghan-fit residuals: observed energy *y* minus the model
    energy at volume *x* for parameters p = (B, B', V0, E(V0))."""
    b, bp, v0, ev0 = p
    vr = v0 / x
    model = b * x / (bp * (bp - 1.0)) * (bp * (1.0 - vr) + vr ** bp - 1.0) + ev0
    return y - model
def peval(x, p):
    """Evaluate the Murnaghan equation of state at volume *x* for
    parameters p = (B, B', V0, E(V0))."""
    b, bp, v0, ev0 = p
    vr = v0 / x
    return b * x / (bp * (bp - 1.0)) * (bp * (1.0 - vr) + vr ** bp - 1.0) + ev0
def prepare(args):
    # PREPARE mode: create etot_vs_size_##### directories, each holding a
    # POSCAR scaled over NITER steps spanning +/- STRAIN around the
    # original size. With none of -x/-y/-z set, the global lattice
    # constant (2nd POSCAR line) is scaled; otherwise the selected rows of
    # the cell matrix are scaled instead.
    # NOTE(review): this is Python 2 code (print statements).
    niter= int(args['-n'])
    no_LS= args['--no-LS']
    mvx= args['-x']
    mvy= args['-y']
    mvz= args['-z']
    strain= float(args['-s'])
    # NOTE(review): sfac/calc/pitch/outfname/atoms0/atoms are read but never
    # used below -- presumably leftovers from the QE in.pw generation path;
    # confirm before removing.
    sfac = float(args['-s'])
    calc = args['--calc']
    pitch = float(args['--pitch'])
    outfname = args['-o']
    poscar = args['POSCAR']
    if niter < 2:
        raise RuntimeError('NITER must be larger than 1')
    atoms0 = read(poscar,format='vasp')
    atoms = atoms0.copy()
    # NOTE(review): reads './POSCAR' regardless of the POSCAR argument above.
    al_orig,hmat_orig,natm= read_POSCAR()
    hmat= copy.copy(hmat_orig)
    hmat_min= copy.copy(hmat_orig)
    hmat_max= copy.copy(hmat_orig)
    dhmat= np.zeros((3,3),dtype=float)
    if not mvx and not mvy and not mvz:
        # Uniform scaling of the lattice constant.
        al_min= al_orig*(1.0-strain)
        al_max= al_orig*(1.0+strain)
        dl= (al_max-al_min)/(niter-1)
    else:
        # Direction-selective scaling of the cell matrix rows.
        if mvx:
            hmat_min[0]= hmat_orig[0]*(1.0-strain)
            hmat_max[0]= hmat_orig[0]*(1.0+strain)
        if mvy:
            hmat_min[1]= hmat_orig[1]*(1.0-strain)
            hmat_max[1]= hmat_orig[1]*(1.0+strain)
        if mvz:
            hmat_min[2]= hmat_orig[2]*(1.0-strain)
            hmat_max[2]= hmat_orig[2]*(1.0+strain)
        dhmat= (hmat_max -hmat_min)/(niter-1)
    for iter in range(niter):
        dname= "etot_vs_size_{0:05d}".format(iter)
        print dname
        os.system("mkdir -p "+dname)
        # NOTE(review): copies VASP inputs (INCAR/KPOINTS/POTCAR) although
        # the module docstring talks about Quantum ESPRESSO -- confirm
        # which code is actually targeted.
        os.system('cp INCAR KPOINTS POTCAR '+dname+'/')
        if not mvx and not mvy and not mvz:
            al= al_min +dl*iter
            hmat= hmat_orig
            replace_lattice_constant(al,infname='POSCAR',outfname=dname+'/POSCAR')
        else:
            al= al_orig
            hmat= hmat_min +dhmat*iter
            replace_hmat(hmat,infname='POSCAR',outfname=dname+'/POSCAR')
    print 'prepare done.'
    print ''
    print 'Perform VASP calculations in those directories and then run the following command,'
    print 'python etot_vs_size.py analyze'
    print ''
def analyze(config):
    """
    Analyze etot_vs_size using VASP results in etot_vs_size_##### directories.
    Configuration are read from config.etot_vs_size.json.
    """
    # Rebuild the same lattice-size grid used by prepare() from the saved
    # CLI options, collect the total energy per point, then least-square
    # fit a Murnaghan equation of state to extract the equilibrium
    # lattice constant, cohesive energy and bulk modulus.
    # NOTE(review): Python 2 code (print statements; len(xarr)/2 relies on
    # integer division).
    niter= int(config['-n'])
    no_LS= config['--no-LS']
    mvx= config['-x']
    mvy= config['-y']
    mvz= config['-z']
    strain= float(config['-s'])
    al_orig,hmat_orig,natm= read_POSCAR()
    hmat= copy.copy(hmat_orig)
    hmat_min= copy.copy(hmat_orig)
    hmat_max= copy.copy(hmat_orig)
    dhmat= np.zeros((3,3),dtype=float)
    if not mvx and not mvy and not mvz:
        al_min= al_orig*(1.0-strain)
        al_max= al_orig*(1.0+strain)
        dl= (al_max-al_min)/(niter-1)
    else:
        if mvx:
            hmat_min[0]= hmat_orig[0]*(1.0-strain)
            hmat_max[0]= hmat_orig[0]*(1.0+strain)
        if mvy:
            hmat_min[1]= hmat_orig[1]*(1.0-strain)
            hmat_max[1]= hmat_orig[1]*(1.0+strain)
        if mvz:
            hmat_min[2]= hmat_orig[2]*(1.0-strain)
            hmat_max[2]= hmat_orig[2]*(1.0+strain)
        dhmat= (hmat_max -hmat_min)/(niter-1)
    logfile= open('log.etot_vs_size','w')
    outfile1= open('out.etot_vs_size','w')
    for iter in range(niter):
        dname= "etot_vs_size_{0:05d}".format(iter)
        if not mvx and not mvy and not mvz:
            al= al_min +dl*iter
            hmat= hmat_orig
            #replace_lattice_constant(al)
        else:
            al= al_orig
            hmat= hmat_min +dhmat*iter
            #replace_hmat(hmat)
        # Total energy: 5th field of the last OSZICAR line (VASP output).
        erg= float(commands.getoutput("tail -n1 {0:s}/OSZICAR".format(dname) \
                                      +" | awk '{print $5}'"))
        #os.system("mkdir -p "+dname)
        #os.system("cp vasprun.xml {0}/".format(dname))
        vol= get_vol(al,hmat)
        print ' {0:10.4f} {1:10.4f} {2:15.7f}'.format(al,vol,erg)
        outfile1.write(' {0:10.4f} {1:10.4f} {2:15.7f}\n'.format(al,vol,erg))
        logfile.write(' {0:10.4f} {1:10.4f} {2:15.7f}\n'.format(al,vol,erg))
    outfile1.close()
    #...prepare for Murnaghan fitting
    f= open('out.etot_vs_size','r')
    lines= f.readlines()
    xarr= np.zeros((len(lines)))
    yarr= np.zeros((len(lines)))
    for l in range(len(lines)):
        dat= lines[l].split()
        xarr[l]= float(dat[1])
        yarr[l]= float(dat[2])
    f.close()
    #...set initial values
    b= 1.0
    bp= 2.0
    ev0= min(yarr)
    v0= xarr[len(xarr)/2]
    p0= np.array([b,bp,v0,ev0])
    #...least square fitting
    plsq= leastsq(residuals,p0,args=(yarr,xarr))
    #...output results
    print ' plsq=',plsq[0]
    print '{0:=^72}'.format(' RESULTS ')
    logfile.write('{0:=^72}\n'.format(' RESULTS '))
    # Per-unit-cell volume of the unscaled cell, used to convert the
    # fitted equilibrium volume back to a lattice constant.
    a1= hmat_orig[0:3,0]
    a2= hmat_orig[0:3,1]
    a3= hmat_orig[0:3,2]
    uvol= np.dot(a1,np.cross(a2,a3))
    lc= (plsq[0][2]/uvol)**(1.0/3)
    print ' Lattice constant = {0:10.4f} Ang.'.format(lc)
    print ' Cohesive energy = {0:10.3f} eV'.format(plsq[0][3]/natm)
    # 1 eV/Ang^3 = 160.2 GPa conversion factor.
    print ' Bulk modulus = {0:10.2f} GPa'.format(plsq[0][0]*1.602e+2)
    logfile.write(' Lattice constant = {0:10.4f} Ang.\n'.format(lc))
    logfile.write(' Cohesive energy = {0:10.3f} eV\n'.format(plsq[0][3]/natm))
    logfile.write(' Bulk modulus = {0:10.2f} GPa\n'.format(plsq[0][0]*1.602e+2))
    print '{0:=^72}'.format(' OUTPUT ')
    print ' * out.etot_vs_size'
    print ' * log.etot_vs_size'
if __name__ == '__main__':
    args= docopt(__doc__,version=__version__)
    if args['prepare']:
        # Persist the CLI options so a later 'analyze' run reuses the
        # exact same geometry settings.
        with open(_confname,'w') as f:
            f.write(json.dumps(args,sort_keys=True,indent=4))
        prepare(args)
    elif args['analyze']:
        try:
            with open(_confname,'r') as f:
                config= json.load(f)
        except:
            raise RuntimeError('Cannot read '+_confname)
        analyze(config)
| ryokbys/nap | nappy/espresso/etot_vs_size.py | Python | mit | 8,878 | [
"ASE",
"Quantum ESPRESSO",
"VASP"
] | 535d1309b3c16d109803fd809687d594b6ea824ad220ed689a910652cf0bf183 |
import ast
import configparser
import os
import botocore.session
import botocore.exceptions
from platform import system
from types import MethodType
def get_prepared_config(
        profile,
        region,
        ssl_verification,
        adfs_ca_bundle,
        adfs_host,
        output_format,
        provider_id,
        s3_signature_version,
        session_duration,
        sspi,
        username_password_command,
):
    """
    Prepares ADF configuration for login task.
    The task comprises steps as follows:

    - default configuration preparation,
    - creating aws cli configuration files, if needed
    - loading adf configuration for specified aws profiles

    The configuration is stored in ctx.adfs_config attribute

    :param output_format: output format used by aws cli
    :param adfs_host: fqdn of adfs host that will be used to authenticate user
    :param ssl_verification: SSL certificate verification: Whether or not strict certificate
                             verification is done, False should only be used for dev/test
    :param adfs_ca_bundle: Override CA bundle for SSL certificate
                           verification for ADFS server only.
    :param region: The default AWS region that this script will connect
                   to for all API calls
    :param profile: aws cli profile
    :param provider_id: Provider ID, e.g urn:amazon:webservices (optional)
    :param s3_signature_version: s3 signature version
    :param session_duration: AWS STS session duration (default 1 hour)
    :param sspi: Whether SSPI is enabled
    :param username_password_command: The command used to retrieve username and password information
    """
    def default_if_none(value, default):
        # Explicit None check so falsy-but-meaningful values (False, 0, '')
        # still override the default.
        return value if value is not None else default

    # The profile must be resolved first: the stored-profile load below
    # reads the section named after it.
    adfs_config = create_adfs_default_config(profile='default')
    adfs_config.profile = default_if_none(profile, adfs_config.profile)

    _create_base_aws_cli_config_files_if_needed(adfs_config)
    _load_adfs_config_from_stored_profile(adfs_config, adfs_config.profile)

    # Explicit CLI arguments override whatever the stored profile provided.
    adfs_config.ssl_verification = default_if_none(ssl_verification, adfs_config.ssl_verification)
    adfs_config.adfs_ca_bundle = default_if_none(adfs_ca_bundle, adfs_config.adfs_ca_bundle)
    adfs_config.region = default_if_none(region, adfs_config.region)
    adfs_config.adfs_host = default_if_none(adfs_host, adfs_config.adfs_host)
    adfs_config.output_format = default_if_none(output_format, adfs_config.output_format)
    adfs_config.provider_id = default_if_none(provider_id, adfs_config.provider_id)
    adfs_config.s3_signature_version = default_if_none(
        s3_signature_version,
        adfs_config.s3_signature_version
    )
    adfs_config.session_duration = default_if_none(session_duration, adfs_config.session_duration)
    adfs_config.sspi = default_if_none(sspi, adfs_config.sspi)
    adfs_config.username_password_command = default_if_none(username_password_command, adfs_config.username_password_command)

    return adfs_config
def create_adfs_default_config(profile):
    """Build the default ADFS configuration object for *profile*.

    Session-derived values (region, output format, file locations) come
    from botocore with hard-coded fallbacks; the remaining attributes are
    plain defaults that callers may override afterwards.
    """
    from types import SimpleNamespace

    # SimpleNamespace replaces the opaque type('', (), {})() instance hack;
    # it supports the same dynamic attribute get/set.
    config = SimpleNamespace()

    # Use botocore session API to get defaults
    session = _create_aws_session(profile)

    # region: The default AWS region that this script will connect
    # to for all API calls
    config.region = session.get_config_variable('region') or 'eu-central-1'

    # aws cli profile to store config and access keys into
    config.profile = session.profile or 'default'

    # output format: The AWS CLI output format that will be configured in the
    # adf profile (affects subsequent CLI calls)
    config.output_format = session.get_config_variable('format') or 'json'

    # aws credential location: The file where this script will store the temp
    # credentials under the configured profile
    config.aws_credentials_location = os.path.expanduser(session.get_config_variable('credentials_file'))
    config.aws_config_location = os.path.expanduser(session.get_config_variable('config_file'))

    # cookie location: The file where this script will store the ADFS session cookies
    config.adfs_cookie_location = os.path.join(os.path.dirname(config.aws_credentials_location), 'adfs_cookies')

    # SSL certificate verification: Whether or not strict certificate
    # verification is done, False should only be used for dev/test
    config.ssl_verification = True

    # Override CA bundle for SSL certificate verification for ADFS server only.
    config.adfs_ca_bundle = None

    # AWS role arn
    config.role_arn = None

    config.adfs_host = None
    config.adfs_user = None

    # aws provider id. (Optional - 9/10 times it will always be urn:amazon:websevices)
    config.provider_id = 'urn:amazon:webservices'

    # Note: if your bucket require CORS, it is advised that you use path style addressing
    # (which is set by default in signature version 4).
    config.s3_signature_version = None

    # AWS STS session duration, default is 3600 seconds (the redundant
    # int(3600) wrapper was dropped)
    config.session_duration = 3600

    # Whether SSPI is enabled
    config.sspi = system() == "Windows"

    # The command used to retrieve username and password information
    config.username_password_command = None

    return config
def _create_aws_session(profile):
    """Return a botocore session for *profile*.

    Falls back to the 'default' profile when the requested one does not
    exist, and finally to a profile-less session (that last attempt is
    allowed to raise).
    """
    def _session_for(profile_name=None):
        candidate = botocore.session.Session(profile=profile_name)
        # Force config resolution so a missing profile fails fast here.
        candidate.get_config_variable('region')
        return candidate

    for fallback in (profile, 'default'):
        try:
            return _session_for(fallback)
        except botocore.exceptions.ProfileNotFound:
            continue
    return _session_for()
def _load_adfs_config_from_stored_profile(adfs_config, profile):
    """Overlay *adfs_config* with values previously stored for *profile*
    in the AWS CLI config file; defaults are kept for absent options.

    Non-'default' profiles live under a '[profile <name>]' section,
    following the AWS CLI convention.
    """
    def get_or(self, profile, option, default_value):
        # configparser helper: read an option or fall back to the default.
        if self.has_option(profile, option):
            return self.get(profile, option)
        return default_value

    def load_from_config(config_location, profile, loader):
        config = configparser.RawConfigParser()
        config.read(config_location)
        if config.has_section(profile):
            # Bind get_or onto this parser instance so the loader can
            # call config.get_or(...).
            setattr(config, get_or.__name__, MethodType(get_or, config))
            loader(config, profile)

        del config

    def load_config(config, profile):
        adfs_config.region = config.get_or(profile, 'region', adfs_config.region)
        adfs_config.output_format = config.get_or(profile, 'output', adfs_config.output_format)
        # Stored as the string repr of a bool; literal_eval turns it back.
        adfs_config.ssl_verification = ast.literal_eval(config.get_or(
            profile, 'adfs_config.ssl_verification',
            str(adfs_config.ssl_verification)))
        adfs_config.role_arn = config.get_or(profile, 'adfs_config.role_arn', adfs_config.role_arn)
        adfs_config.adfs_host = config.get_or(profile, 'adfs_config.adfs_host', adfs_config.adfs_host)
        adfs_config.adfs_user = config.get_or(profile, 'adfs_config.adfs_user', adfs_config.adfs_user)
        adfs_config.provider_id = config.get_or(profile, 'adfs_config.provider_id', adfs_config.provider_id)

        adfs_config.s3_signature_version = None
        # The 's3' option may hold a nested ini fragment; parse it with a
        # synthetic section header to reach 'signature_version'.
        rawS3SubSection = config.get_or(profile, 's3', None)
        if rawS3SubSection:
            s3SubSection = configparser.RawConfigParser()
            setattr(s3SubSection, get_or.__name__, MethodType(get_or, s3SubSection))
            s3SubSection.read_string('[s3_section]\n' + rawS3SubSection)
            adfs_config.s3_signature_version = s3SubSection.get_or(
                's3_section',
                'signature_version',
                adfs_config.s3_signature_version
            )

        # NOTE(review): session_duration is read back without int()
        # conversion (unlike the int default) -- confirm downstream
        # callers coerce the string.
        adfs_config.session_duration = config.get_or(
            profile, 'adfs_config.session_duration',
            adfs_config.session_duration)
        adfs_config.sspi = ast.literal_eval(config.get_or(
            profile, 'adfs_config.sspi',
            str(adfs_config.sspi)))
        adfs_config.username_password_command = config.get_or(profile, 'adfs_config.username_password_command', adfs_config.username_password_command)

    if profile == 'default':
        load_from_config(adfs_config.aws_config_location, profile, load_config)
    else:
        load_from_config(adfs_config.aws_config_location, 'profile ' + profile, load_config)
def _create_base_aws_cli_config_files_if_needed(adfs_config):
    """Ensure the AWS CLI config and credentials files (and their parent
    directories) exist, creating them with restrictive permissions.

    Fixes an ordering bug in the previous version: the credentials file
    was touched BEFORE its parent directory was created, which raised
    FileNotFoundError on a machine without an existing ~/.aws layout.
    """
    def _touch(fname, mode=0o600):
        # O_CREAT creates the file if missing, O_APPEND avoids truncating
        # an existing one; the context manager closes the fd for us.
        flags = os.O_CREAT | os.O_APPEND
        with os.fdopen(os.open(fname, flags, mode)):
            os.utime(fname, None)

    def _ensure_dir(path):
        # makedirs also handles nested paths; 0o700 keeps creds private.
        if not os.path.exists(path):
            os.makedirs(path, 0o700)

    # Create BOTH parent directories before touching any file.
    _ensure_dir(os.path.dirname(adfs_config.aws_config_location))
    _ensure_dir(os.path.dirname(adfs_config.aws_credentials_location))

    if not os.path.exists(adfs_config.aws_credentials_location):
        _touch(adfs_config.aws_credentials_location)
    if not os.path.exists(adfs_config.aws_config_location):
        _touch(adfs_config.aws_config_location)
| venth/aws-adfs | aws_adfs/prepare.py | Python | mit | 9,150 | [
"ADF"
] | ae09b620d7ed7fe21b2171acf5305dcf13f898a59b9e5046d3827c5bb8b84e7a |
# Copyright (C) 2008-2009 Mark A. Matienzo
#
# This file is part of worldcat, the Python WorldCat API module.
#
# worldcat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# worldcat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with worldcat. If not, see <http://www.gnu.org/licenses/>.
# request/search.py -- Request objects for WorldCat Search API
import urllib2
from exceptions import StopIteration
from worldcat.exceptions import APIKeyError, APIKeyNotSpecifiedError, \
EmptyQueryError, EmptyRecordNumberError, \
InvalidArgumentError, ExtractError
from worldcat.request import WorldCatRequest
from worldcat.response.search import SearchAPIResponse
from worldcat.util.extract import extract_elements
class SearchAPIRequest(WorldCatRequest):
    """request.search.SearchAPIRequest: base class for all search API requests

    SearchAPIRequests require an API key when an instance is created. This is
    done by passing the 'wskey' kwarg. E.g.:

    >>> s = SearchAPIRequest(wskey='...insert your api key here')
    """
    def __init__(self, **kwargs):
        """Constructor for SearchAPIRequest.

        Raises APIKeyNotSpecifiedError when the mandatory 'wskey' kwarg
        is missing; all kwargs are forwarded to WorldCatRequest.
        """
        if 'wskey' not in kwargs:
            raise APIKeyNotSpecifiedError
        WorldCatRequest.__init__(self, **kwargs)
        # Allowed values for request parameters, keyed by parameter name;
        # consulted when validating request arguments.
        self._validators = {
            'servicelevel': ('default', 'full'),
            'cformat': ('apa', 'chicago', 'harvard',
                'mla', 'turabian', 'all'),
            'recordSchema': ('info:srw/schema/1/marcxml',
                'info:srw/schema/1/marcxml-v1.1',
                'info:srw/schema/1/dc',
                'info:srw/schema/1/dc-v1.1'),
            'format': ('atom', 'rss')}

    def get_response(self):
        """Get method for SearchAPIRequests.

        Performs the HTTP GET and wraps the result in a SearchAPIResponse.
        Exception handling is specific to SearchAPIRequests:
        HTTP 407 means an invalid key, HTTP 400 a missing key.
        """
        try:
            self.http_get()
        except urllib2.HTTPError, e:
            if e.code == 407:
                raise APIKeyError
            elif e.code == 400:
                raise APIKeyNotSpecifiedError
            else:
                # Propagate any other HTTP failure unchanged.
                raise
        return SearchAPIResponse(self)
class OpenSearchRequest(SearchAPIRequest):
    """request.search.OpenSearchRequest: queries search API using OpenSearch

    OpenSearchRequests are always keyword searches. The request object is
    its own iterator (Python 2 protocol): each next() call advances the
    'start' argument through the result pages."""
    def __init__(self, **kwargs):
        """Constructor for OpenSearch requests"""
        SearchAPIRequest.__init__(self, **kwargs)

    def __iter__(self):
        # Iterator protocol: the request itself tracks paging state.
        return self

    def api_url(self):
        """API base URL method for OpenSearchRequests."""
        self.url = 'http://worldcat.org/webservices/catalog/search/opensearch'

    def next(self):
        # Read the OpenSearch paging elements (startIndex, itemsPerPage,
        # totalResults) from the last response and move 'start' to the
        # next page; StopIteration when exhausted or counts unparsable.
        _i = extract_elements(self.response,
            element='{http://a9.com/-/spec/opensearch/1.1/}startIndex')
        _p = extract_elements(self.response,
            element='{http://a9.com/-/spec/opensearch/1.1/}itemsPerPage')
        _t = extract_elements(self.response,
            element='{http://a9.com/-/spec/opensearch/1.1/}totalResults')
        try:
            if int(_t[0].text) > (int(_i[0].text) + int(_p[0].text)):
                self.args['start'] = int(_i[0].text) + int(_p[0].text)
            else:
                raise StopIteration
        except ValueError:
            raise StopIteration

    def subclass_validator(self, quiet=False):
        """Validator method for OpenSearchRequests.

        Requires the 'q' (query) argument; returns False or raises
        EmptyQueryError depending on *quiet*."""
        if 'q' not in self.args:
            if quiet == True:
                return False
            else:
                raise EmptyQueryError
        else:
            return True
class SRURequest(SearchAPIRequest):
    """request.search.SRURequest: queries search API using SRU

    SRURequests should be used when fielded searching is desired.
    The request is its own iterator: next() follows the SRU
    nextRecordPosition element through the result set.
    """
    def __init__(self, **kwargs):
        """Constructor method for SRURequests."""
        SearchAPIRequest.__init__(self, **kwargs)

    def __iter__(self):
        # Iterator protocol: paging state lives on the request itself.
        return self

    def api_url(self):
        """Set the SRU endpoint URL for this request."""
        self.url = 'http://worldcat.org/webservices/catalog/search/sru'

    def next(self):
        # Advance startRecord to the server-reported next position;
        # an absent/empty element means the result set is exhausted.
        _i = extract_elements(self.response,
            element='{http://www.loc.gov/zing/srw/}nextRecordPosition')
        if len(_i) != 0:
            if _i[0].text is not None:
                self.args['startRecord'] = int(_i[0].text)
            else:
                raise StopIteration
        else:
            raise StopIteration

    def subclass_validator(self, quiet=False):
        """Validator method for SRURequests.

        Requires the 'query' argument; returns False or raises
        EmptyQueryError depending on *quiet*."""
        if 'query' not in self.args:
            if quiet == True:
                return False
            else:
                raise EmptyQueryError
        else:
            return True
class ContentRequest(SearchAPIRequest):
    """request.search.ContentRequest: search API content request metaclass

    A ContentRequest always targets a single record number, passed as the
    mandatory first positional argument at construction time.
    """
    def __init__(self, rec_num, **kwargs):
        """Store the record number and delegate to SearchAPIRequest."""
        SearchAPIRequest.__init__(self, **kwargs)
        self.rec_num = rec_num

    def subclass_validator(self, quiet=False):
        """Check that a record number was supplied.

        Returns True when rec_num is set; otherwise returns False when
        *quiet* is True or raises EmptyRecordNumberError.
        """
        if self.rec_num is not None:
            return True
        if quiet == True:
            return False
        raise EmptyRecordNumberError
class BibRequest(ContentRequest):
    """request.search.BibRequest: retrieves single bibliographic records

    BibRequests only provide SearchAPIResponses where response_format is
    'xml' and record_format is 'marcxml'.
    """
    def __init__(self, rec_num=None, **kwargs):
        """Delegate straight to ContentRequest."""
        ContentRequest.__init__(self, rec_num, **kwargs)

    def api_url(self):
        """Point the request at the single-record content endpoint."""
        self.url = ('http://worldcat.org/webservices/catalog/content/%s'
                    % self.rec_num)
class CitationRequest(ContentRequest):
    """request.search.CitationRequest: retrieves formatted HTML citations

    CitationRequests should always have a SearchAPIResponse where
    response_format is 'html' and record_format is unset.

    TODO: Consider handling citation format."""
    def __init__(self, rec_num=None, **kwargs):
        """Delegate straight to ContentRequest."""
        ContentRequest.__init__(self, rec_num, **kwargs)

    def api_url(self):
        """Point the request at the citations content endpoint."""
        self.url = ('http://worldcat.org/webservices/catalog/content/citations/%s'
                    % self.rec_num)
class LibrariesRequest(ContentRequest):
    """request.search.LibrariesRequest: retrieves holdings for a single record

    HoldingsRequests universally going to have a SearchAPIResponse where
    response_format is 'xml' and record_format is 'iso20775'.

    TODO: Add code to allow request w/o recnum to get based on OCLC symbol
    """
    def __init__(self, rec_num=None, num_type='oclc', **kwargs):
        """Constructor for HoldingsRequests.

        num_type selects the identifier scheme ('oclc' or 'isbn'); the
        mapping below supplies the matching URL path fragment."""
        self._nt_validator = {'oclc': '', 'isbn': 'isbn/'}
        self.num_type = num_type
        ContentRequest.__init__(self, rec_num, **kwargs)

    def api_url(self):
        """Build the libraries endpoint URL, appending the identifier
        (and 'isbn/' prefix where applicable) when rec_num is set."""
        self.url = 'http://worldcat.org/webservices/catalog/content/libraries'
        if self.rec_num is not None:
            self.url = '%s/%s%s' \
                % (self.url, self._nt_validator[self.num_type], self.rec_num)

    def subclass_validator(self, quiet=False):
        """Validator method for HoldingsRequests.

        Despite HoldingsRequests being able to handle ISBNs,
        HoldingsRequest.validate() does not validate ISBNS."""
        if self.num_type not in self._nt_validator:
            if quiet == True:
                return False
            else:
                # NOTE(review): InvalidNumberTypeError is not imported in
                # this module (only InvalidArgumentError is), so reaching
                # this raise actually produces a NameError -- confirm the
                # intended exception against worldcat.exceptions.
                raise InvalidNumberTypeError
        else:
            return True
| anarchivist/worldcat | worldcat/request/search.py | Python | gpl-3.0 | 8,497 | [
"ASE"
] | a6a338c2f76bc20c8e56c1c3194fec9faf2df50daf490c3585220c24b6807030 |
"""
molvs.cli
~~~~~~~~~
This module contains a command line interface for standardization.
:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
*Adapted for purposes of integration of MolVS into RDKit
"""
import argparse
import logging
import sys
from rdkit import Chem
from rdkit.Chem.MolStandardize import Standardizer, Validator
log = logging.getLogger(__name__)
FILETYPES = ['smi', 'mol', 'sdf']
class MolvsParser(argparse.ArgumentParser):
    """Argument parser that prints the error message followed by the full
    usage help, then exits with status 2 (instead of argparse's terse
    default behaviour)."""

    def error(self, message):
        """Report *message* on stderr, show help, and exit(2).

        Bug fix: the previous "'Error: %s\\n\\n'.encode() % message" worked
        on Python 2 (str.encode() returned str) but on Python 3 raises
        TypeError (bytes %% str) and would write bytes to a text stream.
        """
        sys.stderr.write('Error: %s\n\n' % message)
        self.print_help()
        sys.exit(2)
def _read_mol(args):
    """Build an RDKit Mol from the CLI arguments.

    Priority: a literal --smiles string, then the input file interpreted
    per --intype or its filename extension; SMILES is the fallback for
    unknown types.
    """
    if args.smiles:
        return Chem.MolFromSmiles(args.smiles)
    smiles_input = (args.intype in {'smi', 'smiles'}
                    or args.infile.name.endswith(('smi', 'smiles')))
    if smiles_input:
        return Chem.MolFromSmiles(args.infile.read())
    molblock_input = (args.intype in {'mol', 'sdf'}
                      or args.infile.name.endswith(('mol', 'sdf')))
    if molblock_input:
        return Chem.MolFromMolBlock(args.infile.read())
    return Chem.MolFromSmiles(args.infile.read())
def _write_mol(mol, args):
    """Serialize *mol* to the output file.

    SMILES output (with trailing newline) is chosen when --outtype or the
    output filename indicates it, and is also the default; a mol block is
    written only when the mol/sdf indicators match and the SMILES ones
    do not (preserving the original branch precedence).
    """
    out = args.outfile
    smiles_output = (args.outtype in {'smi', 'smiles'}
                     or out.name.endswith(('smi', 'smiles')))
    molblock_output = (args.outtype in {'mol', 'sdf'}
                       or out.name.endswith(('mol', 'sdf')))
    if not smiles_output and molblock_output:
        out.write(Chem.MolToMolBlock(mol))
    else:
        out.write(Chem.MolToSmiles(mol))
        out.write('\n')
def standardize_main(args):
    """Read a molecule, run MolVS standardization, write the result."""
    molecule = _read_mol(args)
    standardized = Standardizer().standardize(molecule)
    _write_mol(standardized, args)
def validate_main(args):
    """Read a molecule, run MolVS validation, and write one line per
    validation message to the output file."""
    mol = _read_mol(args)
    v = Validator()
    # Loop variable renamed from "log", which shadowed the module-level
    # logger of the same name.
    for message in v.validate(mol):
        args.outfile.write(message)
        args.outfile.write('\n')
if __name__=='__main__':
    # Command line interface for MolVS standardize/validate.
    parser = MolvsParser(usage="usage: python cli.py [-h] {standardize,validate} ...")
    subparsers = parser.add_subparsers(title='Available commands')

    # Options common to all commands
    common_parser = MolvsParser(add_help=False)
    common_parser.add_argument('infile', nargs='?', help='input filename', type=argparse.FileType('r'), default=sys.stdin)
    common_parser.add_argument('-i', '--intype', help='input filetype', choices=FILETYPES)
    common_parser.add_argument('-:', '--smiles', help='input SMILES instead of file', metavar='<smiles>')
    common_parser.add_argument('-O', '--outfile', help='output filename', type=argparse.FileType('w'), default=sys.stdout, metavar='<outfile>')

    # Standardize options
    standardize_parser = subparsers.add_parser('standardize', help='standardize a molecule', parents=[common_parser])
    standardize_parser.add_argument('-o', '--outtype', help='output filetype', choices=FILETYPES)
    standardize_parser.set_defaults(func=standardize_main)

    # Validate options
    validate_parser = subparsers.add_parser('validate', help='validate a molecule', parents=[common_parser])
    validate_parser.set_defaults(func=validate_main)

    args = parser.parse_args()
    try:
        args.func(args)
    except Exception as e:
        # Fix for Python 3: BaseException.message no longer exists, and the
        # old "'...'.encode() % ..." bytes formatting raised TypeError there
        # as well; str(e) via %s works on both interpreters.
        sys.stderr.write('Error: %s\n\n' % e)
        parser.print_help()
        sys.exit(2)
| bp-kelley/rdkit | Contrib/MolVS/molvs_cli.py | Python | bsd-3-clause | 3,536 | [
"RDKit"
] | 04e2210436e49ac9d08ad3beefa967b0470e66d340e5be9c0737c83a23184dc3 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
import webnotes.utils
from webnotes.utils import cstr, flt, getdate
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint
from webnotes.model.mapper import get_mapped_doclist
from controllers.selling_controller import SellingController
class DocType(SellingController):
	"""Server-side controller for the Sales Order doctype.

	Validates order data on save, reserves stock on submit, releases it on
	cancel/stop, and keeps linked Quotations/Opportunities in sync.
	"""
	def __init__(self, doc, doclist=None):
		# doc: the Sales Order record; doclist: its child-table rows.
		self.doc = doc
		if not doclist: doclist = []
		self.doclist = doclist
		self.tname = 'Sales Order Item'
		self.fname = 'sales_order_details'
		self.person_tname = 'Target Detail'
		self.partner_tname = 'Partner Target Detail'
		self.territory_tname = 'Territory Target Detail'

	def validate_mandatory(self):
		# validate transaction date v/s delivery date
		if self.doc.delivery_date:
			if getdate(self.doc.transaction_date) > getdate(self.doc.delivery_date):
				msgprint("Expected Delivery Date cannot be before Sales Order Date")
				raise Exception

	def validate_po(self):
		# validate p.o date v/s delivery date
		if self.doc.po_date and self.doc.delivery_date and getdate(self.doc.po_date) > getdate(self.doc.delivery_date):
			msgprint("Expected Delivery Date cannot be before Purchase Order Date")
			raise Exception

		# warn (not block) when another active SO exists for the same
		# customer PO number
		if self.doc.po_no and self.doc.customer:
			so = webnotes.conn.sql("select name from `tabSales Order` \
				where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
				and customer = %s", (self.doc.po_no, self.doc.name, self.doc.customer))
			if so and so[0][0]:
				msgprint("""Another Sales Order (%s) exists against same PO No and Customer.
					Please be sure, you are not making duplicate entry.""" % so[0][0])

	def validate_for_items(self):
		# Require a warehouse for stock items, warn on duplicate rows and
		# snapshot the projected qty from Bin for each row.
		check_list, flag = [], 0
		chk_dupl_itm = []
		for d in getlist(self.doclist, 'sales_order_details'):
			e = [d.item_code, d.description, d.reserved_warehouse, d.prevdoc_docname or '']
			f = [d.item_code, d.description]

			if webnotes.conn.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
				if not d.reserved_warehouse:
					msgprint("""Please enter Reserved Warehouse for item %s
						as it is stock Item""" % d.item_code, raise_exception=1)

				if e in check_list:
					msgprint("Item %s has been entered twice." % d.item_code)
				else:
					check_list.append(e)
			else:
				if f in chk_dupl_itm:
					msgprint("Item %s has been entered twice." % d.item_code)
				else:
					chk_dupl_itm.append(f)

			# used for production plan
			d.transaction_date = self.doc.transaction_date

			tot_avail_qty = webnotes.conn.sql("select projected_qty from `tabBin` \
				where item_code = '%s' and warehouse = '%s'" % (d.item_code,d.reserved_warehouse))
			d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0

	def validate_sales_mntc_quotation(self):
		# the source Quotation's order type must match this order's type
		for d in getlist(self.doclist, 'sales_order_details'):
			if d.prevdoc_docname:
				res = webnotes.conn.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.doc.order_type))
				if not res:
					msgprint("""Order Type (%s) should be same in Quotation: %s \
						and current Sales Order""" % (self.doc.order_type, d.prevdoc_docname))

	def validate_order_type(self):
		super(DocType, self).validate_order_type()

	def validate_delivery_date(self):
		if self.doc.order_type == 'Sales' and not self.doc.delivery_date:
			msgprint("Please enter 'Expected Delivery Date'")
			raise Exception

		self.validate_sales_mntc_quotation()

	def validate_proj_cust(self):
		# a Project tied to a customer may only be used on that
		# customer's orders
		if self.doc.project_name and self.doc.customer_name:
			res = webnotes.conn.sql("select name from `tabProject` where name = '%s' and (customer = '%s' or ifnull(customer,'')='')"%(self.doc.project_name, self.doc.customer))
			if not res:
				msgprint("Customer - %s does not belong to project - %s. \n\nIf you want to use project for multiple customers then please make customer details blank in project - %s."%(self.doc.customer,self.doc.project_name,self.doc.project_name))
				raise Exception

	def validate(self):
		"""Main save-time validation entry point."""
		super(DocType, self).validate()

		self.validate_order_type()
		self.validate_delivery_date()
		self.validate_mandatory()
		self.validate_proj_cust()
		self.validate_po()
		self.validate_uom_is_integer("stock_uom", "qty")
		# self.validate_for_items()
		self.validate_for_product()
		self.validate_warehouse()

		from stock.doctype.packed_item.packed_item import make_packing_list
		self.doclist = make_packing_list(self,'sales_order_details')

		self.validate_with_previous_doc()

		if not self.doc.status:
			self.doc.status = "Draft"

		import utilities
		utilities.validate_status(self.doc.status, ["Draft", "Submitted", "Stopped",
			"Cancelled"])

		if not self.doc.billing_status: self.doc.billing_status = 'Not Billed'
		if not self.doc.delivery_status: self.doc.delivery_status = 'Not Delivered'

	def validate_warehouse(self):
		# check user permission and company ownership for every
		# distinct reserved warehouse
		from stock.utils import validate_warehouse_user, validate_warehouse_company

		warehouses = list(set([d.reserved_warehouse for d in
			self.doclist.get({"doctype": self.tname}) if d.reserved_warehouse]))

		for w in warehouses:
			validate_warehouse_user(w)
			validate_warehouse_company(w, self.doc.company)

	def validate_with_previous_doc(self):
		# source Quotation must share company and currency
		super(DocType, self).validate_with_previous_doc(self.tname, {
			"Quotation": {
				"ref_dn_field": "prevdoc_docname",
				"compare_fields": [["company", "="], ["currency", "="]]
			}
		})

	def update_enquiry_status(self, prevdoc, flag):
		# flag the Opportunity behind the source Quotation
		enq = webnotes.conn.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
		if enq:
			webnotes.conn.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))

	def update_prevdoc_status(self, flag):
		# re-derive status on every linked Quotation
		for quotation in self.doclist.get_distinct_values("prevdoc_docname"):
			bean = webnotes.bean("Quotation", quotation)
			if bean.doc.docstatus==2:
				webnotes.throw(quotation + ": " + webnotes._("Quotation is cancelled."))

			bean.get_controller().set_status(update=True)

	def on_submit(self):
		# reserve stock, enforce credit limit and approval authority
		self.update_stock_ledger(update_stock = 1)

		self.check_credit(self.doc.grand_total)

		get_obj('Authorization Control').validate_approving_authority(self.doc.doctype, self.doc.grand_total, self)

		self.update_prevdoc_status('submit')
		webnotes.conn.set(self.doc, 'status', 'Submitted')

	def on_cancel(self):
		# Cannot cancel stopped SO
		if self.doc.status == 'Stopped':
			msgprint("Sales Order : '%s' cannot be cancelled as it is Stopped. Unstop it for any further transactions" %(self.doc.name))
			raise Exception
		self.check_nextdoc_docstatus()
		self.update_stock_ledger(update_stock = -1)

		self.update_prevdoc_status('cancel')

		webnotes.conn.set(self.doc, 'status', 'Cancelled')

	def check_nextdoc_docstatus(self):
		# Block cancellation while any downstream submitted document exists.
		# Checks Delivery Note
		submit_dn = webnotes.conn.sql("select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2 where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1", self.doc.name)
		if submit_dn:
			msgprint("Delivery Note : " + cstr(submit_dn[0][0]) + " has been submitted against " + cstr(self.doc.doctype) + ". Please cancel Delivery Note : " + cstr(submit_dn[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
		# Checks Sales Invoice
		submit_rv = webnotes.conn.sql("select t1.name from `tabSales Invoice` t1,`tabSales Invoice Item` t2 where t1.name = t2.parent and t2.sales_order = '%s' and t1.docstatus = 1" % (self.doc.name))
		if submit_rv:
			msgprint("Sales Invoice : " + cstr(submit_rv[0][0]) + " has already been submitted against " +cstr(self.doc.doctype)+ ". Please cancel Sales Invoice : "+ cstr(submit_rv[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
		#check maintenance schedule
		submit_ms = webnotes.conn.sql("select t1.name from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2 where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1",self.doc.name)
		if submit_ms:
			msgprint("Maintenance Schedule : " + cstr(submit_ms[0][0]) + " has already been submitted against " +cstr(self.doc.doctype)+ ". Please cancel Maintenance Schedule : "+ cstr(submit_ms[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
		# check maintenance visit
		submit_mv = webnotes.conn.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1",self.doc.name)
		if submit_mv:
			msgprint("Maintenance Visit : " + cstr(submit_mv[0][0]) + " has already been submitted against " +cstr(self.doc.doctype)+ ". Please cancel Maintenance Visit : " + cstr(submit_mv[0][0]) + " first and then cancel "+ cstr(self.doc.doctype), raise_exception = 1)
		# check production order
		pro_order = webnotes.conn.sql("""select name from `tabProduction Order` where sales_order = %s and docstatus = 1""", self.doc.name)
		if pro_order:
			msgprint("""Production Order: %s exists against this sales order.
				Please cancel production order first and then cancel this sales order""" %
				pro_order[0][0], raise_exception=1)

	def check_modified_date(self):
		# optimistic-concurrency guard: reject stop/unstop if the record
		# changed since it was loaded in the client
		mod_db = webnotes.conn.sql("select modified from `tabSales Order` where name = '%s'" % self.doc.name)
		date_diff = webnotes.conn.sql("select TIMEDIFF('%s', '%s')" % ( mod_db[0][0],cstr(self.doc.modified)))

		if date_diff and date_diff[0][0]:
			msgprint("%s: %s has been modified after you have opened. Please Refresh"
				% (self.doc.doctype, self.doc.name), raise_exception=1)

	def stop_sales_order(self):
		# release reserved stock and mark the order Stopped
		self.check_modified_date()
		self.update_stock_ledger(-1)
		webnotes.conn.set(self.doc, 'status', 'Stopped')
		msgprint("""%s: %s has been Stopped. To make transactions against this Sales Order
			you need to Unstop it.""" % (self.doc.doctype, self.doc.name))

	def unstop_sales_order(self):
		# re-reserve stock and restore Submitted status
		self.check_modified_date()
		self.update_stock_ledger(1)
		webnotes.conn.set(self.doc, 'status', 'Submitted')
		msgprint("%s: %s has been Unstopped" % (self.doc.doctype, self.doc.name))

	def update_stock_ledger(self, update_stock):
		# adjust reserved_qty in Bin for every stock item row;
		# update_stock is +1 to reserve, -1 to release
		from stock.utils import update_bin
		for d in self.get_item_list():
			if webnotes.conn.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
				args = {
					"item_code": d['item_code'],
					"warehouse": d['reserved_warehouse'],
					"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
					"posting_date": self.doc.transaction_date,
					"voucher_type": self.doc.doctype,
					"voucher_no": self.doc.name,
					"is_amended": self.doc.amended_from and 'Yes' or 'No'
				}
				update_bin(args)

	def on_update(self):
		pass

	def get_portal_page(self):
		# expose submitted orders on the customer portal only
		return "order" if self.doc.docstatus==1 else None

	def validate_for_product(self):
		# duplicate product rows (same name AND description) are rejected
		chk_dupl_prd = []
		for d in getlist(self.doclist,'sales_products'):
			if [cstr(d.product_name),cstr(d.description)] in chk_dupl_prd:
				msgprint("Product %s has been entered twice. Please change description atleast to continue" % d.product_name)
				raise Exception
			else:
				chk_dupl_prd.append([cstr(d.product_name),cstr(d.description)])
def set_missing_values(source, target):
	"""Re-run client-side defaults on the freshly mapped document."""
	webnotes.bean(target).run_method("onload_post_render")
@webnotes.whitelist()
def make_material_request(source_name, target_doclist=None):
	"""Map a submitted Sales Order onto a new (unsaved) Material Request."""
	def postprocess(source, doclist):
		# Material is procured, not produced, by default.
		doclist[0].material_request_type = "Purchase"

	doclist = get_mapped_doclist("Sales Order", source_name, {
		"Sales Order": {
			"doctype": "Material Request",
			"validation": {
				"docstatus": ["=", 1]
			}
		},
		"Sales Order Item": {
			"doctype": "Material Request Item",
			"field_map": {
				"parent": "sales_order_no",
				"reserved_warehouse": "warehouse",
				"stock_uom": "uom"
			}
		}
	}, target_doclist, postprocess)

	# rows may be plain dicts or Document wrappers; normalize to dicts
	return [(d if isinstance(d, dict) else d.fields) for d in doclist]
@webnotes.whitelist()
def make_delivery_note(source_name, target_doclist=None):
	"""Map a submitted Sales Order onto a new Delivery Note, carrying only
	the still-undelivered quantity of each row."""
	def update_item(obj, target, source_parent):
		# remaining (undelivered) qty and the matching amounts
		target.amount = (flt(obj.qty) - flt(obj.delivered_qty)) * flt(obj.basic_rate)
		target.export_amount = (flt(obj.qty) - flt(obj.delivered_qty)) * flt(obj.export_rate)
		target.qty = flt(obj.qty) - flt(obj.delivered_qty)

	doclist = get_mapped_doclist("Sales Order", source_name, {
		"Sales Order": {
			"doctype": "Delivery Note",
			"field_map": {
				"shipping_address": "address_display",
				"shipping_address_name": "customer_address",
			},
			"validation": {
				"docstatus": ["=", 1]
			}
		},
		"Sales Order Item": {
			"doctype": "Delivery Note Item",
			"field_map": {
				"export_rate": "export_rate",
				"name": "prevdoc_detail_docname",
				"parent": "against_sales_order",
				"reserved_warehouse": "warehouse"
			},
			"postprocess": update_item,
			# skip rows that are already fully delivered
			"condition": lambda doc: doc.delivered_qty < doc.qty
		},
		"Sales Taxes and Charges": {
			"doctype": "Sales Taxes and Charges",
			"add_if_empty": True
		},
		"Sales Team": {
			"doctype": "Sales Team",
			"add_if_empty": True
		}
	}, target_doclist, set_missing_values)

	return [d.fields for d in doclist]
@webnotes.whitelist()
def make_sales_invoice(source_name, target_doclist=None):
	"""Map a submitted Sales Order onto a new Sales Invoice, carrying only
	the still-unbilled amount of each row."""
	def set_missing_values(source, target):
		# local override of the module-level helper: invoices from an
		# order are never POS invoices
		bean = webnotes.bean(target)
		bean.doc.is_pos = 0
		bean.run_method("onload_post_render")

	def update_item(obj, target, source_parent):
		# bill the remaining amount; derive qty back from the rate
		target.export_amount = flt(obj.export_amount) - flt(obj.billed_amt)
		target.amount = target.export_amount * flt(source_parent.conversion_rate)
		target.qty = obj.export_rate and target.export_amount / flt(obj.export_rate) or obj.qty

	doclist = get_mapped_doclist("Sales Order", source_name, {
		"Sales Order": {
			"doctype": "Sales Invoice",
			"validation": {
				"docstatus": ["=", 1]
			}
		},
		"Sales Order Item": {
			"doctype": "Sales Invoice Item",
			"field_map": {
				"name": "so_detail",
				"parent": "sales_order" ,
				"reserved_warehouse": "warehouse"
			},
			"postprocess": update_item,
			# skip rows that are already fully billed
			"condition": lambda doc: doc.amount==0 or doc.billed_amt < doc.export_amount
		},
		"Sales Taxes and Charges": {
			"doctype": "Sales Taxes and Charges",
			"add_if_empty": True
		},
		"Sales Team": {
			"doctype": "Sales Team",
			"add_if_empty": True
		}
	}, target_doclist, set_missing_values)

	return [d.fields for d in doclist]
@webnotes.whitelist()
def make_maintenance_schedule(source_name, target_doclist=None):
	"""Map a Sales Order onto a new Maintenance Schedule.

	Returns None (implicitly) when a submitted schedule already exists
	for this order, so only one schedule is ever created.
	"""
	maint_schedule = webnotes.conn.sql("""select t1.name
		from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
		where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)

	if not maint_schedule:
		doclist = get_mapped_doclist("Sales Order", source_name, {
			"Sales Order": {
				"doctype": "Maintenance Schedule",
				"field_map": {
					"name": "sales_order_no"
				},
				"validation": {
					"docstatus": ["=", 1]
				}
			},
			"Sales Order Item": {
				"doctype": "Maintenance Schedule Item",
				"field_map": {
					"parent": "prevdoc_docname"
				},
				"add_if_empty": True
			}
		}, target_doclist)

		return [d.fields for d in doclist]
@webnotes.whitelist()
def make_maintenance_visit(source_name, target_doclist=None):
	"""Map a Sales Order onto a new Maintenance Visit.

	Returns None (implicitly) when a fully completed submitted visit
	already exists for this order.
	"""
	visit = webnotes.conn.sql("""select t1.name
		from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
		where t2.parent=t1.name and t2.prevdoc_docname=%s
		and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)

	if not visit:
		doclist = get_mapped_doclist("Sales Order", source_name, {
			"Sales Order": {
				"doctype": "Maintenance Visit",
				"field_map": {
					"name": "sales_order_no"
				},
				"validation": {
					"docstatus": ["=", 1]
				}
			},
			"Sales Order Item": {
				"doctype": "Maintenance Visit Purpose",
				"field_map": {
					"parent": "prevdoc_docname",
					"parenttype": "prevdoc_doctype"
				},
				"add_if_empty": True
			}
		}, target_doclist)

		return [d.fields for d in doclist]
| saurabh6790/tru_app_back | selling/doctype/sales_order/sales_order.py | Python | agpl-3.0 | 16,170 | [
"VisIt"
] | f319a73c0a92212eeb18d514d4ce31b3621111b6e36329da891a9ac62b201207 |
"""
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>.
See LICENSE.txt for licensing details (MIT License).
"""
from pycsp_import import *
import sys
@process
def source(chan_out):
    """Write ten greetings to the channel, then retire from it so the
    channel can shut down once all writers are gone."""
    count = 0
    while count < 10:
        chan_out("Hello world (%d)\n" % count)
        count += 1
    retire(chan_out)
@process
def sink(chan_in):
    """Print every message received; terminates when the channel is
    retired/poisoned (the read raises out of the loop)."""
    while True:
        message = chan_in()
        sys.stdout.write(message)
# One any-to-any channel shared by five writers and five readers.
chan = Channel()
# Run 5 producer and 5 consumer processes concurrently; Parallel blocks
# until all terminate (writers retire the channel, which eventually
# breaks the readers out of their loops).
Parallel(source(chan.writer()) * 5,
         sink(chan.reader()) * 5)
shutdown()
| runefriborg/pycsp | examples/TerminationRaceRetire.py | Python | mit | 550 | [
"Brian"
] | 7446af6971c8759e490160ea8f9b8bcc78c2ddbe8e43ddeead849fc7dd2ed9c9 |
import factory
# http://factoryboy.readthedocs.org/en/latest/index.html
from tao.models import Job, TaoUser, Simulation, GalaxyModel, DataSet, DataSetProperty, StellarModel, Snapshot, BandPassFilter, DustModel, GlobalParameter, WorkflowCommand, SurveyPreset
class UserFactory(factory.Factory):
    """Factory for TaoUser; generates unique usernames/emails and hashes
    the optional 'password' kwarg via set_password()."""
    FACTORY_FOR = TaoUser

    # '%s' formatting works whether factory_boy passes the sequence counter
    # as a string (old versions) or an int (newer versions); the previous
    # "'username' + n" form raised TypeError on int counters. This also
    # matches the int-tolerant style used by the other factories here.
    username = factory.Sequence(lambda n: 'username%s' % n)
    email = factory.Sequence(lambda n: 'email%s@example.com' % n)

    @classmethod
    def _prepare(cls, create, **kwargs):
        """Pop 'password' so it is hashed rather than stored raw, then
        save the user when a persisted instance was requested."""
        password = kwargs.pop('password', None)
        user = super(UserFactory, cls)._prepare(create, **kwargs)
        if password:
            user.set_password(password)
        if create:
            user.save()
        return user
class SimulationFactory(factory.Factory):
    """Factory for Simulation rows; 'details' assembles the HTML snippet
    that tests scrape via the CSS classes embedded below."""
    FACTORY_FOR = Simulation

    name = factory.Sequence(lambda n: 'simulation_%03d' % int(n))
    box_size = factory.Sequence(lambda n: 500 + int(n))
    order = factory.Sequence(lambda n: int(n))
    details = factory.Sequence(lambda n:
        '<a class="simulation-paper" target="_blank" href="http://www.abcd' + str(n) + '.com/">abcd' + str(n) + '</a>' +
        '<a class="simulation-link" target="_blank" href="http://www.defg' + str(n) + '.org/">http://www.defg' + str(n) + '.org/</a>' +
        '<span class="simulation-cosmology">fairy' + str(n) + '</span>' +
        '<span class="simulation-cosmological-parameters">dust' + str(n) + '</span>' +
        '<span class="simulation-box-size">' + str(n) + '</span>' +
        '<a class="simulation-web-site" target="_blank" href="http://mysite' + str(n) + '.edu/">http://mysite' + str(n) + '.edu/</a>'
    )
class GalaxyModelFactory(factory.Factory):
    """Factory for GalaxyModel rows; 'details' is the HTML fragment the
    UI/tests scrape via the galaxy-model-* CSS classes."""
    FACTORY_FOR = GalaxyModel

    name = factory.Sequence(lambda n: 'galaxy_model_%03d' % int(n))
    details = factory.Sequence(lambda n:
        'Kind: <span class="galaxy-model-kind">' + 'sometype' + str(n) + '</span>' +
        'Paper: <a class="galaxy-model-paper" target="_blank" href="' + 'http://www.xyz' + str(n) + '.com/' + '">' + 'xyz' + str(n) + '</a>'
    )
class DataSetFactory(factory.Factory):
    """Factory for DataSet rows; datasets are available by default and all
    other fields must be supplied by the caller."""
    FACTORY_FOR = DataSet
    available = True
class DataSetPropertyFactory(factory.Factory):
    """Factory for DataSetProperty rows: int-typed properties usable both
    as record filters and as output fields by default."""
    FACTORY_FOR = DataSetProperty

    label = factory.Sequence(lambda n: 'parameter_%03d label' % int(n))
    name = factory.Sequence(lambda n: 'name_%03d' % int(n))
    description = factory.Sequence(lambda n: 'description_%03d' % int(n))
    data_type = DataSetProperty.TYPE_INT
    is_filter = True
    is_output = True
class StellarModelFactory(factory.Factory):
    """Test-data factory for StellarModel rows."""
    FACTORY_FOR = StellarModel

    label = factory.Sequence(lambda n: 'stellar_label_%03d' % int(n))
    name = factory.Sequence(lambda n: 'model{n}/sspm.dat'.format(n=n))
    # str(n) keeps the concatenation working whether factory_boy passes the
    # sequence counter as a str (< 2.0) or an int (>= 2.0); siblings already
    # coerce via int(n)/str(n).
    description = factory.Sequence(lambda n: '<p>Description ' + str(n) + '</p>')
    # XML fragment referencing the per-model data files
    # (note: "metallicites" spelling kept -- it is a runtime file name)
    encoding = factory.Sequence(lambda n: """
<single-stellar-population-model width="{n}">model{n}/sspm.dat</single-stellar-population-model>
<wavelengths-file>model{n}/wavelengths.dat</wavelengths-file>
<ages-file>model{n}/ages.dat</ages-file>
<metallicities-file>model{n}/metallicites.dat</metallicities-file>
""".format(n=n))
class SnapshotFactory(factory.Factory):
    # Factory for Snapshot rows; redshift advances by 0.1 per instance and
    # is stored as a string ('0.0', '0.1', ...).
    FACTORY_FOR = Snapshot

    redshift = factory.Sequence(lambda n: str(int(n)/10.))
class BandPassFilterFactory(factory.Factory):
    # Factory for BandPassFilter rows; filter_id doubles as the data-file
    # name for the filter.
    FACTORY_FOR = BandPassFilter

    label = factory.Sequence(lambda n: 'Band pass filter %03d' % int(n))
    filter_id = factory.Sequence(lambda n: 'Band_pass_filter_%03d.txt' % int(n))
class DustModelFactory(factory.Factory):
    """Test-data factory for DustModel rows."""
    FACTORY_FOR = DustModel

    name = factory.Sequence(lambda n: 'Dust_model_%03d.dat' % int(n))
    label = factory.Sequence(lambda n: 'Dust model %03d' % int(n))
    # str(n) keeps the concatenation working whether factory_boy passes the
    # sequence counter as a str (< 2.0) or an int (>= 2.0).
    details = factory.Sequence(lambda n: '<p>Detail ' + str(n) + '</p>')
class JobFactory(factory.Factory):
    """Test-data factory for Job rows."""
    FACTORY_FOR = Job

    # str(n) keeps the concatenation working whether factory_boy passes the
    # sequence counter as a str (< 2.0) or an int (>= 2.0).
    database = factory.Sequence(lambda n: 'database_' + str(n))
    description = factory.Sequence(lambda n: 'description job ' + str(n))

    @classmethod
    def _prepare(cls, create, **kwargs):
        # Allow tests to override created_time, which the model otherwise
        # sets itself; re-save only when it was overridden so the forced
        # timestamp is persisted.
        created_time = kwargs.pop('created_time', None)
        job = super(JobFactory, cls)._prepare(create, **kwargs)
        if created_time:
            job.created_time = created_time
            if create:
                job.save()
        return job
class GlobalParameterFactory(factory.Factory):
    # Factory for GlobalParameter key/value configuration rows.
    FACTORY_FOR = GlobalParameter

    parameter_name = factory.Sequence(lambda n: 'global_%d' % int(n))
    parameter_value = factory.Sequence(lambda n: 'global_value_%d' % int(n))
    description = factory.Sequence(lambda n: 'description_%d' % int(n))
class WorkflowCommandFactory(factory.Factory):
    # Bare factory: every WorkflowCommand field is supplied by the caller.
    FACTORY_FOR = WorkflowCommand
class SurveyPresetFactory(factory.Factory):
    # Factory for SurveyPreset rows; presets start with an empty
    # parameters payload.
    FACTORY_FOR = SurveyPreset

    name = factory.Sequence(lambda n: 'Preset %d' % int(n))
    parameters = ''
| IntersectAustralia/asvo-tao | web/tao/tests/support/factories.py | Python | gpl-3.0 | 5,101 | [
"Galaxy"
] | b48a9827dca1e801ce7be35d6950421d5412562ac37b5209f64b2e526bab7c59 |
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
''' This manages remote shared Ansible objects, mainly roles'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
# default_readme_template
# default_meta_template
class Galaxy(object):
    ''' Keeps global galaxy info '''

    def __init__(self, options):
        """
        Parameters
        ----------
        options: object
            parsed CLI options; may provide ``roles_path`` as either a
            path-separator-delimited string or an iterable of paths.
        """
        self.options = options
        roles_paths = getattr(self.options, 'roles_path', [])
        if isinstance(roles_paths, string_types):
            # a single pathsep-delimited string of roles paths
            self.roles_paths = [os.path.expanduser(roles_path)
                                for roles_path in roles_paths.split(os.pathsep)]
        else:
            # BUGFIX: already an iterable of paths -- previously this case
            # left self.roles_paths unset, causing AttributeError later.
            self.roles_paths = [os.path.expanduser(roles_path)
                                for roles_path in roles_paths]

        self.roles = {}

        # load data path for resource usage
        this_dir, this_filename = os.path.split(__file__)
        self.DATA_PATH = os.path.join(this_dir, "data")

        # TODO: move to getter for lazy loading
        self.default_readme = self._str_from_data_file('readme')
        self.default_meta = self._str_from_data_file('metadata_template.j2')
        self.default_test = self._str_from_data_file('test_playbook.j2')
        self.default_travis = self._str_from_data_file('travis.j2')

    def add_role(self, role):
        """Register a role object under its name."""
        self.roles[role.name] = role

    def remove_role(self, role_name):
        """Forget a previously registered role; raises KeyError if absent."""
        del self.roles[role_name]

    def _str_from_data_file(self, filename):
        """Return the contents of a bundled data file as a string.

        Raises AnsibleError if the file cannot be read.
        """
        myfile = os.path.join(self.DATA_PATH, filename)
        try:
            # use a context manager so the handle is closed deterministically
            # (it was previously leaked)
            with open(myfile) as f:
                return f.read()
        except Exception as e:
            raise AnsibleError("Could not open %s: %s" % (filename, str(e)))
| goozbach/ansible | lib/ansible/galaxy/__init__.py | Python | gpl-3.0 | 2,428 | [
"Brian",
"Galaxy"
] | 7c7b36827fdafbbd40de5cb2e737162e2205af53b91885edbd266806af09054e |
# -*-python-*-
#
# Copyright (C) 1999-2006 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
"""This package provides parsing tools for RCS files."""
from common import *
# Select a parse implementation, trying implementations in preference
# order (presumably fastest first -- TODO confirm):
#   1. `tparse`    -- provides a parse() callable directly
#   2. `texttools` -- provides a Parser class
#   3. `default`   -- pure-Python fallback Parser class
try:
    from tparse import parse
except ImportError:
    try:
        from texttools import Parser
    except ImportError:
        from default import Parser

    def parse(file, sink):
        # Adapt the Parser class to the same parse(file, sink) callable
        # interface that tparse exposes.
        return Parser().parse(file, sink)
| foresthz/fusion5.1 | www/scm/viewvc/lib/vclib/ccvs/rcsparse/__init__.py | Python | gpl-2.0 | 704 | [
"VisIt"
] | 6cb25f7eb24c0d7c59abfaf03a50736bf4c326c235ca2410d824036f4e3f500f |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**************************************
**espressopp.integrator.MDIntegrator**
**************************************
.. function:: espressopp.integrator.MDIntegrator.addExtension(extension)
:param extension:
:type extension:
:rtype:
.. function:: espressopp.integrator.MDIntegrator.getExtension(k)
:param k:
:type k:
:rtype:
.. function:: espressopp.integrator.MDIntegrator.getNumberOfExtensions()
:rtype:
.. function:: espressopp.integrator.MDIntegrator.run(niter)
:param niter:
:type niter:
:rtype:
"""
from espressopp import pmi
from _espressopp import integrator_MDIntegrator
class MDIntegratorLocal(object):
    """
    Worker-side wrapper around the C++ MD integrator exposed through PMI.

    Every call is guarded: it executes only when no PMI communicator is
    active, or when this MPI rank is a member of the active CPU group.
    """

    def run(self, niter):
        comm = pmi._PMIComm
        if not (comm and comm.isActive()) \
                or pmi._MPIcomm.rank in comm.getMPIcpugroup():
            return self.cxxclass.run(self, niter)

    def addExtension(self, extension):
        comm = pmi._PMIComm
        if not (comm and comm.isActive()) \
                or pmi._MPIcomm.rank in comm.getMPIcpugroup():
            # register this integrator with the extension and let the
            # extension connect itself before it is stored
            extension.cxxclass.setIntegrator(extension, self)
            extension.cxxclass.connect(extension)
            return self.cxxclass.addExtension(self, extension)

    def getExtension(self, k):
        comm = pmi._PMIComm
        if not (comm and comm.isActive()) \
                or pmi._MPIcomm.rank in comm.getMPIcpugroup():
            return self.cxxclass.getExtension(self, k)

    def getNumberOfExtensions(self):
        comm = pmi._PMIComm
        if not (comm and comm.isActive()) \
                or pmi._MPIcomm.rank in comm.getMPIcpugroup():
            return self.cxxclass.getNumberOfExtensions(self)
# Controller-side proxy: forwards property access and the listed calls to
# the MDIntegratorLocal instances running on the workers.
if pmi.isController :
    class MDIntegrator(object):
        """PMI proxy for MDIntegratorLocal (controller rank only)."""
        __metaclass__ = pmi.Proxy  # Python 2 style metaclass assignment
        pmiproxydefs = dict(
            pmiproperty = [ 'dt', 'step' ],
            pmicall = [ 'run', 'addExtension', 'getExtension', 'getNumberOfExtensions' ]
        )
| capoe/espressopp.soap | src/integrator/MDIntegrator.py | Python | gpl-3.0 | 2,807 | [
"ESPResSo"
] | bc147dfd00114c39981df4b644a33b8aa05f3049825eca9b98556a3ba982fad9 |
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Writes compressed data from a wiggle file by chromosome.
usage: %prog score_file < wiggle_data
"""
from __future__ import division
import sys
import psyco_full
import bx.wiggle
from bx.binned_array import BinnedArray
from fpconst import isNaN
from bx.cookbook import doc_optparse
from bx import misc
def main():
# Parse command line
options, args = doc_optparse.parse( __doc__ )
try:
score_fname = args[0]
except:
doc_optparse.exit()
scores = {}
for i, ( chrom, pos, val ) in enumerate( bx.wiggle.Reader( open(sys.argv[1]) ) ):
if not chrom in scores: scores[ chrom ] = BinnedArray()
scores[chrom][pos] = val
# Status
if i % 10000 == 0: print i, "scores processed"
for chr in scores.keys():
out = open( chr, "w" )
scores[chr].to_file( out )
out.close()
if __name__ == "__main__": main()
| bxlab/HiFive_Paper | Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/wiggle_to_chr_binned_array.py | Python | bsd-3-clause | 947 | [
"Galaxy"
] | 09487ad05331386f7b49f10ab740d0baef23d5d9b97b8b1586305cc3253b15e2 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Defines classes and methods used for recurrent neuronal networks.
Copyright (C) 2012 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import numpy as np
import os
import scipy.stats as stats
import h5py
from mpi4py import MPI
import neuron
from neuron import units
from .templatecell import TemplateCell
import scipy.sparse as ss
from warnings import warn, filterwarnings
# set up MPI environment
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
def flattenlist(lst):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sublist in lst:
        flat.extend(sublist)
    return flat
##########################################################################
# NetworkCell class that has a create_synapse method that
# creates a synapse on the target cell, and a create_spike_detector method that
# allows for connecting to a synapse on a target cell. All other methods and
# attributes are inherited from the standard LFPy.TemplateCell class
##########################################################################
class NetworkCell(TemplateCell):
    """
    Similar to `LFPy.TemplateCell` with the addition of some attributes and
    methods allowing for spike communication between parallel RANKs.

    This class allows using NEURON templates with some limitations.

    Accepts the same keyword arguments as `LFPy.TemplateCell`, notably the
    template-related parameters:

    Parameters
    ----------
    morphology: str
        path to morphology file
    templatefile: str
        File with cell template definition(s)
    templatename: str
        Cell template-name used for this cell object
    templateargs: str
        Parameters provided to template-definition

    See the `Cell`/`TemplateCell` documentation for the remaining keyword
    arguments (v_init, Ra, cm, passive, passive_parameters, extracellular,
    dt, tstart, tstop, nsegs_method, max_nsegs_length, lambda_f, d_lambda,
    delete_sections, custom_code, custom_fun, custom_fun_args, pt3d,
    celsius, verbose).

    Examples
    --------
    >>> import LFPy
    >>> cellParameters = {
    >>>     'morphology': '<path to morphology.hoc>',
    >>>     'templatefile': '<path to template_file.hoc>',
    >>>     'templatename': 'templatename',
    >>>     'templateargs': None,
    >>>     'dt': 2**-3,
    >>>     'tstart': 0,
    >>>     'tstop': 50,
    >>> }
    >>> cell = LFPy.NetworkCell(**cellParameters)
    >>> cell.simulate()

    See also
    --------
    Cell
    TemplateCell
    """

    def __init__(self, **args):
        """Instantiate the cell and set up spike-communication bookkeeping."""
        # suppress warnings emitted when section references belonging to
        # other NetworkCell instances are found
        filterwarnings(action='ignore',
                       message="(?=.*(sections detected))",
                       category=UserWarning)
        # instantiate parent class
        super().__init__(**args)

        # list for spike detecting NetCon object(s)
        self._hoc_sd_netconlist = neuron.h.List()
        # list of recording devices for action potentials
        self.spikes = []
        # list of random number generators used with synapse models
        self.rng_list = []
        # separate list for networked synapses
        self.netconsynapses = []

        # recording device for somatic membrane voltage
        self.somav = neuron.h.Vector()
        for sec in self.somalist:
            self.somav.record(sec(0.5)._ref_v)

    def create_synapse(self, cell, sec, x=0.5, syntype=neuron.h.ExpSyn,
                       synparams=dict(tau=2., e=0.),
                       assert_syn_values=False):
        """
        Create synapse object of type syntype on sec(x) of cell and
        append it to the list cell.netconsynapses

        TODO: Use LFPy.Synapse class if possible.

        Parameters
        ----------
        cell: object
            instantiation of class NetworkCell or similar
        sec: neuron.h.Section object
            section reference on cell
        x: float in [0, 1]
            relative position along section
        syntype: hoc.HocObject
            NEURON synapse model reference, e.g., neuron.h.ExpSyn
        synparams: dict
            parameters for syntype, e.g., for neuron.h.ExpSyn we have:
            tau: float, synapse time constant
            e: float, synapse reversal potential
        assert_syn_values: bool
            if True, raise AssertionError if synapse attribute values do not
            match the values in the synparams dictionary

        Raises
        ------
        AssertionError
        """
        # create a synapse object on the target cell
        syn = syntype(x, sec=sec)
        if hasattr(syn, 'setRNG'):
            # create a random number generator for the synapse; used for
            # e.g., stochastic synapse mechanisms (cf. BBP microcircuit
            # portal files)
            rng = neuron.h.Random()
            # NOTE(review): seeded with two random 32-bit ints; not verified
            # against the mechanism's exact MCellRan4 expectations
            rng.MCellRan4(np.random.randint(0, 2**32 - 1),
                          np.random.randint(0, 2**32 - 1))
            rng.uniform(0, 1)
            syn.setRNG(rng)
            cell.rng_list.append(rng)  # must store ref to rng object

        # BUGFIX: append the synapse instantiated (and RNG-seeded) above.
        # Previously a second, fresh syntype instance was appended here,
        # inserting a duplicate point process on the section and discarding
        # the instance whose RNG had just been configured.
        cell.netconsynapses.append(syn)

        for key, value in synparams.items():
            setattr(cell.netconsynapses[-1], key, value)
            # check that synapses are parameterized correctly
            if assert_syn_values:
                try:
                    np.testing.assert_almost_equal(
                        getattr(cell.netconsynapses[-1], key), value)
                except AssertionError:
                    raise AssertionError('{} = {} != {}'.format(
                        key, getattr(cell.netconsynapses[-1], key), value))

    def create_spike_detector(self, target=None, threshold=-10.,
                              weight=0.0, delay=0.0):
        """
        Create spike-detecting NetCon object attached to the cell's soma
        midpoint, but this could be extended to having multiple
        spike-detection sites. The NetCon object created is attached to the
        cell's `_hoc_sd_netconlist` attribute, and will be used by the
        Network class when creating connections between all presynaptic
        cells and postsynaptic cells on each local RANK.

        Parameters
        ----------
        target: None (default) or a NEURON point process
        threshold: float
            spike detection threshold
        weight: float
            connection weight (not used unless target is a point process)
        delay: float
            connection delay (not used unless target is a point process)
        """
        # create new NetCon objects for the connections. Activation times
        # will be triggered on the somatic voltage with a given threshold.
        for sec in self.somalist:
            self._hoc_sd_netconlist.append(neuron.h.NetCon(sec(0.5)._ref_v,
                                                           target,
                                                           sec=sec))
            self._hoc_sd_netconlist[-1].threshold = threshold
            self._hoc_sd_netconlist[-1].weight[0] = weight
            self._hoc_sd_netconlist[-1].delay = delay
class DummyCell(object):
    """
    Dummy Cell object initialized with all attributes needed for LFP
    calculations using the LFPy.RecExtElectrode class and methods.
    This cell can be imagined as one "super" cell containing transmembrane
    currents generated by all NetworkCell segments on this RANK at once.

    Parameters
    ----------
    totnsegs: int
        total number of segments
    x, y, z: ndarray
        arrays of shape (totnsegs, 2) with (x,y,z) coordinates of start
        and end points of segments in units of (um)
    d: ndarray
        array of length totnsegs with segment diameters
    area: ndarray
        array of segment surface areas
    length: ndarray
        array of segment lengths
    somainds: ndarray
        indices of somatic segments, returned by get_idx('soma')
    """

    def __init__(self, totnsegs=0,
                 x=None,
                 y=None,
                 z=None,
                 d=None,
                 area=None,
                 length=None,
                 somainds=None):
        # each geometry attribute defaults to an empty array when omitted
        self.totnsegs = totnsegs
        self.x = x if x is not None else np.array([])
        self.y = y if y is not None else np.array([])
        self.z = z if z is not None else np.array([])
        self.d = d if d is not None else np.array([])
        self.area = area if area is not None else np.array([])
        # BUGFIX: guard on `length` (was mistakenly `area`), so a provided
        # `length` is no longer discarded whenever `area` is None
        self.length = length if length is not None else np.array([])
        self.somainds = somainds if somainds is not None else np.array([])

    def get_idx(self, section="soma"):
        """Return indices of somatic segments; only 'soma' is supported.

        Raises
        ------
        ValueError
            if `section` is anything other than "soma"
        """
        if section == "soma":
            return self.somainds
        else:
            raise ValueError('section argument must be "soma"')
class NetworkPopulation(object):
    """
    NetworkPopulation class representing a group of Cell objects
    distributed across RANKs.

    Parameters
    ----------
    CWD: path or None
        Current working directory
    CELLPATH: path or None
        Relative path from CWD to source files for cell model
        (morphology, hoc routines etc.)
    first_gid: int
        The global identifier of the first cell created in this population
        instance. The first_gid in the first population created should be 0
        and cannot exist in previously created NetworkPopulation instances
    Cell: class
        class defining a Cell object, see class NetworkCell above
    POP_SIZE: int
        number of cells in population
    name: str
        population name reference
    cell_args: dict
        keys and values for Cell object
    pop_args: dict
        keys and values for Network.draw_rand_pos assigning cell positions
    rotation_args: dict
        default cell rotations around x and y axis on the form
        { 'x': np.pi/2, 'y': 0 }. Can only have the keys 'x' and 'y'.
        Cells are randomly rotated around z-axis using the
        Cell.set_rotation() method.
    OUTPUTPATH: str
        path to output file destination
    """

    def __init__(self, CWD=None, CELLPATH=None, first_gid=0, Cell=NetworkCell,
                 POP_SIZE=4, name='L5PC',
                 cell_args=None, pop_args=None,
                 rotation_args=None,
                 OUTPUTPATH='example_parallel_network'):
        # set class attributes; normalize the optional dict arguments
        self.CWD = CWD
        self.CELLPATH = CELLPATH
        self.first_gid = first_gid
        self.Cell = Cell
        self.POP_SIZE = POP_SIZE
        self.name = name
        self.cell_args = cell_args if cell_args is not None else dict()
        self.pop_args = pop_args if pop_args is not None else dict()
        self.rotation_args = rotation_args if rotation_args is not None \
            else dict()
        self.OUTPUTPATH = OUTPUTPATH

        # create folder for output if it does not exist
        if RANK == 0:
            if not os.path.isdir(OUTPUTPATH):
                os.mkdir(OUTPUTPATH)
        COMM.Barrier()

        # container of Vector objects used to record times of action
        # potentials
        self.spike_vectors = []

        # set up population of cells on this RANK
        # (round-robin distribution of gids across RANKs)
        self.gids = [(i + first_gid) for i in range(POP_SIZE)
                     if (i + first_gid) % SIZE == RANK]

        # we have to enter the cell's corresponding file directory to
        # create cell because how EPFL set their code up.
        # BUGFIX: instantiate from the normalized self.cell_args dict --
        # the raw cell_args local is None by default, and Cell(**None)
        # raises TypeError.
        if CWD is not None:
            os.chdir(os.path.join(CWD, CELLPATH, self.name))
            self.cells = [Cell(**self.cell_args) for gid in self.gids]
            os.chdir(CWD)
        else:
            self.cells = [Cell(**self.cell_args) for gid in self.gids]

        # position each cell's soma in space (BUGFIX: use self.pop_args,
        # the raw pop_args local may be None)
        self.soma_pos = self.draw_rand_pos(POP_SIZE=len(self.gids),
                                           **self.pop_args)
        for i, cell in enumerate(self.cells):
            cell.set_pos(**self.soma_pos[i])

        # assign a random rotation around the z-axis of each cell
        self.rotations = np.random.uniform(0, np.pi * 2, len(self.gids))
        assert 'z' not in self.rotation_args.keys()
        for i, cell in enumerate(self.cells):
            cell.set_rotation(z=self.rotations[i], **self.rotation_args)

        # assign gid to each cell
        for gid, cell in zip(self.gids, self.cells):
            cell.gid = gid

        # gather gids, soma positions and cell rotations to RANK 0, and
        # write as structured array.
        if RANK == 0:
            populationData = flattenlist(COMM.gather(
                zip(self.gids, self.soma_pos, self.rotations)))

            # create structured array for storing data
            dtype = [('gid', 'i8'), ('x', float), ('y', float), ('z', float),
                     ('x_rot', float), ('y_rot', float), ('z_rot', float)]
            popDataArray = np.empty((len(populationData, )), dtype=dtype)
            for i, (gid, pos, z_rot) in enumerate(populationData):
                popDataArray[i]['gid'] = gid
                popDataArray[i]['x'] = pos['x']
                popDataArray[i]['y'] = pos['y']
                popDataArray[i]['z'] = pos['z']
                # NOTE(review): x_rot/y_rot are recorded as the fixed values
                # (pi/2, 0.) regardless of rotation_args -- confirm intended
                popDataArray[i]['x_rot'] = np.pi / 2
                popDataArray[i]['y_rot'] = 0.
                popDataArray[i]['z_rot'] = z_rot

            # Dump to hdf5 file, append to file if it exists
            f = h5py.File(os.path.join(self.OUTPUTPATH,
                                       'cell_positions_and_rotations.h5'),
                          'a')
            # delete old entry if it exists
            if self.name in f.keys():
                del f[self.name]
            assert self.name not in f.keys()
            f[self.name] = popDataArray
            f.close()
        else:
            COMM.gather(zip(self.gids, self.soma_pos, self.rotations))

        # sync
        COMM.Barrier()

    def draw_rand_pos(self, POP_SIZE, radius, loc, scale, cap=None):
        """
        Draw some random location for POP_SIZE cells within radius radius,
        at mean depth loc and standard deviation scale.

        Returned argument is a list of dicts [{'x', 'y', 'z'},].

        Parameters
        ----------
        POP_SIZE: int
            Population size
        radius: float
            Radius of population.
        loc: float
            expected mean depth of somas of population.
        scale: float
            expected standard deviation of depth of somas of population.
        cap: None, float or length 2 list of floats
            if float, cap distribution between [loc-cap, loc+cap),
            if list, cap distribution between [loc-cap[0], loc+cap[1]]

        Returns
        -------
        soma_pos: list
            List of dicts of len POP_SIZE
            where dict have keys x, y, z specifying
            xyz-coordinates of cell at list entry `i`.
        """
        # draw (x, y) uniformly within the circle by rejection sampling
        x = np.empty(POP_SIZE)
        y = np.empty(POP_SIZE)
        z = np.empty(POP_SIZE)
        for i in range(POP_SIZE):
            x[i] = (np.random.rand() - 0.5) * radius * 2
            y[i] = (np.random.rand() - 0.5) * radius * 2
            while np.sqrt(x[i]**2 + y[i]**2) >= radius:
                x[i] = (np.random.rand() - 0.5) * radius * 2
                y[i] = (np.random.rand() - 0.5) * radius * 2
        z = np.random.normal(loc=loc, scale=scale, size=POP_SIZE)
        if cap is not None:
            if type(cap) in [float, np.float32, np.float64]:
                # redraw depths falling outside [loc-cap, loc+cap)
                while not np.all((z >= loc - cap) & (z < loc + cap)):
                    inds = (z < loc - cap) ^ (z > loc + cap)
                    z[inds] = np.random.normal(loc=loc, scale=scale,
                                               size=inds.sum())
            elif isinstance(cap, list):
                # BUGFIX: error messages previously formatted the builtin
                # `float` type instead of the offending `cap` value
                assert len(cap) == 2, \
                    'cap = {} is not a length 2 list'.format(cap)
                while not np.all((z >= loc - cap[0]) & (z < loc + cap[1])):
                    inds = (z < loc - cap[0]) ^ (z > loc + cap[1])
                    z[inds] = np.random.normal(loc=loc, scale=scale,
                                               size=inds.sum())
            else:
                raise Exception(
                    'cap = {} is not None, a float or a length 2 '
                    'list of floats'.format(cap))

        soma_pos = []
        for i in range(POP_SIZE):
            soma_pos.append({'x': x[i], 'y': y[i], 'z': z[i]})
        return soma_pos
class Network(object):
"""
Network class, creating distributed populations of cells of
type Cell and handling connections between cells in the respective
populations.
Parameters
----------
dt: float
Simulation timestep size
tstart: float
Start time of simulation
tstop: float
End time of simulation
v_init: float
Membrane potential set at first timestep across all cells
celsius: float
Global control of temperature, affect channel kinetics.
It will also be forced when creating the different Cell objects, as
LFPy.Cell and LFPy.TemplateCell also accept the same keyword
argument.
verbose: bool
if True, print out misc. messages
"""
    def __init__(
            self,
            dt=0.1,
            tstart=0.,
            tstop=1000.,
            v_init=-65.,
            celsius=6.3,
            OUTPUTPATH='example_parallel_network',
            verbose=False):
        """Set up an empty network container; populations are added later
        via `create_population` and wired up via `connect`."""
        # store simulation control parameters as attributes
        self.dt = dt
        self.tstart = tstart
        self.tstop = tstop
        self.v_init = v_init
        self.celsius = celsius
        self.OUTPUTPATH = OUTPUTPATH
        self.verbose = verbose

        # we need NEURON's ParallelContext for communicating NetCon events
        # between MPI ranks
        self.pc = neuron.h.ParallelContext()

        # create empty list for connections between cells (not to be
        # confused with each cell's list of netcons _hoc_netconlist)
        self._hoc_netconlist = neuron.h.List()

        # The different populations in the Network will be collected in
        # a dictionary of NetworkPopulation objects, where the keys
        # represent population names. The names are also put in a list
        # ordered according to the order populations are created in (as
        # some operations rely on this particular order)
        self.populations = dict()
        self.population_names = []
    def create_population(self, CWD=None, CELLPATH=None, Cell=NetworkCell,
                          POP_SIZE=4, name='L5PC',
                          cell_args=None, pop_args=None,
                          rotation_args=None):
        """
        Create and append a distributed POP_SIZE-sized population of cells
        of type Cell with the corresponding name. Cell-object references,
        gids on this RANK, population size POP_SIZE and names will be added
        to the lists Network.gids, Network.cells, Network.sizes and
        Network.names, respectively

        Parameters
        ----------
        CWD: path
            Current working directory
        CELLPATH: path
            Relative path from CWD to source files for cell model
            (morphology, hoc routines etc.)
        Cell: class
            class defining a Cell-like object, see class NetworkCell
        POP_SIZE: int
            number of cells in population
        name: str
            population name reference
        cell_args: dict
            keys and values for Cell object
        pop_args: dict
            keys and values for Network.draw_rand_pos assigning cell
            positions
        rotation_args: dict
            default cell rotations around x and y axis on the form
            { 'x': np.pi/2, 'y': 0 }. Can only have the keys 'x' and 'y'.
            Cells are randomly rotated around z-axis using the
            Cell.set_rotation method.
        """
        assert name not in self.populations.keys(), \
            'population name {} already taken'.format(name)

        # compute the first global id of this new population, based
        # on population sizes of existing populations
        first_gid = 0
        for p in self.populations.values():
            first_gid += p.POP_SIZE

        # create NetworkPopulation object (instantiates the cells on this
        # RANK and positions/rotates them)
        population = NetworkPopulation(
            CWD=CWD,
            CELLPATH=CELLPATH,
            first_gid=first_gid,
            Cell=Cell,
            POP_SIZE=POP_SIZE,
            name=name,
            cell_args=cell_args,
            pop_args=pop_args,
            rotation_args=rotation_args,
            OUTPUTPATH=self.OUTPUTPATH)

        # associate gids of cells on this RANK such that NEURON can look up
        # at which RANK different cells are created when connecting the
        # network
        for gid in population.gids:
            self.pc.set_gid2node(gid, RANK)

        # Prepare connection targets by iterating over local neurons in pop.
        for gid, cell in zip(population.gids, population.cells):
            # attach NetCon source (spike detector) to each cell's soma with
            # no target to cell gid
            cell.create_spike_detector(None)
            # associate cell gid with the NetCon source
            self.pc.cell(gid, cell._hoc_sd_netconlist[-1])

            # record spike events of this cell
            population.spike_vectors.append(neuron.h.Vector())
            cell._hoc_sd_netconlist[-1].record(population.spike_vectors[-1])

        # add population object to dictionary of populations
        self.populations[name] = population

        # append population name to list (Network.populations.keys() not
        # unique)
        self.population_names.append(name)
    def get_connectivity_rand(self, pre='L5PC', post='L5PC', connprob=0.2):
        """
        Dummy function creating a (boolean) cell to cell connectivity
        matrix between pre and postsynaptic populations.

        Connections are drawn randomly between presynaptic cell gids in
        population 'pre' and postsynaptic cell gids in 'post' on this RANK
        with a fixed connection probability. self-connections are disabled
        if presynaptic and postsynaptic populations are the same.

        Parameters
        ----------
        pre: str
            presynaptic population name
        post: str
            postsynaptic population name
        connprob: float in [0, 1]
            connection probability, connections are drawn on random

        Returns
        -------
        ndarray, dtype bool
            n_pre x n_post array of connections between n_pre presynaptic
            neurons and n_post postsynaptic neurons on this RANK. Entries
            with True denotes a connection.
        """
        n_pre = self.populations[pre].POP_SIZE
        gids = np.array(self.populations[post].gids).astype(int)

        # first check if there are any postsyn cells on this RANK
        if gids.size > 0:
            # define incoming connections for cells on this RANK:
            # one Bernoulli(connprob) draw per (pre gid, local post cell)
            C = np.random.binomial(n=1, p=connprob,
                                   size=(n_pre, gids.size)
                                   ).astype(bool)
            if pre == post:
                # avoid self connections: map matrix indices to global gids,
                # drop entries where pre gid == post gid, then map back.
                gids_pre, gids_post = np.where(C)
                # row index -> global presynaptic gid
                gids_pre += self.populations[pre].first_gid
                # column index -> global postsynaptic gid
                # (assumes round-robin distribution of gids across RANKs)
                gids_post *= SIZE
                gids_post += self.populations[post].gids[0]
                inds = gids_pre != gids_post
                gids_pre = gids_pre[inds]
                gids_pre -= self.populations[pre].first_gid
                gids_post = gids_post[inds]
                gids_post -= self.populations[post].gids[0]
                gids_post //= SIZE
                c = np.c_[gids_pre, gids_post]
                # create boolean matrix from the surviving index pairs
                C = ss.csr_matrix((np.ones(gids_pre.shape[0], dtype=bool),
                                   (c[:, 0], c[:, 1])),
                                  shape=(n_pre, gids.size), dtype=bool)
                return C.toarray()
            else:
                return C
        else:
            # no postsynaptic cells on this RANK: empty connectivity
            return np.zeros((n_pre, 0), dtype=bool)
def connect(self, pre, post, connectivity,
            syntype=neuron.h.ExpSyn,
            synparams=dict(tau=2., e=0.),
            weightfun=np.random.normal,
            weightargs=dict(loc=0.1, scale=0.01),
            minweight=0,
            delayfun=stats.truncnorm,
            delayargs=dict(a=0.3, b=np.inf, loc=2, scale=0.2),
            mindelay=None,
            multapsefun=stats.truncnorm,
            multapseargs=dict(a=(1 - 4) / 1.,
                              b=(10 - 4) / 1,
                              loc=4,
                              scale=1),
            syn_pos_args=dict(section=['soma', 'dend', 'apic'],
                              fun=[stats.norm] * 2,
                              funargs=[dict(loc=0, scale=100)] * 2,
                              funweights=[0.5] * 2,
                              z_min=-1E6, z_max=1E6,
                              ),
            save_connections=False,
            ):
    """
    Connect presynaptic cells to postsynaptic cells.

    Connections are drawn from presynaptic cells to postsynaptic cells,
    hence the connectivity array must only be specified for postsynaptic
    units existing on this RANK.

    Parameters
    ----------
    pre: str
        presynaptic population name
    post: str
        postsynaptic population name
    connectivity: ndarray / (scipy.sparse array)
        boolean connectivity matrix between pre and post.
    syntype: hoc.HocObject
        reference to NEURON synapse mechanism, e.g., ``neuron.h.ExpSyn``
    synparams: dict
        dictionary of parameters for synapse mechanism, keys 'e', 'tau'
        etc.
    weightfun: function
        function used to draw weights from a numpy.random distribution
    weightargs: dict
        parameters passed to weightfun
    minweight: float
        minimum weight in units of nS
    delayfun: function
        function used to draw delays from a subclass of
        scipy.stats.rv_continuous or numpy.random distribution
    delayargs: dict
        parameters passed to ``delayfun``
    mindelay: float
        minimum delay in multiples of dt. Ignored if ``delayfun`` is
        inherited from ``scipy.stats.rv_continuous``
    multapsefun: function or None
        function reference, e.g., ``scipy.stats.rv_continuous`` used to
        draw a number of synapses for a cell-to-cell connection.
        If None, draw only one connection
    multapseargs: dict
        arguments passed to multapsefun
    syn_pos_args: dict
        arguments passed to inherited ``LFPy.Cell`` method
        ``NetworkCell.get_rand_idx_area_and_distribution_norm`` to find
        synapse locations.
    save_connections: bool
        if True (default False), save instantiated connections to HDF5
        file ``Network.OUTPUTPATH/synapse_connections.h5`` as dataset
        ``<pre>:<post>`` using a structured ndarray with fields
        ``gid_pre`` (presynaptic cell id), ``gid`` (postsynaptic cell
        id), ``weight``, ``delay``, ``sec`` (section name), ``sec.x``
        (relative location on section) and ``x``, ``y``, ``z``
        (midpoint coordinates of the target compartment).

    Returns
    -------
    list
        Length 2 list with ndarrays [conncount, syncount] with numbers of
        instantiated connections and synapses.

    Raises
    ------
    DeprecationWarning
        if ``delayfun`` is not a subclass of ``scipy.stats.rv_continuous``
    """
    # check if delayfun is a scipy.stats.rv_continuous like function that
    # provides a function `rvs` for random variates.
    # Otherwise, raise some warnings
    if not hasattr(delayfun, 'rvs'):
        warn(f'argument delayfun={delayfun.__str__()} do not appear ' +
             'scipy.stats.rv_continuous or scipy.stats.rv_discrete like ' +
             'and will be deprecated in the future')
    else:
        # mindelay only applies to the plain-callable (numpy.random) path
        if mindelay is not None:
            warn(f'mindelay={mindelay} not usable with ' +
                 f'delayfun={delayfun.__str__()}')

    # set up connections from all cells in presynaptic to post across RANKs
    n0 = self.populations[pre].first_gid
    # gids of presynaptic neurons:
    gids_pre = np.arange(n0, n0 + self.populations[pre].POP_SIZE)

    # count connections and synapses made on this RANK
    conncount = connectivity.astype(int).sum()
    syncount = 0

    # keep track of synapse positions for this connect
    # call on this rank such that these can be communicated and stored
    syn_idx_pos = []

    # iterate over gids on this RANK and create connections
    for i, (gid_post, cell) in enumerate(zip(self.populations[post].gids,
                                             self.populations[post].cells)
                                         ):
        # do NOT iterate over all possible presynaptic neurons
        for gid_pre in gids_pre[connectivity[:, i]]:
            # throw a warning if sender neuron is identical to receiving
            # neuron
            if gid_post == gid_pre:
                print(
                    'connecting cell w. gid {} to itself (RANK {})'.format(
                        gid_post, RANK))

            # assess number of synapses
            if multapsefun is None:
                nidx = 1
            else:
                if hasattr(multapsefun, 'pdf'):
                    # assume we're dealing with a scipy.stats.rv_continuous
                    # like method. Then evaluate pdf at positive integer
                    # values and feed as custom scipy.stats.rv_discrete
                    # distribution
                    d = multapsefun(**multapseargs)
                    # number of multapses must be on interval [1, 100]
                    xk = np.arange(1, 100)
                    pk = d.pdf(xk)
                    pk /= pk.sum()
                    nidx = stats.rv_discrete(values=(xk, pk)).rvs()
                    # this aint pretty:
                    mssg = (
                        'multapsefun: '
                        + multapsefun(**multapseargs).__str__()
                        + f'w. multapseargs: {multapseargs} resulted '
                        + f'in {nidx} synapses'
                    )
                    assert nidx >= 1, mssg
                elif hasattr(multapsefun, 'pmf'):
                    # assume we're dealing with a scipy.stats.rv_discrete
                    # like method that can be used to generate random
                    # variates directly
                    nidx = multapsefun(**multapseargs).rvs()
                    mssg = (
                        f'multapsefun: {multapsefun().__str__()} w. '
                        + f'multapseargs: {multapseargs} resulted in '
                        + f'{nidx} synapses'
                    )
                    assert nidx >= 1, mssg
                else:
                    warn(f'multapsefun{multapsefun.__str__()} will be ' +
                         'deprecated. Use scipy.stats.rv_continuous or ' +
                         'scipy.stats.rv_discrete like methods instead')
                    nidx = 0
                    j = 0
                    while nidx <= 0 and j < 1000:
                        nidx = int(round(multapsefun(**multapseargs)))
                        j += 1
                    # BUGFIX: test the outcome, not the trial counter --
                    # a successful draw on the very last trial also leaves
                    # j == 1000 and must not raise
                    if nidx <= 0:
                        raise Exception(
                            'change multapseargs as no positive '
                            'synapse # was found in 1000 trials')

            # find synapse locations and corresponding section names
            idxs = cell.get_rand_idx_area_and_distribution_norm(
                nidx=nidx, **syn_pos_args)
            secs = cell.get_idx_name(idxs)

            # draw weights
            weights = weightfun(size=nidx, **weightargs)
            # redraw weights less than minweight
            while np.any(weights < minweight):
                j = weights < minweight
                weights[j] = weightfun(size=j.sum(), **weightargs)

            # draw delays
            if hasattr(delayfun, 'rvs'):
                delays = delayfun(**delayargs).rvs(size=nidx)
                # check that all delays are > dt
                try:
                    assert np.all(delays >= self.dt)
                except AssertionError as ae:
                    # BUGFIX: `raise ae(...)` called the caught exception
                    # instance (TypeError); raise a fresh AssertionError
                    # with the intended message instead
                    raise AssertionError(
                        f'the delayfun parameter a={delayargs["a"]} '
                        + f'resulted in delay less than dt={self.dt}'
                    ) from ae
            else:
                delays = delayfun(size=nidx, **delayargs)
                # redraw delays shorter than mindelay
                while np.any(delays < mindelay):
                    j = delays < mindelay
                    delays[j] = delayfun(size=j.sum(), **delayargs)

            for i, ((idx, secname, secx), weight, delay) in enumerate(
                    zip(secs, weights, delays)):
                cell.create_synapse(
                    cell,
                    # TODO: Find neater way of accessing
                    # Section reference, this looks slow
                    sec=list(
                        cell.allseclist)[
                        np.where(
                            np.array(
                                cell.allsecnames) == secname)[0][0]],
                    x=secx,
                    syntype=syntype,
                    synparams=synparams)
                # connect up NetCon object
                nc = self.pc.gid_connect(gid_pre, cell.netconsynapses[-1])
                nc.weight[0] = weight
                nc.delay = delays[i]
                self._hoc_netconlist.append(nc)

                # store also synapse indices allowing for computing LFPs
                # from syn.i
                cell.synidx.append(idx)

                # store gid and xyz-coordinate of synapse positions
                syn_idx_pos.append((gid_pre,
                                    cell.gid,
                                    weight,
                                    delays[i],
                                    secname,
                                    secx,
                                    cell.x[idx].mean(axis=-1),
                                    cell.y[idx].mean(axis=-1),
                                    cell.z[idx].mean(axis=-1)))

            syncount += nidx

    # sum counters on RANK 0 (returns None on other RANKs)
    conncount = COMM.reduce(conncount, op=MPI.SUM, root=0)
    syncount = COMM.reduce(syncount, op=MPI.SUM, root=0)

    if RANK == 0:
        print('Connected population {} to {}'.format(pre, post),
              'by {} connections and {} synapses'.format(conncount,
                                                         syncount))
    else:
        conncount = None
        syncount = None

    # gather and write syn_idx_pos data
    if save_connections:
        if RANK == 0:
            synData = flattenlist(COMM.gather(syn_idx_pos))

            # convert to structured array
            dtype = [('gid_pre', 'i8'),
                     ('gid', 'i8'),
                     ('weight', 'f8'),
                     ('delay', 'f8'),
                     ('sec', 'S64'),
                     ('sec.x', 'f8'),
                     ('x', 'f8'),
                     ('y', 'f8'),
                     ('z', 'f8')]
            synDataArray = np.empty((len(synData), ), dtype=dtype)
            for i, (gid_pre, gid, weight, delay, secname, secx, x, y, z
                    ) in enumerate(synData):
                synDataArray[i]['gid_pre'] = gid_pre
                synDataArray[i]['gid'] = gid
                synDataArray[i]['weight'] = weight
                synDataArray[i]['delay'] = delay
                synDataArray[i]['sec'] = secname
                synDataArray[i]['sec.x'] = secx
                synDataArray[i]['x'] = x
                synDataArray[i]['y'] = y
                synDataArray[i]['z'] = z

            # Dump to hdf5 file, append to file if entry exists
            with h5py.File(os.path.join(self.OUTPUTPATH,
                                        'synapse_connections.h5'),
                           'a') as f:
                key = '{}:{}'.format(pre, post)
                # replace existing dataset for this projection, if any
                if key in f.keys():
                    del f[key]
                assert key not in f.keys()
                f[key] = synDataArray

                # save global connection data (synapse type/parameters)
                # equal for all synapses
                try:
                    grp = f.create_group('synparams')
                except ValueError:
                    grp = f['synparams']
                try:
                    subgrp = grp.create_group(key)
                except ValueError:
                    subgrp = grp[key]
                subgrp['mechanism'] = syntype.__str__().strip('()')
                for key, value in synparams.items():
                    subgrp[key] = value
        else:
            # non-root RANKs only contribute their data to the gather
            COMM.gather(syn_idx_pos)

    return COMM.bcast([conncount, syncount])
def enable_extracellular_stimulation(self, electrode, t_ext=None, n=1,
                                     model='inf'):
    r"""
    Enable extracellular stimulation with NEURON's `extracellular`
    mechanism. Extracellular potentials are computed from electrode
    currents using the point-source approximation.

    If ``model`` is ``'inf'`` (default), potentials are computed as
    (:math:`r_i` is the position of a compartment :math:`i`,
    :math:`r_n` is the position of an electrode :math:`n`,
    :math:`\sigma` is the conductivity of the medium):

    .. math::

        V_e(r_i) = \sum_n \frac{I_n}{4 \pi \sigma |r_i - r_n|}

    If ``model`` is ``'semi'``, the method of images is used:

    .. math::

        V_e(r_i) = \sum_n \frac{I_n}{2 \pi \sigma |r_i - r_n|}

    Parameters
    ----------
    electrode: RecExtElectrode
        Electrode object with stimulating currents
    t_ext: np.ndarray or list
        Time in ms corresponding to step changes in the provided currents.
        If None, currents are assumed to have
        the same time steps as the NEURON simulation.
    n: int
        Points per electrode for spatial averaging
    model: str
        ``'inf'`` or ``'semi'``. If ``'inf'`` the medium is assumed to be
        infinite and homogeneous. If ``'semi'``, the method of
        images is used.

    Returns
    -------
    v_ext: dict of np.ndarrays
        Computed extracellular potentials at cell mid points
        for each cell of the network's populations. Formatted as
        ``v_ext = {'pop1': np.ndarray[cell, cell_seg,t_ext]}``
    """
    # NOTE(review): despite the docstring, t_ext=None would raise a
    # TypeError below on len(t_ext) -- confirm intended handling upstream
    v_ext = {}
    for popname in self.populations.keys():
        cells = self.populations[popname].cells
        # BUGFIX: a population may hold zero cells on this RANK when the
        # network is distributed over MPI processes; indexing cells[0]
        # would then raise IndexError. Return an empty array instead.
        if len(cells) == 0:
            v_ext[popname] = np.zeros((0, 0, len(t_ext)))
            continue
        # one (totnsegs, n_timesteps) slice per cell in this population
        v_ext[popname] = np.zeros(
            (len(cells), cells[0].totnsegs, len(t_ext)))
        for id_cell, cell in enumerate(cells):
            v_ext[popname][id_cell] = \
                cell.enable_extracellular_stimulation(
                    electrode, t_ext, n, model)
    return v_ext
def simulate(self, probes=None,
             rec_imem=False, rec_vmem=False,
             rec_ipas=False, rec_icap=False,
             rec_isyn=False, rec_vmemsyn=False, rec_istim=False,
             rec_pop_contributions=False,
             rec_variables=[], variable_dt=False, atol=0.001,
             to_memory=True, to_file=False,
             file_name='OUTPUT.h5',
             **kwargs):
    # NOTE(review): rec_variables=[] is a mutable default argument; it is
    # only read here, never mutated, but prefer None as default.
    """
    This is the main function running the simulation of the network model.

    Parameters
    ----------
    probes: list of :obj:, optional
        None or list of LFPykit.RecExtElectrode like object instances that
        each have a public method `get_transformation_matrix` returning
        a matrix that linearly maps each compartments' transmembrane
        current to corresponding measurement as

        .. math:: \\mathbf{P} = \\mathbf{M} \\mathbf{I}

    rec_imem: bool
        If true, segment membrane currents will be recorded
        If no electrode argument is given, it is necessary to
        set rec_imem=True in order to calculate LFP later on.
        Units of (nA).
    rec_vmem: bool
        record segment membrane voltages (mV)
    rec_ipas: bool
        record passive segment membrane currents (nA)
    rec_icap: bool
        record capacitive segment membrane currents (nA)
    rec_isyn: bool
        record synaptic currents of from Synapse class (nA)
    rec_vmemsyn: bool
        record membrane voltage of segments with Synapse (mV)
    rec_istim: bool
        record currents of StimIntraElectrode (nA)
    rec_pop_contributions: bool
        If True, compute and return single-population contributions to
        the extracellular potential during simulation time
    rec_variables: list of str
        variables to record, i.e arg=['cai', ]
    variable_dt: boolean
        use variable timestep in NEURON. Can not be combined with `to_file`
    atol: float
        absolute tolerance used with NEURON variable timestep
    to_memory: bool
        Simulate to memory. Only valid with `probes=[<probe>, ...]`, which
        store measurements to -> <probe>.data
    to_file: bool
        only valid with `probes=[<probe>, ...]`, saves measurement in
        hdf5 file format.
    file_name: str
        If to_file is True, file which measurements will be
        written to. The file format is HDF5, default is "OUTPUT.h5", put
        in folder Network.OUTPUTPATH
    **kwargs: keyword argument dict values passed along to function
        `__run_simulation_with_probes()`, containing some or all of
        the boolean flags: `use_ipas`, `use_icap`, `use_isyn`
        (defaulting to `False`).

    Returns
    -------
    events
        Dictionary with keys `times` and `gids`, where values are
        ndarrays with detected spikes and global neuron identifiers

    Raises
    ------
    Exception
        if `CVode().use_fast_imem()` method not found
    AssertionError
        if rec_pop_contributions==True and probes==None
    """
    # set up integrator, use the CVode().fast_imem method by default
    # as it doesn't hurt sim speeds much if at all.
    cvode = neuron.h.CVode()
    try:
        cvode.use_fast_imem(1)
    except AttributeError:
        raise Exception('neuron.h.CVode().use_fast_imem() not found. '
                        'Please update NEURON to v.7.4 or newer')

    # test some of the inputs
    if probes is None:
        assert rec_pop_contributions is False, \
            'rec_pop_contributions can not be True when probes is None'

    # fixed-step recorders use the network dt; None means variable-step
    if not variable_dt:
        dt = self.dt
    else:
        dt = None

    # attach the requested recorders to every cell on this RANK
    for name in self.population_names:
        for cell in self.populations[name].cells:
            cell._set_soma_volt_recorder(dt)
            if rec_imem:
                cell._set_imem_recorders(dt)
            if rec_vmem:
                cell._set_voltage_recorders(dt)
            if rec_ipas:
                cell._set_ipas_recorders(dt)
            if rec_icap:
                cell._set_icap_recorders(dt)
            if len(rec_variables) > 0:
                cell._set_variable_recorders(rec_variables)

    # run fadvance until t >= tstop, and calculate LFP if asked for
    if probes is None and not rec_pop_contributions and not to_file:
        if not rec_imem:
            if self.verbose:
                print("rec_imem==False, not recording membrane currents!")
        self.__run_simulation(cvode, variable_dt, atol)
    else:
        # probed run writes per-RANK temporary files that are merged
        # into `file_name` below
        self.__run_simulation_with_probes(
            cvode=cvode,
            probes=probes,
            variable_dt=variable_dt,
            atol=atol,
            to_memory=to_memory,
            to_file=to_file,
            file_name='tmp_output_RANK_{:03d}.h5',
            rec_pop_contributions=rec_pop_contributions,
            **kwargs)

    # convert recorded hoc vectors to numpy arrays on each cell
    for name in self.population_names:
        for cell in self.populations[name].cells:
            # somatic trace
            cell.somav = np.array(cell.somav)
            if rec_imem:
                cell._calc_imem()
            if rec_ipas:
                cell._calc_ipas()
            if rec_icap:
                cell._calc_icap()
            if rec_vmem:
                cell._collect_vmem()
            if rec_isyn:
                cell._collect_isyn()
            if rec_vmemsyn:
                cell._collect_vsyn()
            if rec_istim:
                cell._collect_istim()
            if len(rec_variables) > 0:
                cell._collect_rec_variables(rec_variables)
            if hasattr(cell, '_hoc_netstimlist'):
                del cell._hoc_netstimlist

    # Collect spike trains across all RANKs to RANK 0
    for name in self.population_names:
        population = self.populations[name]
        for i in range(len(population.spike_vectors)):
            population.spike_vectors[i] = \
                np.array(population.spike_vectors[i])
    if RANK == 0:
        times = []
        gids = []
        for i, name in enumerate(self.population_names):
            times.append([])
            gids.append([])
            times[i] += [x for x in self.populations[name].spike_vectors]
            gids[i] += [x for x in self.populations[name].gids]
            # matching COMM.send calls below run on every non-root RANK
            for j in range(1, SIZE):
                times[i] += COMM.recv(source=j, tag=13)
                gids[i] += COMM.recv(source=j, tag=14)
    else:
        times = None
        gids = None
        for name in self.population_names:
            COMM.send([x for x in self.populations[name].spike_vectors],
                      dest=0, tag=13)
            COMM.send([x for x in self.populations[name].gids],
                      dest=0, tag=14)

    # create final output file, summing up single RANK output from
    # temporary files
    if to_file and probes is not None:
        op = MPI.SUM
        fname = os.path.join(self.OUTPUTPATH,
                             'tmp_output_RANK_{:03d}.h5'.format(RANK))
        f0 = h5py.File(fname, 'r')
        if RANK == 0:
            f1 = h5py.File(os.path.join(self.OUTPUTPATH, file_name), 'w')
            # output datasets get one float field per measurement key
            dtype = []
            for key, value in f0[list(f0.keys())[0]].items():
                dtype.append((str(key), float))
        for grp in f0.keys():
            if RANK == 0:
                # get shape from the first dataset
                # (they should all be equal):
                for value in f0[grp].values():
                    shape = value.shape
                    continue
                f1[grp] = np.zeros(shape, dtype=dtype)
            for key, value in f0[grp].items():
                if RANK == 0:
                    recvbuf = np.zeros(shape, dtype=float)
                else:
                    recvbuf = None
                # element-wise sum across RANKs onto root
                COMM.Reduce(value[()].astype(float), recvbuf,
                            op=op, root=0)
                if RANK == 0:
                    f1[grp][key] = recvbuf
        f0.close()
        if RANK == 0:
            f1.close()
        # each RANK removes its own temporary file
        os.remove(fname)

    if probes is not None:
        if to_memory:
            # communicate and sum up measurements on each probe before
            # returing spike times and corresponding gids:
            for probe in probes:
                probe.data = ReduceStructArray(probe.data)

    return dict(times=times, gids=gids)
def __create_network_dummycell(self):
    """
    Set up parameters for a DummyCell object, allowing for computing
    the sum of all single-cell LFPs at each timestep, essentially
    creating one supercell with all segments of all cell objects
    present on this RANK.

    Returns
    -------
    nsegs: ndarray of int
        number of segments per population on this RANK
    dummycell: DummyCell
        concatenated geometry of every cell on this RANK
    """
    # compute the total number of segments per population on this RANK
    nsegs = [[cell.totnsegs for cell in self.populations[name].cells]
             for name in self.population_names]
    # populations without cells on this RANK contribute 0 segments
    for i, nseg in enumerate(nsegs):
        if nseg == []:
            nsegs[i] = [0]
    for i, y in enumerate(nsegs):
        nsegs[i] = np.sum(y)
    nsegs = np.array(nsegs, dtype=int)
    totnsegs = nsegs.sum()

    # accumulators for concatenated per-segment geometry
    x = np.empty((0, 2))
    y = np.empty((0, 2))
    z = np.empty((0, 2))
    d = np.array([])
    area = np.array([])
    length = np.array([])
    somainds = np.array([], dtype=int)

    # nseg tracks the running segment offset so soma indices of each
    # cell map into the concatenated index space
    nseg = 0
    for name in self.population_names:
        for cell in self.populations[name].cells:
            x = np.r_[x, cell.x]
            y = np.r_[y, cell.y]
            z = np.r_[z, cell.z]
            d = np.r_[d, cell.d]
            area = np.r_[area, cell.area]
            length = np.r_[length, cell.length]
            somainds = np.r_[somainds, cell.get_idx("soma") + nseg]
            nseg += cell.totnsegs

    # return number of segments per population and DummyCell object
    return nsegs, DummyCell(totnsegs, x, y, z, d, area, length, somainds)
def __run_simulation(self, cvode, variable_dt=False, atol=0.001):
    """
    Running the actual simulation in NEURON, simulations in NEURON
    are now interruptable.

    Parameters
    ----------
    cvode: neuron.h.CVode() object
    variable_dt: bool
        switch for variable-timestep method
    atol: float
        absolute tolerance with CVode for variable time-step method
    """
    # set maximum integration step, it is necessary for communication of
    # spikes across RANKs to occur.
    self.pc.set_maxstep(10)

    # time resolution
    neuron.h.dt = self.dt

    # needed for variable dt method
    if variable_dt:
        cvode.active(1)
        cvode.atol(atol)
    else:
        cvode.active(0)

    # initialize state
    neuron.h.finitialize(self.v_init * units.mV)

    # initialize current- and record
    if cvode.active():
        cvode.re_init()
    else:
        neuron.h.fcurrent()
    neuron.h.frecord_init()

    # Starting simulation at tstart
    neuron.h.t = self.tstart

    # only needed if LFPy.Synapse classes are used.
    for name in self.population_names:
        for cell in self.populations[name].cells:
            cell._load_spikes()

    # advance simulation until tstop
    neuron.h.continuerun(self.tstop * units.ms)
def __run_simulation_with_probes(self, cvode,
                                 probes=None,
                                 variable_dt=False,
                                 atol=0.001,
                                 rtol=0.,
                                 to_memory=True,
                                 to_file=False,
                                 file_name=None,
                                 use_ipas=False, use_icap=False,
                                 use_isyn=False,
                                 rec_pop_contributions=False
                                 ):
    """
    Running the actual simulation in NEURON with list of probes.
    Each object in `probes` must have a public method
    `get_transformation_matrix` which returns a linear mapping of
    transmembrane currents to corresponding measurement.

    Parameters
    ----------
    cvode: neuron.h.CVode() object
    probes: list of :obj:, optional
        None or list of LFPykit.RecExtElectrode like object instances that
        each have a public method `get_transformation_matrix` returning
        a matrix that linearly maps each compartments' transmembrane
        current to corresponding measurement as

        .. math:: \\mathbf{P} = \\mathbf{M} \\mathbf{I}

    variable_dt: bool
        switch for variable-timestep method
    atol: float
        absolute tolerance with CVode for variable time-step method
    rtol: float
        relative tolerance with CVode for variable time-step method
        NOTE(review): accepted but never forwarded to CVode below --
        confirm whether cvode.rtol(rtol) was intended
    to_memory: bool
        Boolean flag for computing extracellular potentials,
        default is True.
        If True, the corresponding <probe>.data attribute will be set.
    to_file: bool or None
        Boolean flag for computing extracellular potentials to file
        <OUTPUTPATH/file_name>, default is False. Raises an Exception if
        `to_memory` is True.
    file_name: formattable str
        If to_file is True, file which extracellular potentials will be
        written to. The file format is HDF5, default is
        "output_RANK_{:03d}.h5". The output is written per RANK, and the
        RANK # will be inserted into the corresponding file name.
    use_ipas: bool
        if True, compute the contribution to extracellular potentials
        across the passive leak channels embedded in the cells membranes
        summed over populations
    use_icap: bool
        if True, compute the contribution to extracellular potentials
        across the membrane capacitance embedded in the cells membranes
        summed over populations
    use_isyn: bool
        if True, compute the contribution to extracellular potentials
        across the excitatory and inhibitory synapses embedded in the cells
        membranes summed over populations
    rec_pop_contributions: bool
        if True, compute and return single-population contributions to the
        extracellular potential during each time step of the simulation

    Raises
    ------
    Exception:
        - `if to_memory == to_file == True`
        - `if to_file == True and file_name is None`
        - `if to_file == variable_dt == True`
        - `if <probe>.cell is not None`
    """
    if to_memory and to_file:
        raise Exception('to_memory and to_file can not both be True')
    if to_file and file_name is None:
        raise Exception

    # create a dummycell object lumping together needed attributes
    # for calculation of extracellular potentials etc. The population_nsegs
    # array is used to slice indices such that single-population
    # contributions to the potential can be calculated.
    population_nsegs, network_dummycell = self.__create_network_dummycell()

    # set cell attribute on each probe, assuming that each probe was
    # instantiated with argument cell=None
    for probe in probes:
        if probe.cell is None:
            probe.cell = network_dummycell
        else:
            raise Exception('{}.cell!=None'.format(probe.__class__))

    # create list of transformation matrices; one for each probe
    transforms = []
    if probes is not None:
        for probe in probes:
            transforms.append(probe.get_transformation_matrix())

    # reset probe.cell to None, as it is no longer needed
    for probe in probes:
        probe.cell = None

    # set maximum integration step, it is necessary for communication of
    # spikes across RANKs to occur.
    # NOTE: Should this depend on the minimum delay in the network?
    self.pc.set_maxstep(10)

    # Initialize NEURON simulations of cell object
    neuron.h.dt = self.dt

    # needed for variable dt method
    if variable_dt:
        cvode.active(1)
        cvode.atol(atol)
    else:
        cvode.active(0)

    # initialize state
    neuron.h.finitialize(self.v_init * units.mV)

    # use fast calculation of transmembrane currents
    cvode.use_fast_imem(1)

    # initialize current- and record
    if cvode.active():
        cvode.re_init()
    else:
        neuron.h.fcurrent()
    neuron.h.frecord_init()

    # Starting simulation at tstart
    neuron.h.t = self.tstart

    # create list of cells across all populations to simplify loops
    cells = []
    for name in self.population_names:
        cells += self.populations[name].cells

    # load spike times from NetCon, only needed if LFPy.Synapse class
    # is used
    for cell in cells:
        cell._load_spikes()

    # define data type for structured arrays dependent on the boolean
    # arguments
    dtype = [('imem', float)]
    if use_ipas:
        dtype += [('ipas', float)]
    if use_icap:
        dtype += [('icap', float)]
    if use_isyn:
        dtype += [('isyn_e', float), ('isyn_i', float)]
    if rec_pop_contributions:
        dtype += list(zip(self.population_names,
                          [float] * len(self.population_names)))

    # setup list of structured arrays for all extracellular potentials
    # at each contact from different source terms and subpopulations
    if to_memory:
        for probe, M in zip(probes, transforms):
            probe.data = np.zeros((M.shape[0],
                                   int(self.tstop / self.dt) + 1),
                                  dtype=dtype)

    # signals for each probe will be stored here during simulations
    if to_file:
        # ensure right ending:
        if file_name.split('.')[-1] != 'h5':
            file_name += '.h5'

        outputfile = h5py.File(os.path.join(self.OUTPUTPATH,
                                            file_name.format(RANK)), 'w')

        # define unique group names for each probe by appending the first
        # unused integer suffix to the probe class name
        names = []
        for probe, M in zip(probes, transforms):
            name = probe.__class__.__name__
            i = 0
            while True:
                if name + '{}'.format(i) not in names:
                    names.append(name + '{}'.format(i))
                    break
                i += 1

        # create groups
        for i, (name, probe, M) in enumerate(zip(names, probes,
                                                 transforms)):
            # can't do it this way until h5py issue #740
            # (https://github.com/h5py/h5py/issues/740) is fixed:
            # outputfile['{}'.format(name)] = np.zeros((M.shape[0],
            #     int(network.tstop / network.dt) + 1), dtype=dtype)
            probe.data = outputfile.create_group('{}'.format(name))
            for key, val in dtype:
                probe.data[key] = np.zeros((M.shape[0],
                                            int(self.tstop / self.dt)
                                            + 1),
                                           dtype=val)

    # temporary vector to store membrane currents at each timestep:
    imem = np.zeros(network_dummycell.totnsegs, dtype=dtype)

    def get_imem(imem):
        '''helper function to gather currents across all cells
        on this RANK'''
        i = 0
        totnsegs = 0
        if use_isyn:
            imem['isyn_e'] = 0.  # must reset these for every iteration
            imem['isyn_i'] = 0.  # because we sum over synapses
        for cell in cells:
            for sec in cell.allseclist:
                for seg in sec:
                    imem['imem'][i] = seg.i_membrane_
                    if use_ipas:
                        imem['ipas'][i] = seg.i_pas
                    if use_icap:
                        imem['icap'][i] = seg.i_cap
                    i += 1
            if use_isyn:
                # reversal potential above -50 mV taken as excitatory
                for idx, syn in zip(cell.synidx, cell.netconsynapses):
                    if hasattr(syn, 'e') and syn.e > -50:
                        imem['isyn_e'][idx + totnsegs] += syn.i
                    else:
                        imem['isyn_i'][idx + totnsegs] += syn.i
            totnsegs += cell.totnsegs
        return imem

    # run fadvance until time limit, and calculate LFPs for each timestep
    tstep = 0
    while neuron.h.t < self.tstop:
        if neuron.h.t >= 0:
            imem = get_imem(imem)
            for j, (probe, M) in enumerate(zip(probes, transforms)):
                probe.data['imem'][:, tstep] = M @ imem['imem']
                if use_ipas:
                    probe.data['ipas'][:, tstep] = \
                        M @ (imem['ipas'] * network_dummycell.area * 1E-2)
                if use_icap:
                    probe.data['icap'][:, tstep] = \
                        M @ (imem['icap'] * network_dummycell.area * 1E-2)
                if use_isyn:
                    probe.data['isyn_e'][:, tstep] = M @ imem['isyn_e']
                    probe.data['isyn_i'][:, tstep] = M @ imem['isyn_i']
            if rec_pop_contributions:
                for j, (probe, M) in enumerate(zip(probes, transforms)):
                    k = 0  # counter
                    for nsegs, pop_name in zip(population_nsegs,
                                               self.population_names):
                        cellinds = np.arange(k, k + nsegs)
                        probe.data[pop_name][:, tstep] = \
                            M[:, cellinds] @ imem['imem'][cellinds, ]
                        k += nsegs
            tstep += 1
        neuron.h.fadvance()
        if neuron.h.t % 100. == 0.:
            if RANK == 0:
                print('t = {} ms'.format(neuron.h.t))

    try:
        # calculate LFP after final fadvance(), skipped if IndexError is
        # encountered
        imem = get_imem(imem)
        for j, (probe, M) in enumerate(zip(probes, transforms)):
            probe.data['imem'][:, tstep] = M @ imem['imem']
            if use_ipas:
                probe.data['ipas'][:, tstep] = \
                    M @ (imem['ipas'] * network_dummycell.area * 1E-2)
            if use_icap:
                probe.data['icap'][:, tstep] = \
                    M @ (imem['icap'] * network_dummycell.area * 1E-2)
            if use_isyn:
                probe.data['isyn_e'][:, tstep] = M @ imem['isyn_e']
                probe.data['isyn_i'][:, tstep] = M @ imem['isyn_i']
        if rec_pop_contributions:
            for j, (probe, M) in enumerate(zip(probes, transforms)):
                k = 0  # counter
                for nsegs, pop_name in zip(population_nsegs,
                                           self.population_names):
                    cellinds = np.arange(k, k + nsegs)
                    probe.data[pop_name][:, tstep] = \
                        M[:, cellinds] @ imem['imem'][cellinds, ]
                    k += nsegs
    except IndexError:
        pass

    if to_file:
        outputfile.close()
def ReduceStructArray(sendbuf, op=MPI.SUM):
    """
    Simplify MPI Reduce for structured ndarrays with floating point numbers.

    Parameters
    ----------
    sendbuf: structured ndarray
        Array data to be reduced (default: summed)
    op: mpi4py.MPI.Op object
        MPI_Reduce function. Default is mpi4py.MPI.SUM

    Returns
    -------
    recvbuf: structured ndarray or None
        Reduced array on RANK 0, None on all other RANKs
    """
    on_root = RANK == 0

    # root knows the array layout; broadcast it to every RANK
    if on_root:
        shape, field_names = sendbuf.shape, sendbuf.dtype.names
    else:
        shape, field_names = None, None
    shape = COMM.bcast(shape)
    field_names = COMM.bcast(field_names)

    # allocate the result (float64 per field) on the root RANK only
    if on_root:
        out_dtype = [(fname, 'f8') for fname in field_names]
        reduced = np.zeros(shape, dtype=out_dtype)
    else:
        reduced = None

    # reduce one field at a time into a plain float buffer
    for fname in field_names:
        target = np.zeros(shape) if on_root else None
        COMM.Reduce(np.array(sendbuf[fname]), target, op=op, root=0)
        if on_root:
            reduced[fname] = target

    return reduced
| LFPy/LFPy | LFPy/network.py | Python | gpl-3.0 | 69,051 | [
"NEURON"
] | e8907f13bebfa657e927bc6eac7b2255c2ee26a340b7d7dcc81692f3f743ada1 |
from __future__ import absolute_import
from __future__ import print_function
import unittest
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.adsorption import *
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen import Structure, Lattice, Molecule
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class AdsorbateSiteFinderTest(PymatgenTest):
def setUp(self):
self.structure = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.5),
["Ni"], [[0, 0, 0]])
lattice = Lattice.cubic(3.010)
frac_coords = [[0.00000, 0.00000, 0.00000],
[0.00000, 0.50000, 0.50000],
[0.50000, 0.00000, 0.50000],
[0.50000, 0.50000, 0.00000],
[0.50000, 0.00000, 0.00000],
[0.50000, 0.50000, 0.50000],
[0.00000, 0.00000, 0.50000],
[0.00000, 0.50000, 0.00000]]
species = ['Mg', 'Mg', 'Mg', 'Mg', 'O', 'O', 'O', 'O']
self.MgO = Structure(lattice, species, frac_coords)
slabs = generate_all_slabs(self.structure, max_index=2,
min_slab_size=6.0, min_vacuum_size=15.0,
max_normal_search=1, center_slab=True)
self.slab_dict = {''.join([str(i) for i in slab.miller_index]):
slab for slab in slabs}
self.asf_211 = AdsorbateSiteFinder(self.slab_dict["211"])
self.asf_100 = AdsorbateSiteFinder(self.slab_dict["100"])
self.asf_111 = AdsorbateSiteFinder(self.slab_dict["111"])
self.asf_110 = AdsorbateSiteFinder(self.slab_dict["110"])
self.asf_struct = AdsorbateSiteFinder(
Structure.from_sites(self.slab_dict["111"].sites))
def test_init(self):
asf_100 = AdsorbateSiteFinder(self.slab_dict["100"])
asf_111 = AdsorbateSiteFinder(self.slab_dict["111"])
def test_from_bulk_and_miller(self):
# Standard site finding
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 1))
sites = asf.find_adsorption_sites()
self.assertEqual(len(sites['hollow']), 2)
self.assertEqual(len(sites['bridge']), 1)
self.assertEqual(len(sites['ontop']), 1)
self.assertEqual(len(sites['all']), 4)
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 0, 0))
sites = asf.find_adsorption_sites()
self.assertEqual(len(sites['all']), 3)
self.assertEqual(len(sites['bridge']), 2)
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 0),
undercoord_threshold=0.1)
self.assertEqual(len(asf.surface_sites), 1)
# Subsurface site finding
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 1))
sites = asf.find_adsorption_sites(positions=["ontop", "subsurface", "bridge"])
self.assertEqual(len(sites['all']), 4)
self.assertEqual(len(sites['subsurface']), 3)
def test_find_adsorption_sites(self):
sites = self.asf_100.find_adsorption_sites()
self.assertEqual(len(sites['all']), 3)
self.assertEqual(len(sites['hollow']), 0)
self.assertEqual(len(sites['bridge']), 2)
self.assertEqual(len(sites['ontop']), 1)
sites = self.asf_111.find_adsorption_sites()
self.assertEqual(len(sites['all']), 4)
sites = self.asf_110.find_adsorption_sites()
self.assertEqual(len(sites['all']), 4)
sites = self.asf_211.find_adsorption_sites()
# Test on structure
sites = self.asf_struct.find_adsorption_sites()
def test_generate_adsorption_structures(self):
co = Molecule("CO", [[0, 0, 0], [0, 0, 1.23]])
structures = self.asf_111.generate_adsorption_structures(co, repeat=[2, 2, 1])
self.assertEqual(len(structures), 4)
sites = self.asf_111.find_adsorption_sites()
# Check repeat functionality
self.assertEqual(len([site for site in structures[0] if
site.properties['surface_properties'] != 'adsorbate']),
4 * len(self.asf_111.slab))
for n, structure in enumerate(structures):
self.assertArrayAlmostEqual(structure[-2].coords, sites['all'][n])
find_args = {"positions": ["hollow"]}
structures_hollow = self.asf_111. \
generate_adsorption_structures(co, find_args=find_args)
self.assertEqual(len(structures_hollow), len(sites['hollow']))
for n, structure in enumerate(structures_hollow):
self.assertTrue(in_coord_list(sites['hollow'], structure[-2].coords))
def test_adsorb_both_surfaces(self):
# Test out for monatomic adsorption
o = Molecule("O", [[0, 0, 0]])
adslabs = self.asf_100.adsorb_both_surfaces(o)
adslabs_one = self.asf_100.generate_adsorption_structures(o)
self.assertEqual(len(adslabs), len(adslabs_one))
for adslab in adslabs:
sg = SpacegroupAnalyzer(adslab)
sites = sorted(adslab, key=lambda site: site.frac_coords[2])
self.assertTrue(sites[0].species_string == "O")
self.assertTrue(sites[-1].species_string == "O")
self.assertTrue(sg.is_laue())
# Test out for molecular adsorption
oh = Molecule(["O", "H"], [[0, 0, 0], [0, 0, 1]])
adslabs = self.asf_100.adsorb_both_surfaces(oh)
adslabs_one = self.asf_100.generate_adsorption_structures(oh)
self.assertEqual(len(adslabs), len(adslabs_one))
for adslab in adslabs:
sg = SpacegroupAnalyzer(adslab)
sites = sorted(adslab, key=lambda site: site.frac_coords[2])
self.assertTrue(sites[0].species_string in ["O", "H"])
self.assertTrue(sites[-1].species_string in ["O", "H"])
self.assertTrue(sg.is_laue())
def test_generate_substitution_structures(self):
    """Substitutional doping of halite (MgO) slabs with Ni."""
    # Test this for a low miller index halite structure
    slabs = generate_all_slabs(self.MgO, 1, 10, 10, center_slab=True,
                               max_normal_search=1)
    for slab in slabs:
        adsgen = AdsorbateSiteFinder(slab)
        adslabs = adsgen.generate_substitution_structures("Ni")
        # There should be 2 configs (sub O and sub
        # Mg) for (110) and (100), 1 for (111)
        if tuple(slab.miller_index) != (1,1,1):
            self.assertEqual(len(adslabs), 2)
        else:
            self.assertEqual(len(adslabs), 1)

        # Test out whether it can correctly dope both
        # sides. Avoid (111) because it is not symmetric
        if tuple(slab.miller_index) != (1,1,1):
            adslabs = adsgen.generate_substitution_structures("Ni", sub_both_sides=True,
                                                              target_species=["Mg"])
            # Test if default parameters dope the surface site
            for i, site in enumerate(adslabs[0]):
                if adsgen.slab[i].surface_properties == "surface" \
                        and site.species_string == "Mg":
                    print(adslabs[0][i].surface_properties,
                          adsgen.slab[i].surface_properties)
                    self.assertTrue(adslabs[0][i].surface_properties == "substitute")
            # Doping both sides must preserve the slab's symmetry.
            self.assertTrue(adslabs[0].is_symmetric())
            # Correctly dope the target species
            self.assertEqual(adslabs[0].composition.as_dict()["Mg"],
                             slab.composition.as_dict()["Mg"]-2)
            # There should be one config (sub Mg)
            self.assertEqual(len(adslabs), 1)
def test_functions(self):
    """Smoke-test the rotation helpers on the (111) slab.

    Only checks that these calls run without raising; no values asserted.
    """
    slab = self.slab_dict["111"]
    get_rot(slab)
    reorient_z(slab)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| czhengsci/pymatgen | pymatgen/analysis/tests/test_adsorption.py | Python | mit | 8,128 | [
"pymatgen"
] | 38848a00c08dfb01c547b0174d0ef8a2fff8096c8b7827ce166f9aa15931bfcd |
'''
digitallockin.py, Digital Lockin amplifier using NI PXI devices
Copyright: Zeust the Unoobian <2noob2banoob@gmail.com>, 2014
This file is part of DigitalLockin.
DigitalLockin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DigitalLockin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DigitalLockin. If not, see <http://www.gnu.org/licenses/>.
'''
import matplotlib.pyplot as plt
import numpy
import sys
import random
import logging
import time #for benchmark
try:
import digitallockinhwinterface as hwi
CAN_MEASURE = True
MAX_SAMPLE_FREQUENCY = hwi.MAX_SAMPLE_FREQUENCY_MEAS_DAQ
logging.info('Loaded hardware interface module.')
except Exception:
CAN_MEASURE = False
MAX_SAMPLE_FREQUENCY = 200000
logging.exception('Could not load hardware interface. You can perform simulated lock-in sequences on virtual noise sources but not actual measurements on physical devices. Details:')
# Simple Euler integration
# A sample of the output is the sum of all input samples up to the same sample number
# Uses a unit timestep instead of taking the timestep as a parameter so the result is not properly scaled
def _integrate(x):
if x[0].size != 1:
y=numpy.zeros([len(x),len(x[0])])
for i in range(len(x)):
y[i][:] = _integrate(x[i][:])
else:
y = numpy.zeros(x.size)
y[0] = x[0]
for i in xrange(1,x.size):
y[i] = y[i-1] + x[i]
return y
# Filtering function
# Every filter is realised as a cascade of first-order alpha filters
# The initial value of each filter is the average of its input
# The alpha values of subsequent first-order filters are supplied as a list
# For a first-order filter you can also supply just the element rather than a one-dimensional list
# The alpha-cascade implementation has some limitations, because it does not support complex poles
def _filter(x, alpha=numpy.pi/200000):
if isinstance(x[0], list) or isinstance(x[0], numpy.ndarray):
y = numpy.zeros([len(x), len(x[0])])
for i in range(len(x)):
y[i][:] = _filter(x[i][:], alpha)
return y
if not isinstance(alpha, list):
alpha = [alpha]
if len(alpha) == 0:
return x
#yinit = numpy.average(x)
yinit = 0
a = alpha[-1]
am = 1 - a
y = numpy.zeros(len(x))
y[0] = am * yinit + a * x[0]
for i in xrange(1, len(x)):
y[i] = am * y[i-1] + a * x[i]
return _filter(y, alpha[:-1])
#Generate a zero-mean noise signal with specified standard deviation and number of samples
def _gaussiannoise(sigma, numsamples):
y = numpy.zeros(numsamples)
for i in xrange(numsamples):
y[i] = random.gauss(0, sigma)
y -= numpy.average(y)
return y
class DigitalLockin:
'''
Digital Lock-in Amplifier class
This class does NOT do the lock-in in real-time
Can initialise hardware, run measurements, process data and make some plots
This class is also capable of running a simulated measurement with Gaussian noise
In this case it always generates a sine wave of amplitude 1
The amplitude argument to the constructor is then interpreted as the noise sigma
It also generates only one output channel regardless of the number of specified channels
'''
################################################
##### Initialization and closing functions #####
################################################
def __init__(self, gen_dev='PXI5412_12', meas_dev='PXI4462_3', gen_ch='0', gen_meas_ch='ai0', meas_ch='ai1', Fs=MAX_SAMPLE_FREQUENCY, Fsignal=MAX_SAMPLE_FREQUENCY*101./20201, gen_amplitude=1, gen_output_impedance=50, simulated=False):
    '''
    The constructor

    Arguments:
    gen_dev : string [Default: 'PXI5412_12']
        The waveform generator device ID/location
        This argument is ignored in simulation mode
    meas_dev : string [Default: 'PXI4462_3']
        The signal analyser device ID/location
        This argument is ignored in simulation mode
    gen_ch : string [Default: '0']
        The generation channel name
        This argument is ignored in simulation mode
    gen_meas_ch : string [Default: 'ai0']
        The channel for measuring the generated input signal
        This argument is ignored in simulation mode
    meas_ch : string [Default: 'ai1']
        The channel(s) for measuring output signals of your device under measurement
        Example values:
            'ai1' (Single channel, value notation)
            ['ai1'] (Single channel, list notation)
            ['ai1', 'ai2'] (Multiple channels)
        This argument is ignored in simulation mode
    Fs : float [Default: 204800]
        Sample frequency of the signal analyser in Hz
    Fsignal : float [Default: 1000]
        Frequency of the generated input sine wave in Hz
    gen_amplitude : float [Default: 1]
        Amplitude of the generated signal in V
        When in simulation mode, this is the noise amplitude instead
    gen_output_impedance : float [Default: 50]
        Output impedance of the waveform generator in Ohm
        This argument is ignored in simulation mode
    simulated : boolean [Default: False]
        True for simulation mode, False for measurement mode
    '''
    self._simulated = simulated
    self._is_measuring = False
    self.gen_dev_str = gen_dev  # Not gonna make a getter and setter for a variable which isn't internally used
    self.meas_dev_str = meas_dev  # Not gonna make a getter and setter for a variable which isn't internally used
    # Initialise all data holders and the default filter time constant.
    self.free_data()
    self.set_flt_time_constant()
    if simulated:
        self._simulation_noise_amplitude = gen_amplitude
        self._hw = None
        self._fs = Fs
        self._f = Fsignal
    elif CAN_MEASURE:
        # NOTE(review): gen_meas_ch is accepted but not forwarded to the
        # hardware interface here -- confirm whether that is intended.
        self._hw = hwi.MeasurementHardwareInterface(gen_dev=gen_dev, meas_dev=meas_dev, gen_ch=gen_ch, meas_ch=meas_ch, Fs=Fs, Fsignal=Fsignal, gen_amplitude=gen_amplitude, gen_output_impedance=gen_output_impedance)
        self._simulation_noise_amplitude = None
        # Read back the rates actually configured by the hardware (they
        # may differ from the requested values).
        self._fs = self._hw.get_meas_sample_frequency()
        self._f = self._hw.get_gen_signal_frequency()
        # Continuous-mode filter state: one slot per measurement channel
        # plus one for the measured reference signal.
        self._x1 = numpy.zeros(len(self._hw.get_measurement_channels()) + 1)
        self._y1 = numpy.zeros(len(self._hw.get_measurement_channels()) + 1)
        self._x2 = numpy.zeros(len(self._hw.get_measurement_channels()) + 1)
        self._y2 = numpy.zeros(len(self._hw.get_measurement_channels()) + 1)
    else:
        raise RuntimeError('Cannot load hardware interface module so cannot initialise in measurement mode')
def close_hardware(self):
    """Close hardware tasks and sessions and release their handles."""
    hw = self._hw
    # In simulation mode (or after closing) there is nothing to release.
    if hw is not None:
        hw.close()
def free_rawdata(self):
    """Drop the raw measurement data so its memory can be reclaimed.

    ! This cannot be undone !
    """
    self._rawdata = None
def free_intermediate_calc_results(self):
    """Drop intermediate data-processing results to reclaim memory."""
    # Synchronous-detection products, their integrals, and the
    # per-sample amplitude/phase arrays.
    for attr in ('_sinx', '_cosx', '_sini', '_cosi',
                 '_amplitudes', '_phases'):
        setattr(self, attr, None)
def free_t(self):
    """Drop the convenience time vector to reclaim its memory."""
    self._t = None
def free_calcdata(self):
    """Drop the time vector plus all intermediate and final processing results."""
    self.free_t()
    self.free_intermediate_calc_results()
    # Normalised end results and the reference amplitude.
    for attr in ('_normamplitudes', '_normphases', '_gen_meas_amplitude'):
        setattr(self, attr, None)
def free_data(self):
    """Erase all measured data and processing results, freeing their memory.

    ! This cannot be undone !
    """
    self.free_rawdata()
    self.free_calcdata()
    # Reset the continuous-mode filter states and the detection phase.
    self._x1 = self._y1 = self._x2 = self._y2 = 0.
    self._phi = 0.
def close(self):
    '''
    Release all resources used by this object
    This includes used memory as well as hardware tasks and sessions
    ! This cannot be undone !
    '''
    # Hardware first, then the in-memory data and results.
    self.close_hardware()
    self.free_data()
############################
##### Universal setter #####
############################
def set(self, F=None, A=None, Fs=None):
    '''Universal setter for signal frequency <F>, sample frequency <Fs> and signal amplitude <A>'''
    if F is not None:
        if self._simulated:
            self._f = F
        else:
            # The hardware may reject/adjust the requested frequency;
            # log the warning and read back the value actually set.
            try:
                self._hw.set_gen_signal_frequency(F)
            except RuntimeWarning as w:
                logging.warning(str(w))
            self._f = self._hw.get_gen_signal_frequency()
    if A is not None:
        # Amplitude is ignored in simulation mode (fixed unit amplitude).
        if not self._simulated:
            try:
                self._hw.set_gen_signal_amplitude(A)
            except RuntimeWarning as w:
                logging.warning(str(w))
    if Fs is not None:
        if self._simulated:
            self._fs = Fs
        else:
            self._hw.set_meas_sample_frequency(Fs)
            self._fs = self._hw.get_meas_sample_frequency()
def set_channels(self, meas_ch=None, gen_meas_ch=None, gen_ch=None):
    """Set measurement channels <meas_ch>, generated signal measurement channel <gen_meas_ch> and/or generator channel <gen_ch>."""
    if meas_ch is not None:
        self._hw.set_meas_channels(meas_ch)
        # Re-size the continuous-mode filter state: one slot per
        # measurement channel plus one for the reference signal.
        n_state = len(self._hw.get_measurement_channels()) + 1
        self._x1 = numpy.zeros(n_state)
        self._y1 = numpy.zeros(n_state)
        self._x2 = numpy.zeros(n_state)
        self._y2 = numpy.zeros(n_state)
    if gen_meas_ch is not None:
        self._hw.set_gen_meas_channel(gen_meas_ch)
    if gen_ch is not None:
        self._hw.set_gen_channel(gen_ch)
###################
##### Getters #####
###################
def get_f(self):
    '''Get generated signal frequency (Hz)'''
    return self._f
def get_fs(self):
    '''Get sample frequency (Hz)'''
    return self._fs
def get_gen_amplitude(self):
    """Get generated signal amplitude."""
    # Simulated runs always generate a unit-amplitude sine.
    if self._simulated:
        return 1
    return self._hw.get_gen_signal_amplitude()
def get_num_meas_ch(self):
    '''Get number of measurement channels (excluding the one for the generated signal)'''
    # NOTE: in simulation mode self._hw is None, so this only works in
    # measurement mode.
    return len(self._hw.get_measurement_channels())
#################################
##### Measurement functions #####
#################################
def run_measurement(self, periods):
    '''
    In measurement mode:
        Start the signal generators,
        run a measurement for <periods> signal periods and save the raw data,
        then stop the signal generators again
    In simulation mode:
        Save a perfect sine as input signal
        Add gaussian noise to that sine and save it as output signal
    '''
    if self._simulated:
        # Round to a whole number of samples covering <periods> periods.
        num_samples = int(round(periods * self._fs / self._f))
        t = numpy.array(range(num_samples)) * 1. / self._fs
        sig_in = numpy.sin(2 * numpy.pi * self._f * t)
        sig_out = sig_in + _gaussiannoise(self._simulation_noise_amplitude, num_samples)
        # Row 0: reference signal; row 1: noisy "measured" channel.
        self._rawdata = numpy.array([sig_in, sig_out])
    else:
        self._hw.start_generation()
        self._rawdata = self._hw.measure_periods(periods)
        self._hw.stop_generation()
def start_measurement(self, bufsize=409600):
    """Start the waveform generators and tell the analysers to collect samples.

    No samples are transferred to the PC yet; in simulation mode this only
    flips the measuring flag so samples may be synthesised later.
    """
    if self._is_measuring:
        raise RuntimeWarning('Already measuring')
    self._is_measuring = True
    if not self._simulated:
        self._hw.start_generation()
        self._hw.start_measurement(bufsize=bufsize)
def retrieve_samples(self, samples, append=False):
    """Retrieve <samples> samples if the device is currently measuring.

    samples: number of samples to fetch (or synthesise in simulation mode)
    append: when True, concatenate to the existing raw data along the
        sample axis; otherwise overwrite self._rawdata
    Raises RuntimeWarning when no measurement is running.
    """
    if not self._is_measuring:
        # BUG FIX: the message was never %-formatted -- the sample count
        # was passed as a second constructor argument to RuntimeWarning.
        raise RuntimeWarning(
            'Tried to retrieve %d samples from non-measuring device'
            % samples)
    if self._simulated:
        # Synthesise a clean reference sine plus a noisy copy of it.
        # NOTE(review): each call restarts the phase at t=0, as the
        # original did -- confirm this is acceptable for appended data.
        t = numpy.array(range(samples)) / float(self._fs)
        sig_in = numpy.sin(2 * numpy.pi * self._f * t)
        sig_out = sig_in + _gaussiannoise(
            self._simulation_noise_amplitude, samples)
        rawdata = numpy.array([sig_in, sig_out])
    else:
        rawdata = self._hw.retrieve_samples(samples)
    if append:
        self._rawdata = numpy.append(self._rawdata, rawdata, 1)
    else:
        self._rawdata = rawdata
def retrieve_periods(self, periods, append=False):
    '''Retrieve <periods> signal periods worth of samples if the device is currently measuring'''
    # Convert periods to a whole number of samples at the current rates.
    self.retrieve_samples(int(round(float(self._fs) / self._f * periods)), append)
def retrieve_seconds(self, seconds, append=False):
    '''
    Retrieve samples for <seconds> seconds if the device is currently measuring
    Automatically rounds to an integer number of signal periods so you don't have to worry about artefacts caused by a non-integer amount of periods
    '''
    self.retrieve_periods(round(self._f * seconds), append)
    # Return the actually-retrieved duration after both roundings
    # (whole periods, then whole samples).
    return round(round(self._f * seconds) * self._fs / self._f) / self._fs
def stop_measurement(self):
    """Stop the collection of samples and the waveform generators."""
    if not self._is_measuring:
        raise RuntimeWarning("Tried stopping device from measuring but it already wasn't")
    if not self._simulated:
        self._hw.end_measurement()
        self._hw.stop_generation()
    self._is_measuring = False
def is_measuring(self):
    '''Return True while a measurement is running, False otherwise'''
    return self._is_measuring
def num_measured_samples_in_instrument_buffer(self):
    '''
    Find out how many samples are left in the instrument's sample buffer.
    Returns None on failure and 0 during simulation or when not measuring.
    '''
    if self._is_measuring and not self._simulated:
        # The hardware call itself may return None on failure.
        return self._hw.measured_samples_in_instrument_buffer()
    else:
        return 0
##############################################################
##### Measure and filter function for continuous lock-in #####
##############################################################
def continuous_retrieve_and_filter(self):
    '''
    Retrieve all samples in instrument buffer if the device is currently measuring
    Apply synchronous detection (i.e. multiply with a sine and cosine)
    Filter the result with a second-order filter (two cascaded normalising alpha filters)
    Higher-order filters may be implementable in the future, but it is not
    trivial because the optimized numpy implementation requires that the samples
    be processed in bulk and only the last sample of the filter output is calculated
    Each normalising alpha filter works like:
         /-----------\        /---\
    ---->| 1 - alpha |------->| + |-------------+----->
         \-----------/        \---/             |
                                ^               |
                                |   /-------\   |
                                \---| alpha |<--/
                                    \-------/
    '''
    if self._is_measuring and not self._simulated:
        # Calculate -ln(alpha) and -ln(1-alpha); working in log space
        # keeps the exponent arithmetic below numerically stable.
        mlnalpha = - numpy.log(1 - 1. / self._flt_tau / self._fs)
        mlnialpha = numpy.log(self._flt_tau * self._fs)
        # Acquire all samples currently buffered (-1 = everything).
        rawdata = self._hw.retrieve_samples(-1, .1, True)
        channels = rawdata.shape[0]
        samples = rawdata.shape[1]
        # Calculate phases for synchronous detection
        multfac = 2 * numpy.pi * self._f / self._fs
        phi = numpy.arange(self._phi, self._phi + multfac * (samples-0.5), multfac)
        # The following phase expression may drift over time due to rounding errors
        # But that'll only affect the detected common mode phase which is arbitrary and rejected anyway
        self._phi = numpy.mod(self._phi + 2 * numpy.pi * self._f / self._fs * samples, 2 * numpy.pi)
        # Calculate multiplication factors for filters
        # Sample to output of first filter
        flt1weight = numpy.exp(numpy.arange(-(samples-1) * mlnalpha - mlnialpha, mlnalpha/2 - mlnialpha, mlnalpha))
        # initvalmulfac1*Sample to output of second filter
        flt2weightr = numpy.repeat((numpy.exp(-mlnialpha) * numpy.arange(samples, 0.5, -1)).reshape([1,samples]), channels, axis=0)
        # Initial value to output of the same alpha filter
        initvalmulfac1 = numpy.exp(- mlnalpha * samples)
        # Initial value to output of next alpha filter
        initvalmulfac2 = samples * numpy.exp(- mlnalpha * samples - mlnialpha)
        # Perform synchronous detection and filtering
        f1sin = rawdata * numpy.repeat((numpy.sin(phi) * flt1weight).reshape([1,samples]), channels, axis=0)
        f1cos = rawdata * numpy.repeat((numpy.cos(phi) * flt1weight).reshape([1,samples]), channels, axis=0)
        f2sin = f1sin * flt2weightr
        f2cos = f1cos * flt2weightr
        # Second-order state is updated before first-order: the second
        # stage consumes the first stage's PREVIOUS value.
        self._x2 = initvalmulfac1 * self._x2 + initvalmulfac2 * self._x1 + f2sin.sum(axis=1)
        self._y2 = initvalmulfac1 * self._y2 + initvalmulfac2 * self._y1 + f2cos.sum(axis=1)
        self._x1 = initvalmulfac1 * self._x1 + f1sin.sum(axis=1)
        self._y1 = initvalmulfac1 * self._y1 + f1cos.sum(axis=1)
    elif self._is_measuring:
        raise RuntimeError('Continuous running mode not supported for simulation')
    else:
        raise RuntimeWarning('Tried to retrieve samples from non-measuring device')
def continuous_get_r_phi(self):
    '''
    Return (r, phi) for the continuous lock-in.

    The first- and second-order filter states are concatenated, so both
    halves of each array cover all channels: element 0 (and the midpoint)
    hold the reference; the rest are normalised to their half's reference.
    '''
    #r = numpy.sqrt(numpy.append(self._x1, self._x2 / (2*self._flt_tau * self._fs)**2)**2 + numpy.append(self._y1, self._y2 / (2*self._flt_tau * self._fs)**2)**2)
    r = numpy.sqrt(numpy.append(self._x1, self._x2)**2 + numpy.append(self._y1, self._y2)**2)
    #r = numpy.sqrt(self._x2**2 + self._y2**2)
    #r[1:] /= r[0]
    # Normalise each half to its own reference channel.
    r[1:int(len(r)/2)] /= r[0]
    r[int(len(r)/2)+1:] /= r[int(len(r)/2)]
    #r[0] /= self._flt_tau * self._fs / 2
    # Scale the two reference amplitudes to volts (factor 2).
    r[0::int(len(r)/2)] *= 2
    #phi = numpy.arctan2(self._y2, self._x2)
    phi = numpy.arctan2(numpy.append(self._y1, self._y2), numpy.append(self._x1, self._x2))
    # Phases relative to the reference, wrapped into (-pi, pi].
    phi = numpy.mod(phi[1:] - phi[0] + numpy.pi, 2 * numpy.pi) - numpy.pi
    phi[int(len(r)/2):] -= phi[int(len(r)/2) - 1]
    return (r, phi)
def set_flt_alpha(self, alpha=0.9):
    '''Set the filter strength via the per-sample alpha coefficient.

    Stored internally as the equivalent time constant
    tau = 1 / (fs * (1 - alpha)).
    '''
    self._flt_tau = 1. / self._fs / (1 -alpha)
def set_flt_time_constant(self, tau=0.1):
    '''Set the filter time constant <tau> in seconds (used by the continuous lock-in filters).'''
    self._flt_tau = tau
#####################################
##### Data processing functions #####
#####################################
def process_data(self):
    '''Perform lock-in analysis and save results in memory'''
    # Guard against being called before any raw data exists; the
    # try/except also covers the attribute being absent entirely.
    try:
        if self._rawdata is None:
            raise RuntimeError()
    except Exception:
        logging.error('No raw data found')
        return
    # Reference phase at every sample of the record.
    phi = 2*numpy.pi*self._f * numpy.array(range(len(self._rawdata[0]))) / float(self._fs)
    # Synchronous detection: project every channel on sin/cos of the reference.
    sini = (numpy.sin(phi) * self._rawdata).sum(axis=1)
    cosi = (numpy.cos(phi) * self._rawdata).sum(axis=1)
    amplitudes = numpy.sqrt(sini**2 + cosi**2)
    # arctan2(cos, sin): phase measured relative to the sine reference.
    phases = numpy.arctan2(cosi, sini)
    # Channel 0 is the measured generator signal; normalise the rest to it.
    self._normamplitudes = amplitudes[1:] / amplitudes[0]
    normphases = phases[1:] - phases[0]
    # Wrap phase differences into (-pi, pi].
    self._normphases = numpy.mod(normphases + numpy.pi, 2 * numpy.pi) - numpy.pi
    # Convert the raw projection sum into volts (factor 2/N).
    self._gen_meas_amplitude = 2 * amplitudes[0] / float(len(phi))
    return (self._gen_meas_amplitude, self._normamplitudes, self._normphases)
def process_data_moreinfo(self, fltord=0, RC=1/numpy.pi):
    '''
    Perform lock-in analysis and save results in memory
    Saves lots of intermediate calculated values
    Also has the option to apply a cascade of <fltord> identical RC-filters with RC-time <RC> before integration
    '''
    try:
        if self._rawdata is None:
            raise RuntimeError()
    except Exception:
        logging.error('No raw data found')
        return
    self._t = numpy.array(range(len(self._rawdata[0])))*1./self._fs
    tsin = numpy.sin(2*numpy.pi*self._f*self._t)
    tcos = numpy.cos(2*numpy.pi*self._f*self._t)
    # Synchronous detection products for every channel.
    self._sinx = tsin * self._rawdata
    self._cosx = tcos * self._rawdata
    if fltord == 0:
        self._sinf = self._sinx
        self._cosf = self._cosx
    else:
        # Cascade of <fltord> identical first-order RC sections.
        alpha = 1 / (RC * self._fs)
        self._sinf = _filter(self._sinx, [alpha]*fltord)
        self._cosf = _filter(self._cosx, [alpha]*fltord)
    self._sini = _integrate(self._sinf)
    self._cosi = _integrate(self._cosf)
    # Running amplitude/phase estimate after each sample.
    self._amplitudes = numpy.sqrt(self._sini**2 + self._cosi**2)
    self._phases = numpy.arctan2(self._cosi, self._sini)
    self._normamplitudes_all = self._amplitudes[1:][:] / self._amplitudes[0][:]
    self._normphases_all = numpy.mod(self._phases[1:][:] - self._phases[0][:] + numpy.pi, 2*numpy.pi) - numpy.pi
    # NOTE(review): assumes fs is an integer multiple of f so that this
    # is an integer (it is used as a slice step below); on Python 3 a
    # non-integer ratio would make this a float and fail -- TODO confirm.
    sampsperperiod = self._fs / self._f
    # Subsample the running estimates at whole-period boundaries.
    self._t_smp = self._t[sampsperperiod-1::sampsperperiod]
    self._normamplitudes_smp = [[]]*len(self._normamplitudes_all)
    self._normphases_smp = [[]]*len(self._normphases_all)
    self._normamplitudes = numpy.zeros(len(self._normamplitudes_all))
    self._normphases = numpy.zeros(len(self._normphases_all))
    for i in range(len(self._normamplitudes)):
        self._normamplitudes_smp[i] = self._normamplitudes_all[i][sampsperperiod-1::sampsperperiod]
        self._normphases_smp[i] = self._normphases_all[i][sampsperperiod-1::sampsperperiod]
        # End results: the estimate after the final sample.
        self._normamplitudes[i] = self._normamplitudes_all[i][-1]
        self._normphases[i] = self._normphases_all[i][-1]
    self._normamplitudes_smp = numpy.array(self._normamplitudes_smp)
    self._normphases_smp = numpy.array(self._normphases_smp)
    # Convert the raw projection sum into volts (factor 2/N).
    self._gen_meas_amplitude = 2 * self._amplitudes[0][-1] / float(len(self._t))
    return (self._gen_meas_amplitude, self._normamplitudes, self._normphases)
#############################
##### Display functions #####
#############################
def plot_results(self):
    '''
    Plot how the results of the lock-in measurement evolve as measurement extends across more signal periods
    Very useful when testing the influence of various lock-in parameters, in particular the number of periods during which is measured
    Less useful when doing a lot of lock-in measurements and only comparing end results
    Only works after process_data_moreinfo()
    '''
    try:
        if self._normamplitudes_all is None or self._normphases_all is None:
            raise RuntimeError()
    except Exception:
        logging.error('No intermediate processing data found')
    # Rebuild the time vector if it has been freed.
    if self._t is None:
        t = numpy.array(range(len(self._rawdata[0])))*1./self._fs
    else:
        t = self._t
    f, sf = plt.subplots(3,1)
    try:
        #print('%dx%d' % (len(self._rawdata[1:][:]), len(self._rawdata[0][:])))
        rawout = self._rawdata[1:][:]
        #rawout.reverse()
        sf[0].plot(t, numpy.array(rawout).T)
        # With many period samples, plotting the per-sample traces too
        # would be unreadable; show only the per-period markers then.
        if len(self._normamplitudes_smp.T) > 100:
            sf[1].semilogy(self._t_smp, self._normamplitudes_smp.T)
            sf[2].plot(self._t_smp, self._normphases_smp.T)
        else:
            sf[1].semilogy(self._t_smp, self._normamplitudes_smp.T, 'o', t, self._normamplitudes_all.T)
            sf[2].plot(self._t_smp, self._normphases_smp.T, 'o', t, self._normphases_all.T)
            # Clip the y-axis when early transients dwarf the end values.
            if self._normamplitudes_all.max() > 10 * self._normamplitudes_smp.max():
                sf[1].set_ylim([0, 2*max(max(self._normamplitudes_smp))])
        plt.show()
        time.sleep(0.1)
    except Exception as e:
        print('plotting error:\n%s' % str(e))
def printmainresults(self, compactfmt=True):
    '''Print a terminal message with only the end value of the amplitude and phase of the integrated signal'''
    try:
        if self._normamplitudes is None or self._normphases is None or self._gen_meas_amplitude is None:
            raise RuntimeError()
        else:
            if compactfmt:
                # One line: reference amplitude plus per-channel
                # amplitude/phase (degrees).
                strres = 'Ref({:.2e})'.format(self._gen_meas_amplitude)
                for i in range(len(self._normamplitudes)):
                    strres += ', ch{:d}({:.3e} {:.0f}deg)'.format(i+1, self._normamplitudes[i], 180/numpy.pi * self._normphases[i])
                logging.info(strres)
            else:
                # Multi-line: one line per channel, phase in radians.
                logging.info('Reference amplitude: {:f} V'.format(self._gen_meas_amplitude))
                for i in range(len(self._normamplitudes)):
                    #logging.info('channel {:d}: {:e} e^i {:f}'.format(i, self._amplitudes[i,-1], self._phases[i,-1]))
                    logging.info('channel {:d}: {:e} e^i {:f}'.format(i+1, self._normamplitudes[i], self._normphases[i]))
    except Exception:
        logging.exception('No processed data found')
def get_main_results(self):
    '''Getter for the end value of the reference signal amplitude and the normalised amplitudes and phases of other integrated signals'''
    try:
        # Raises (AttributeError) only if the attributes were never
        # created; after free_data() they exist but are None.
        return (self._gen_meas_amplitude, self._normamplitudes, self._normphases)
    except Exception:
        raise RuntimeWarning('No results!')
#####################################################################################
##### Meta functions (not part of the class, just for running some quick tests) #####
#####################################################################################
def runonce(periods=1, freq=10, fs=1000, ch='ai1'):
    """Run one hardware lock-in measurement, print timing and results.

    periods: number of signal periods to measure
    freq: signal frequency in Hz; fs: sample frequency in Hz
    ch: measurement channel(s)
    Returns the normalised channel amplitudes.
    """
    dl = DigitalLockin(Fsignal=freq, Fs=fs, meas_ch=ch)
    bm_t1 = time.clock()  # NOTE(review): time.clock() is removed in py3.8+
    dl.run_measurement(periods)
    dl.close_hardware()
    bm_t2 = time.clock()
    #dl.process_data_moreinfo()
    dl.process_data()
    bm_t3 = time.clock()
    dl.printmainresults()
    #sys.stdout.flush()
    #dl.plot_results()
    # BUG FIX: capture the results BEFORE close() -- close() calls
    # free_data(), which sets _normamplitudes to None, so the original
    # always returned None.
    results = dl._normamplitudes
    dl.close()
    # Parenthesised print works on Python 2 and 3 (the original print
    # statement is a SyntaxError on Python 3).
    print('Measurement took %f seconds, analysis took %f'
          % (bm_t2 - bm_t1, bm_t3 - bm_t2))
    return results
# Compare outcome for different filter orders
def check_filters(periods=1, freq=10, fs=1000, ch='ai1', RC=1/numpy.pi, a=0.1):
    """Compare simulated lock-in outcomes for filter orders 0..4 and plot them.

    RC: RC-time of each cascaded filter section; a: simulation noise sigma.
    """
    dl = DigitalLockin(Fsignal=freq, Fs=fs, meas_ch=ch, simulated=True,
                       gen_amplitude=a)
    dl.run_measurement(periods)
    dl.close_hardware()
    numords = 5
    ordsin = []
    ordcos = []
    for i in range(numords):
        # BUG FIX: process_data() accepts no RC/fltord arguments and does
        # not populate _sinf/_cosf; process_data_moreinfo() is the
        # intended call here.
        dl.process_data_moreinfo(RC=RC, fltord=i)
        print('Filter order %d' % i)
        dl.printmainresults()
        ordsin.append(dl._sinf[1])
        ordcos.append(dl._cosf[1])
    t = dl._t
    inp_raw = dl._rawdata[0][:]
    raw = dl._rawdata[1][:]
    dl.close()
    f, subf = plt.subplots(5, 1, sharex=True)
    subf[0].plot(t, raw, 'b', t, inp_raw, 'r')
    subf[1].plot(t, numpy.array(ordsin).T)
    subf[2].plot(t, numpy.array(ordcos).T)
    # Floor division keeps these valid as indices on Python 3 as well.
    spp = int(fs // freq)
    raw_nodc = raw - numpy.average(raw)
    # Product of samples half a period apart (sign-flipped): an
    # autocorrelation-style estimate of the signal amplitude.
    raw_oneperiodapart = - raw_nodc[spp // 2:] * raw_nodc[:-(spp // 2)]
    subf[3].plot(t, raw**2, 'b', t, inp_raw**2, 'r')
    subf[4].plot(t[spp // 2:], raw_oneperiodapart, 'b', t, inp_raw**2, 'r')
    # print() calls replace Python-2-only print statements.
    print('Input autocorrelation: %e'
          % numpy.sqrt(2 * numpy.average(inp_raw**2)))
    print('Autocorrelation: %e' % numpy.sqrt(2 * numpy.average(raw**2)))
    print('Half-period-shifted autocorrelation: %e %e %e'
          % (numpy.sqrt(2 * abs(numpy.average(raw_oneperiodapart))),
             numpy.sqrt(2 * abs(numpy.average(
                 _filter(raw_oneperiodapart, 1 / RC / fs)))),
             numpy.sqrt(2 * abs(numpy.average(
                 _filter(raw_oneperiodapart, [1 / RC / fs] * 2))))))
    sys.stdout.flush()
    plt.show()
def filterchk2():
    """Visual sanity check: alpha-filter a sine/cosine pair and plot both.

    Prints the lengths and averages of the raw and filtered signals.
    """
    dt = numpy.pi / 30
    t = numpy.arange(0, 6 * numpy.pi, dt)
    s = numpy.sin(t)
    c = numpy.cos(t)
    sf = _filter(s, numpy.pi * dt)
    cf = _filter(c, numpy.pi * dt)
    f, subf = plt.subplots(2, 1)
    # print() call replaces the Python-2-only print statement.
    print('%d, %d,%d, %d,%d, %f,%f, %f,%f'
          % (len(t), len(s), len(c), len(sf), len(cf),
             numpy.average(s), numpy.average(c),
             numpy.average(sf), numpy.average(cf)))
    subf[0].plot(t, s, 'r', t, sf, 'b')
    subf[1].plot(t, c, 'r', t, cf, 'b')
    plt.show()
| zeusttu/DigitalLockin | digitallockin.py | Python | gpl-3.0 | 27,071 | [
"Gaussian"
] | 41e923b27a7c3726e0360b88ac65dd071285927d896abe7f624a55f08deaa5eb |
from numpy import linspace, zeros, array, meshgrid, abs, empty, arange, \
int32, unravel_index, dtype
from multiprocessing import Pool
from ..solvers import solver_dict, get_solver_name
# attempt to import plotting libraries
try:
from matplotlib import pyplot
from mpl_toolkits.mplot3d import axes3d
except ImportError:
pyplot = None
axes3d = None
mlab = None # mayavi may crash python
try: # for prettier colors
from palettable.colorbrewer import get_map
except ImportError:
try:
from brewer2mpl import get_map
except ImportError:
get_map = None
class phenotypePhasePlaneData:
"""class to hold results of a phenotype phase plane analysis"""
def __init__(self,
             reaction1_name, reaction2_name,
             reaction1_range_max, reaction2_range_max,
             reaction1_npoints, reaction2_npoints):
    """Allocate flux grids and empty result arrays for the phase plane.

    reaction1_name/reaction2_name: ids of the two varied reactions
    reaction1_range_max/reaction2_range_max: maximum flux for each axis
    reaction1_npoints/reaction2_npoints: grid resolution per axis
    """
    self.reaction1_name = reaction1_name
    self.reaction2_name = reaction2_name
    self.reaction1_range_max = reaction1_range_max
    self.reaction2_range_max = reaction2_range_max
    self.reaction1_npoints = reaction1_npoints
    self.reaction2_npoints = reaction2_npoints
    # Evenly spaced flux values from 0 to the range maximum on each axis.
    self.reaction1_fluxes = linspace(0, reaction1_range_max,
                                     reaction1_npoints)
    self.reaction2_fluxes = linspace(0, reaction2_range_max,
                                     reaction2_npoints)
    # Result grids, filled in later by the calculation routines.
    self.growth_rates = zeros((reaction1_npoints, reaction2_npoints))
    self.shadow_prices1 = zeros((reaction1_npoints, reaction2_npoints))
    self.shadow_prices2 = zeros((reaction1_npoints, reaction2_npoints))
    # Per-point phase label (0 = unassigned), filled by segment().
    self.segments = zeros(self.growth_rates.shape, dtype=int32)
    self.phases = []
def plot(self):
    """plot the phenotype phase plane in 3D using any available backend"""
    if pyplot is not None:
        self.plot_matplotlib()
        return
    # BUG FIX: the module-level `mlab` placeholder is always None (mayavi
    # is only imported lazily inside plot_mayavi), so the original elif
    # branch was dead code.  Probe mayavi by attempting the plot instead.
    try:
        self.plot_mayavi()
    except ImportError:
        raise ImportError("No suitable 3D plotting package found")
def plot_matplotlib(self, theme="Paired", scale_grid=False):
    """Use matplotlib to plot a phenotype phase plane in 3D.

    theme: color theme to use (requires palettable)
    scale_grid: when True, thin the wireframe to roughly 20 lines per axis

    returns: maptlotlib 3d subplot object"""
    if pyplot is None:
        raise ImportError("Error importing matplotlib 3D plotting")
    colors = empty(self.growth_rates.shape, dtype=dtype((str, 7)))
    n_segments = self.segments.max()
    # pick colors: fall back to a hard-coded ColorBrewer "Paired" palette
    # when neither palettable nor brewer2mpl is installed.
    if get_map is None:
        color_list = ['#A6CEE3', '#1F78B4', '#B2DF8A', '#33A02C',
                      '#FB9A99', '#E31A1C', '#FDBF6F', '#FF7F00',
                      '#CAB2D6', '#6A3D9A', '#FFFF99', '#B15928']
    else:
        color_list = get_map(theme, 'Qualitative', n_segments).hex_colors
    if n_segments > len(color_list):
        from warnings import warn
        warn("not enough colors to color all detected phases")
    # One color per detected phase; plain blue if segment() was not run.
    if n_segments > 0 and n_segments <= len(color_list):
        for i in range(n_segments):
            colors[self.segments == (i + 1)] = color_list[i]
    else:
        colors[:, :] = 'b'
    if scale_grid:
        # grid wires should not have more than ~20 points
        xgrid_scale = int(self.reaction1_npoints / 20)
        ygrid_scale = int(self.reaction2_npoints / 20)
    else:
        xgrid_scale, ygrid_scale = (1, 1)
    figure = pyplot.figure()
    xgrid, ygrid = meshgrid(self.reaction1_fluxes, self.reaction2_fluxes)
    axes = figure.add_subplot(111, projection="3d")
    # meshgrid returns (y, x)-ordered grids; transpose to match the
    # (reaction1, reaction2) layout of growth_rates.
    xgrid = xgrid.transpose()
    ygrid = ygrid.transpose()
    axes.plot_surface(xgrid, ygrid, self.growth_rates, rstride=1,
                      cstride=1, facecolors=colors, linewidth=0,
                      antialiased=False)
    axes.plot_wireframe(xgrid, ygrid, self.growth_rates, color="black",
                        rstride=xgrid_scale, cstride=ygrid_scale)
    axes.set_xlabel(self.reaction1_name, size="x-large")
    axes.set_ylabel(self.reaction2_name, size="x-large")
    axes.set_zlabel("Growth rate", size="x-large")
    axes.view_init(elev=30, azim=-135)
    figure.tight_layout()
    return axes
def plot_mayavi(self):
    """Use mayavi to plot a phenotype phase plane in 3D.

    The resulting figure will be quick to interact with in real time,
    but might be difficult to save as a vector figure.

    returns: mlab figure object"""
    from mayavi import mlab
    figure = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
    figure.name = "Phenotype Phase Plane"
    # Common length every axis is rescaled to, so the surface is not
    # squashed.  (Renamed from `max`, which shadowed the builtin.)
    axis_len = 10.0
    xmax = self.reaction1_fluxes.max()
    ymax = self.reaction2_fluxes.max()
    zmax = self.growth_rates.max()
    xgrid, ygrid = meshgrid(self.reaction1_fluxes, self.reaction2_fluxes)
    # meshgrid returns (y, x)-ordered grids; transpose to match
    # growth_rates' (reaction1, reaction2) layout.
    xgrid = xgrid.transpose()
    ygrid = ygrid.transpose()
    xscale = axis_len / xmax
    yscale = axis_len / ymax
    zscale = axis_len / zmax
    mlab.surf(xgrid * xscale, ygrid * yscale, self.growth_rates * zscale,
              representation="wireframe", color=(0, 0, 0), figure=figure)
    # Surface colored by the combined shadow prices.
    mlab.mesh(xgrid * xscale, ygrid * yscale, self.growth_rates * zscale,
              scalars=self.shadow_prices1 + self.shadow_prices2,
              resolution=1, representation="surface", opacity=0.75,
              figure=figure)
    # draw axes, labeling them with the real (unscaled) data ranges
    mlab.outline(extent=(0, axis_len, 0, axis_len, 0, axis_len))
    mlab.axes(opacity=0, ranges=[0, xmax, 0, ymax, 0, zmax])
    mlab.xlabel(self.reaction1_name)
    mlab.ylabel(self.reaction2_name)
    mlab.zlabel("Growth rates")
    return figure
def segment(self, threshold=0.01):
    """attempt to segment the data and identify the various phases

    threshold: maximum shadow-price difference (in both prices) for two
    grid points to be considered part of the same phase
    """
    # Reset any previous segmentation (0 means "unassigned").
    self.segments *= 0
    # each entry in phases will consist of the following tuple
    # ((x, y), shadow_price1, shadow_price2)
    self.phases = []
    # initialize the area to be all False
    covered_area = (self.growth_rates * 0 == 1)
    # as long as part of the area has not been covered
    segment_id = 0
    while self.segments.min() == 0:
        segment_id += 1
        # i and j are indices for a current point which has not been
        # assigned a segment yet
        i, j = unravel_index(self.segments.argmin(), self.segments.shape)
        # update the segment id for any point with a similar shadow price
        # to the current point
        d1 = abs(self.shadow_prices1 - self.shadow_prices1[i, j])
        d2 = abs(self.shadow_prices2 - self.shadow_prices2[i, j])
        # NOTE(review): += can stack ids if a point matches several seed
        # points across iterations -- confirm `=` was not intended.
        self.segments[(d1 < threshold) * (d2 < threshold)] += segment_id
        # add the current point as one of the phases
        self.phases.append((
            (self.reaction1_fluxes[i], self.reaction2_fluxes[j]),
            self.shadow_prices1[i, j], self.shadow_prices2[i, j]))
def _calculate_subset(arguments):
    """Calculate a subset of the phenotype phase plane data.
    Store each result tuple as:
    (i, j, growth_rate, shadow_price1, shadow_price2)

    *arguments* is a single dict (so this function can be used directly with
    multiprocessing.Pool.map) carrying the model, the two reaction indices,
    the flux values to scan, the matching grid indices (i_list/j_list), the
    bound tolerance, and the solver name.
    """
    model = arguments["model"]
    reaction1_fluxes = arguments["reaction1_fluxes"]
    reaction2_fluxes = arguments["reaction2_fluxes"]
    metabolite1_name = arguments["metabolite1_name"]
    metabolite2_name = arguments["metabolite2_name"]
    index1 = arguments["index1"]
    index2 = arguments["index2"]
    i_list = arguments["i_list"]
    j_list = arguments["j_list"]
    tolerance = arguments["tolerance"]
    solver = solver_dict[arguments["solver"]]
    results = []
    reaction1 = model.reactions[index1]
    reaction2 = model.reactions[index2]
    # build the LP once and re-solve it with mutated bounds for each grid point
    problem = solver.create_problem(model)
    solver.solve_problem(problem)
    for a, flux1 in enumerate(reaction1_fluxes):
        i = i_list[a]
        # flux is actually negative for uptake. Also some solvers require
        # float instead of numpy.float64
        flux1 = float(-1 * flux1)
        # change bounds on reaction 1: pin it to flux1 within +/- tolerance
        solver.change_variable_bounds(problem, index1, flux1 - tolerance,
                                      flux1 + tolerance)
        for b, flux2 in enumerate(reaction2_fluxes):
            j = j_list[b]
            flux2 = float(-1 * flux2)  # same story as flux1
            # change bounds on reaction 2
            solver.change_variable_bounds(problem, index2, flux2 - tolerance,
                                          flux2 + tolerance)
            # solve the problem and save results
            solver.solve_problem(problem)
            solution = solver.format_solution(problem, model)
            if solution is not None and solution.status == "optimal":
                # y_dict holds the dual values (shadow prices) per metabolite
                results.append((i, j, solution.f,
                                solution.y_dict[metabolite1_name],
                                solution.y_dict[metabolite2_name]))
            else:
                # infeasible point: record zero growth and zero shadow prices
                results.append((i, j, 0, 0, 0))
        # reset reaction 2 bounds before moving to the next flux1 value
        solver.change_variable_bounds(problem, index2,
                                      float(reaction2.lower_bound),
                                      float(reaction2.upper_bound))
    # reset reaction 1 bounds so the problem object is left unmodified
    solver.change_variable_bounds(problem, index1,
                                  float(reaction1.lower_bound),
                                  float(reaction1.upper_bound))
    return results
def calculate_phenotype_phase_plane(
        model, reaction1_name, reaction2_name,
        reaction1_range_max=20, reaction2_range_max=20,
        reaction1_npoints=50, reaction2_npoints=50,
        solver=None, n_processes=1, tolerance=1e-6):
    """calculates the growth rates while varying the uptake rates for two
    reactions.

    model: a cobra Model
    reaction1_name, reaction2_name: ids of the two reactions to vary
    reaction1_range_max, reaction2_range_max: maximum uptake rate sampled
    reaction1_npoints, reaction2_npoints: grid resolution along each axis
    solver: solver name understood by solver_dict; autodetected when None
    n_processes: number of worker processes used to fill in the grid
    tolerance: half-width of the bounds window pinned around each sampled flux

    returns: an object containing the growth rates for the uptake rates.
    To plot the result, call the plot function of the returned object.
    Example:
    data = calculate_phenotype_phase_plane(my_model, "EX_foo", "EX_bar")
    data.plot()
    """
    if solver is None:
        solver = get_solver_name()
    data = phenotypePhasePlaneData(
        str(reaction1_name), str(reaction2_name),
        reaction1_range_max, reaction2_range_max,
        reaction1_npoints, reaction2_npoints)
    # find the objects for the reactions and metabolites
    index1 = model.reactions.index(data.reaction1_name)
    index2 = model.reactions.index(data.reaction2_name)
    metabolite1_name = list(model.reactions[index1]._metabolites)[0].id
    metabolite2_name = list(model.reactions[index2]._metabolites)[0].id
    if n_processes > reaction1_npoints:  # limit the number of processes
        n_processes = reaction1_npoints
    range_add = reaction1_npoints // n_processes
    # prepare the list of arguments for each _calculate_subset call
    arguments_list = []
    i = arange(reaction1_npoints)
    j = arange(reaction2_npoints)
    for n in range(n_processes):
        start = n * range_add
        if n != n_processes - 1:
            r1_range = data.reaction1_fluxes[start:start + range_add]
            i_list = i[start:start + range_add]
        else:
            # the last chunk also picks up the remainder of the integer division
            r1_range = data.reaction1_fluxes[start:]
            i_list = i[start:]
        arguments_list.append({
            "model": model,
            "index1": index1, "index2": index2,
            "metabolite1_name": metabolite1_name,
            "metabolite2_name": metabolite2_name,
            "reaction1_fluxes": r1_range,
            "reaction2_fluxes": data.reaction2_fluxes.copy(),
            "i_list": i_list, "j_list": j.copy(),
            "tolerance": tolerance, "solver": solver})
    if n_processes > 1:
        # BUGFIX: the pool was previously never closed/joined, leaking worker
        # processes on every call; shut it down even if a worker raises
        p = Pool(n_processes)
        try:
            results = list(p.map(_calculate_subset, arguments_list))
        finally:
            p.close()
            p.join()
    else:
        results = [_calculate_subset(arguments_list[0])]
    # scatter the (i, j, growth, shadow1, shadow2) tuples back onto the grids
    for result_list in results:
        for result in result_list:
            i = result[0]
            j = result[1]
            data.growth_rates[i, j] = result[2]
            data.shadow_prices1[i, j] = result[3]
            data.shadow_prices2[i, j] = result[4]
    data.segment()
    return data
| JuBra/cobrapy | cobra/flux_analysis/phenotype_phase_plane.py | Python | lgpl-2.1 | 12,249 | [
"Mayavi"
] | 87f9ac34f5395c3c6e5b9cd1da002a0cffd762d164a6915415bbe4af54c5df4d |
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import os
import os.path
import pipes
import shutil
import tempfile
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    """Action plugin for the 'assemble' module: concatenates a directory of
    fragment files into one file and pushes the result to the target host."""

    TRANSFERS_FILES = True

    def __init__(self, runner):
        self.runner = runner

    def _assemble_from_fragments(self, src_path, delimiter=None):
        ''' assemble a file from a directory of fragments '''
        # Build the assembled copy in a local temp file; the caller is
        # responsible for transferring (and eventually removing) it.
        tmpfd, temp_path = tempfile.mkstemp()
        tmp = os.fdopen(tmpfd, 'w')
        try:
            delimit_me = False
            for f in sorted(os.listdir(src_path)):
                fragment = "%s/%s" % (src_path, f)
                # emit the delimiter between entries, never before the first
                if delimit_me and delimiter:
                    tmp.write(delimiter)
                if os.path.isfile(fragment):
                    # BUGFIX: was file(fragment).read() -- the Python 2-only
                    # file() builtin, and the handle was never closed
                    with open(fragment) as frag:
                        tmp.write(frag.read())
                delimit_me = True
            return temp_path
        finally:
            tmp.close()

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        """Assemble fragments from 'src' into 'dest' on the managed host."""
        # load up options
        options = {}
        if complex_args:
            options.update(complex_args)
        options.update(utils.parse_kv(module_args))
        src = options.get('src', None)
        dest = options.get('dest', None)
        delimiter = options.get('delimiter', None)
        remote_src = options.get('remote_src', True)
        if src is None or dest is None:
            result = dict(failed=True, msg="src and dest are required")
            return ReturnData(conn=conn, comm_ok=False, result=result)
        if remote_src:
            # fragments already live on the remote node; run the assemble
            # module there and let it do all the work
            return self.runner._execute_module(conn, tmp, 'assemble', module_args, inject=inject, complex_args=complex_args)
        # Does all work assembling the file locally
        path = self._assemble_from_fragments(src, delimiter)
        pathmd5 = utils.md5s(path)
        remote_md5 = self.runner._remote_md5(conn, tmp, dest)
        if pathmd5 != remote_md5:
            # content differs: transfer the assembled file and copy it in place
            with open(path) as assembled:
                resultant = assembled.read()
            if self.runner.diff:
                dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
                if 'content' in dest_result.result:
                    dest_contents = dest_result.result['content']
                    if dest_result.result['encoding'] == 'base64':
                        # BUGFIX: base64 was used here without ever being
                        # imported, raising NameError in diff mode
                        import base64
                        dest_contents = base64.b64decode(dest_contents)
                    else:
                        raise Exception("unknown encoding, failed: %s" % dest_result.result)
                # NOTE(review): dest_contents is decoded but never attached to
                # the returned diff (only 'after' is) -- confirm whether the
                # 'before' side was meant to be reported
            xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)
            # fix file permissions when the copy is done as a different user
            if self.runner.sudo and self.runner.sudo_user != 'root':
                self.runner._low_level_exec_command(conn, "chmod a+r %s" % xfered, tmp)
            # run the copy module
            module_args = "%s src=%s dest=%s original_basename=%s" % (module_args, pipes.quote(xfered), pipes.quote(dest), pipes.quote(os.path.basename(src)))
            if self.runner.noop_on_check(inject):
                return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
            else:
                res = self.runner._execute_module(conn, tmp, 'copy', module_args, inject=inject)
                res.diff = dict(after=resultant)
                return res
        else:
            # content already matches: run the 'file' module so any ownership/
            # mode arguments in module_args are still applied.
            # BUGFIX: this branch referenced 'xfered', which is never assigned
            # on this path, so an unchanged file raised NameError
            module_args = "%s dest=%s original_basename=%s" % (module_args, pipes.quote(dest), pipes.quote(os.path.basename(src)))
            return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject)
| sileht/ansible | lib/ansible/runner/action_plugins/assemble.py | Python | gpl-3.0 | 4,345 | [
"Brian"
] | 55dbee2d4b60a371f36fb5b16b8fa1dbc7c76be74fbb84d939f6c81723e439cb |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
from MooseDocs import common
class TestBox(unittest.TestCase):
    """Verify that MooseDocs.common.box draws a titled, line-numbered
    unicode frame around multi-line text."""

    def testBasic(self):
        # box(content, title, starting line number, total width); color=None
        # disables ANSI escapes so the raw frame characters can be compared
        b = common.box('foo\nbar', 'title', 42, 12, color=None)
        # expected: 'title', a top border, one numbered row per content line
        # (42, 43), and a bottom border, built from U+250x box-drawing chars
        gold = 'title\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n42\u2502' \
               'foo \u2502\n43\u2502bar \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500' \
               '\u2500\u2500\u2500\u2518'
        self.assertEqual(b, gold)
# Allow running this test file directly, outside a test harness.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| nuclear-wizard/moose | python/MooseDocs/test/common/test_box.py | Python | lgpl-2.1 | 833 | [
"MOOSE"
] | 57239e5f8c052b6418380b861056e03e51a64b26e1d8b469e1455d802f1beecd |
"""Ordinary Least Squares regression classes."""
__author__ = "Luc Anselin luc.anselin@asu.edu, David C. Folch david.folch@asu.edu"
import numpy as np
import copy as COPY
import numpy.linalg as la
import user_output as USER
import summary_output as SUMMARY
import robust as ROBUST
from utils import spdot, sphstack, RegressionPropsY, RegressionPropsVM
__all__ = ["OLS"]
class BaseOLS(RegressionPropsY, RegressionPropsVM):
    """
    Ordinary least squares (OLS) (note: no consistency checks, diagnostics or
    constant added)
    Parameters
    ----------
    y : array
    nx1 array for dependent variable
    x : array
    Two dimensional array with n rows and one column for each
    independent (exogenous) variable, excluding the constant
    robust : string
    If 'white', then a White consistent estimator of the
    variance-covariance matrix is given. If 'hac', then a
    HAC consistent estimator of the variance-covariance
    matrix is given. Default set to None.
    gwk : pysal W object
    Kernel spatial weights needed for HAC estimation. Note:
    matrix must have ones along the main diagonal.
    sig2n_k : boolean
    If True, then use n-k to estimate sigma^2. If False, use n.
    Attributes
    ----------
    betas : array
    kx1 array of estimated coefficients
    u : array
    nx1 array of residuals
    predy : array
    nx1 array of predicted y values
    n : integer
    Number of observations
    k : integer
    Number of variables for which coefficients are estimated
    (including the constant)
    y : array
    nx1 array for dependent variable
    x : array
    Two dimensional array with n rows and one column for each
    independent (exogenous) variable, including the constant
    mean_y : float
    Mean of dependent variable
    std_y : float
    Standard deviation of dependent variable
    vm : array
    Variance covariance matrix (kxk)
    utu : float
    Sum of squared residuals
    sig2 : float
    Sigma squared used in computations
    sig2n : float
    Sigma squared (computed with n in the denominator)
    sig2n_k : float
    Sigma squared (computed with n-k in the denominator)
    xtx : float
    X'X
    xtxi : float
    (X'X)^-1
    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("CRIME"))
    >>> X = np.array(X).T
    >>> X = np.hstack((np.ones(y.shape),X))
    >>> ols=BaseOLS(y,X)
    >>> ols.betas
    array([[ 46.42818268],
    [ 0.62898397],
    [ -0.48488854]])
    >>> ols.vm
    array([[ 174.02245348, -6.52060364, -2.15109867],
    [ -6.52060364, 0.28720001, 0.06809568],
    [ -2.15109867, 0.06809568, 0.03336939]])
    """
    def __init__(self, y, x, robust=None, gwk=None, sig2n_k=True):
        self.x = x
        # cross products for the normal equations: X'X and X'y
        self.xtx = spdot(self.x.T, self.x)
        xty = spdot(self.x.T, y)
        # betas = (X'X)^-1 X'y
        self.xtxi = la.inv(self.xtx)
        self.betas = np.dot(self.xtxi, xty)
        predy = spdot(self.x, self.betas)
        u = y - predy
        self.u = u
        self.predy = predy
        self.y = y
        self.n, self.k = self.x.shape
        # sig2n_k / sig2n are lazy properties supplied by the
        # RegressionPropsVM / RegressionPropsY mixins
        if sig2n_k:
            self.sig2 = self.sig2n_k
        else:
            self.sig2 = self.sig2n
        # replace the classic variance-covariance matrix with a robust
        # (White/HAC) estimate when requested
        if robust is not None:
            self.vm = ROBUST.robust_vm(reg=self, gwk=gwk, sig2n_k=sig2n_k)
class OLS(BaseOLS):
    """
    Ordinary least squares with results and diagnostics.
    Parameters
    ----------
    y : array
    nx1 array for dependent variable
    x : array
    Two dimensional array with n rows and one column for each
    independent (exogenous) variable, excluding the constant
    w : pysal W object
    Spatial weights object (required if running spatial
    diagnostics)
    robust : string
    If 'white', then a White consistent estimator of the
    variance-covariance matrix is given. If 'hac', then a
    HAC consistent estimator of the variance-covariance
    matrix is given. Default set to None.
    gwk : pysal W object
    Kernel spatial weights needed for HAC estimation. Note:
    matrix must have ones along the main diagonal.
    sig2n_k : boolean
    If True, then use n-k to estimate sigma^2. If False, use n.
    nonspat_diag : boolean
    If True, then compute non-spatial diagnostics on
    the regression.
    spat_diag : boolean
    If True, then compute Lagrange multiplier tests (requires
    w). Note: see moran for further tests.
    moran : boolean
    If True, compute Moran's I on the residuals. Note:
    requires spat_diag=True.
    white_test : boolean
    If True, compute White's specification robust test.
    (requires nonspat_diag=True)
    vm : boolean
    If True, include variance-covariance matrix in summary
    results
    name_y : string
    Name of dependent variable for use in output
    name_x : list of strings
    Names of independent variables for use in output
    name_w : string
    Name of weights matrix for use in output
    name_gwk : string
    Name of kernel weights matrix for use in output
    name_ds : string
    Name of dataset for use in output
    Attributes
    ----------
    summary : string
    Summary of regression results and diagnostics (note: use in
    conjunction with the print command)
    betas : array
    kx1 array of estimated coefficients
    u : array
    nx1 array of residuals
    predy : array
    nx1 array of predicted y values
    n : integer
    Number of observations
    k : integer
    Number of variables for which coefficients are estimated
    (including the constant)
    y : array
    nx1 array for dependent variable
    x : array
    Two dimensional array with n rows and one column for each
    independent (exogenous) variable, including the constant
    robust : string
    Adjustment for robust standard errors
    mean_y : float
    Mean of dependent variable
    std_y : float
    Standard deviation of dependent variable
    vm : array
    Variance covariance matrix (kxk)
    r2 : float
    R squared
    ar2 : float
    Adjusted R squared
    utu : float
    Sum of squared residuals
    sig2 : float
    Sigma squared used in computations
    sig2ML : float
    Sigma squared (maximum likelihood)
    f_stat : tuple
    Statistic (float), p-value (float)
    logll : float
    Log likelihood
    aic : float
    Akaike information criterion
    schwarz : float
    Schwarz information criterion
    std_err : array
    1xk array of standard errors of the betas
    t_stat : list of tuples
    t statistic; each tuple contains the pair (statistic,
    p-value), where each is a float
    mulColli : float
    Multicollinearity condition number
    jarque_bera : dictionary
    'jb': Jarque-Bera statistic (float); 'pvalue': p-value
    (float); 'df': degrees of freedom (int)
    breusch_pagan : dictionary
    'bp': Breusch-Pagan statistic (float); 'pvalue': p-value
    (float); 'df': degrees of freedom (int)
    koenker_bassett : dictionary
    'kb': Koenker-Bassett statistic (float); 'pvalue':
    p-value (float); 'df': degrees of freedom (int)
    white : dictionary
    'wh': White statistic (float); 'pvalue': p-value (float);
    'df': degrees of freedom (int)
    lm_error : tuple
    Lagrange multiplier test for spatial error model; tuple
    contains the pair (statistic, p-value), where each is a
    float
    lm_lag : tuple
    Lagrange multiplier test for spatial lag model; tuple
    contains the pair (statistic, p-value), where each is a
    float
    rlm_error : tuple
    Robust lagrange multiplier test for spatial error model;
    tuple contains the pair (statistic, p-value), where each
    is a float
    rlm_lag : tuple
    Robust lagrange multiplier test for spatial lag model;
    tuple contains the pair (statistic, p-value), where each
    is a float
    lm_sarma : tuple
    Lagrange multiplier test for spatial SARMA model; tuple
    contains the pair (statistic, p-value), where each is a
    float
    moran_res : tuple
    Moran's I for the residuals; tuple containing the triple
    (Moran's I, standardized Moran's I, p-value)
    name_y : string
    Name of dependent variable for use in output
    name_x : list of strings
    Names of independent variables for use in output
    name_w : string
    Name of weights matrix for use in output
    name_gwk : string
    Name of kernel weights matrix for use in output
    name_ds : string
    Name of dataset for use in output
    title : string
    Name of the regression method used
    sig2n : float
    Sigma squared (computed with n in the denominator)
    sig2n_k : float
    Sigma squared (computed with n-k in the denominator)
    xtx : float
    X'X
    xtxi : float
    (X'X)^-1
    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; also, the actual OLS class
    requires data to be passed in as numpy arrays so the user can read their
    data in using any method.
    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    Extract the HOVAL column (home values) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an nx1 numpy array.
    >>> hoval = db.by_col("HOVAL")
    >>> y = np.array(hoval)
    >>> y.shape = (len(hoval), 1)
    Extract CRIME (crime) and INC (income) vectors from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). pysal.spreg.OLS adds a vector of ones to the
    independent variables passed in.
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("CRIME"))
    >>> X = np.array(X).T
    The minimum parameters needed to run an ordinary least squares regression
    are the two numpy arrays containing the independent variable and dependent
    variables respectively. To make the printed results more meaningful, the
    user can pass in explicit names for the variables used; this is optional.
    >>> ols = OLS(y, X, name_y='home value', name_x=['income','crime'], name_ds='columbus', white_test=True)
    pysal.spreg.OLS computes the regression coefficients and their standard
    errors, t-stats and p-values. It also computes a large battery of
    diagnostics on the regression. In this example we compute the white test
    which by default isn't ('white_test=True'). All of these results can be independently
    accessed as attributes of the regression object created by running
    pysal.spreg.OLS. They can also be accessed at one time by printing the
    summary attribute of the regression object. In the example below, the
    parameter on crime is -0.4849, with a t-statistic of -2.6544 and p-value
    of 0.01087.
    >>> ols.betas
    array([[ 46.42818268],
    [ 0.62898397],
    [ -0.48488854]])
    >>> print round(ols.t_stat[2][0],3)
    -2.654
    >>> print round(ols.t_stat[2][1],3)
    0.011
    >>> print round(ols.r2,3)
    0.35
    Or we can easily obtain a full summary of all the results nicely formatted and
    ready to be printed:
    >>> print ols.summary
    REGRESSION
    ----------
    SUMMARY OF OUTPUT: ORDINARY LEAST SQUARES
    -----------------------------------------
    Data set : columbus
    Dependent Variable : home value Number of Observations: 49
    Mean dependent var : 38.4362 Number of Variables : 3
    S.D. dependent var : 18.4661 Degrees of Freedom : 46
    R-squared : 0.3495
    Adjusted R-squared : 0.3212
    Sum squared residual: 10647.015 F-statistic : 12.3582
    Sigma-square : 231.457 Prob(F-statistic) : 5.064e-05
    S.E. of regression : 15.214 Log likelihood : -201.368
    Sigma-square ML : 217.286 Akaike info criterion : 408.735
    S.E of regression ML: 14.7406 Schwarz criterion : 414.411
    <BLANKLINE>
    ------------------------------------------------------------------------------------
    Variable Coefficient Std.Error t-Statistic Probability
    ------------------------------------------------------------------------------------
    CONSTANT 46.4281827 13.1917570 3.5194844 0.0009867
    crime -0.4848885 0.1826729 -2.6544086 0.0108745
    income 0.6289840 0.5359104 1.1736736 0.2465669
    ------------------------------------------------------------------------------------
    <BLANKLINE>
    REGRESSION DIAGNOSTICS
    MULTICOLLINEARITY CONDITION NUMBER 12.538
    <BLANKLINE>
    TEST ON NORMALITY OF ERRORS
    TEST DF VALUE PROB
    Jarque-Bera 2 39.706 0.0000
    <BLANKLINE>
    DIAGNOSTICS FOR HETEROSKEDASTICITY
    RANDOM COEFFICIENTS
    TEST DF VALUE PROB
    Breusch-Pagan test 2 5.767 0.0559
    Koenker-Bassett test 2 2.270 0.3214
    <BLANKLINE>
    SPECIFICATION ROBUST TEST
    TEST DF VALUE PROB
    White 5 2.906 0.7145
    ================================ END OF REPORT =====================================
    If the optional parameters w and spat_diag are passed to pysal.spreg.OLS,
    spatial diagnostics will also be computed for the regression. These
    include Lagrange multiplier tests and Moran's I of the residuals. The w
    parameter is a PySAL spatial weights matrix. In this example, w is built
    directly from the shapefile columbus.shp, but w can also be read in from a
    GAL or GWT file. In this case a rook contiguity weights matrix is built,
    but PySAL also offers queen contiguity, distance weights and k nearest
    neighbor weights among others. In the example, the Moran's I of the
    residuals is 0.204 with a standardized value of 2.592 and a p-value of
    0.0095.
    >>> w = pysal.weights.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> ols = OLS(y, X, w, spat_diag=True, moran=True, name_y='home value', name_x=['income','crime'], name_ds='columbus')
    >>> ols.betas
    array([[ 46.42818268],
    [ 0.62898397],
    [ -0.48488854]])
    >>> print round(ols.moran_res[0],3)
    0.204
    >>> print round(ols.moran_res[1],3)
    2.592
    >>> print round(ols.moran_res[2],4)
    0.0095
    """
    def __init__(self, y, x,
                 w=None,
                 robust=None, gwk=None, sig2n_k=True,
                 nonspat_diag=True, spat_diag=False, moran=False,
                 white_test=False, vm=False, name_y=None, name_x=None,
                 name_w=None, name_gwk=None, name_ds=None):
        # validate user input up front; these raise on shape mismatches,
        # incompatible weights, or unsupported robust/diagnostic combinations
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y)
        USER.check_robust(robust, gwk)
        USER.check_spat_diag(spat_diag, w)
        # prepend the constant column to x before estimating
        x_constant = USER.check_constant(x)
        BaseOLS.__init__(self, y=y, x=x_constant, robust=robust,
                         gwk=gwk, sig2n_k=sig2n_k)
        self.title = "ORDINARY LEAST SQUARES"
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.robust = USER.set_robust(robust)
        self.name_w = USER.set_name_w(name_w, w)
        self.name_gwk = USER.set_name_w(name_gwk, gwk)
        # build self.summary and attach every requested diagnostic attribute
        SUMMARY.OLS(reg=self, vm=vm, w=w, nonspat_diag=nonspat_diag,
                    spat_diag=spat_diag, moran=moran, white_test=white_test)
def _test():
    """Run this module's doctests with numpy's small-value suppression
    enabled, restoring the caller's print options afterwards."""
    import doctest
    # doctest.BLANKLINE_MARKER could be redefined here to use a nicer
    # placeholder than '<BLANKLINE>' in expected output
    previous = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    doctest.testmod()
    np.set_printoptions(suppress=previous)
if __name__ == '__main__':
    _test()
# NOTE(review): the demonstration code below is not indented under the
# __main__ guard, so it executes whenever this module is imported (and it
# uses the Python 2 print statement) -- confirm this is intentional.
import numpy as np
import pysal
# demo: spatial OLS on the Columbus sample data with White-robust errors,
# spatial diagnostics, and Moran's I on the residuals
db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
y_var = 'CRIME'
y = np.array([db.by_col(y_var)]).reshape(49, 1)
x_var = ['INC', 'HOVAL']
x = np.array([db.by_col(name) for name in x_var]).T
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'
ols = OLS(
    y, x, w=w, nonspat_diag=True, spat_diag=True, name_y=y_var, name_x=x_var,
    name_ds='columbus', name_w='columbus.gal', robust='white', sig2n_k=True, moran=True)
print ols.summary
| pastephens/pysal | pysal/spreg/ols.py | Python | bsd-3-clause | 19,613 | [
"COLUMBUS"
] | 417e7903b281ad559905083b4110d42b6e1ee181d5b0be0cbd7d96484272dff5 |
"""
Gaussian mixture model vs. hidden Markov model with Gaussian mixture emissions vs.
hidden Markov model.
"""
import sdsModels as sdsm
exp = sdsm.Experiment(pathToData='../data/complete_april_2014_ratings-latest.csv')
exp2 = sdsm.Experiment(pathToData='../data/complete_april_2014_ratings-latest.csv')
gmmhmm = sdsm.Gmmhmm({
'num_mixc': 2,
'cov_type': 'diag',
'states': [1, 2, 3, 4, 5]
})
exp.addModel(gmmhmm)
gmm = sdsm.Gmm({
'num_mixc': 2,
'cov_type': 'diag'
})
exp.addModel(gmm)
# create hidden Markov model
hmm = sdsm.Hmm({
'states': [1, 2, 3, 4, 5]
})
exp2.addModel(hmm)
# exp.addModel(sdsm.Dummy({'strategy':'constant', 'constant': 3.0}))
# run experiment
num_pc = 3
features = ["pc-" + str(num_pc) + "-" + str(i) for i in xrange(1, num_pc + 1)]
exp.generateResults(features, cvMethod='kfolds', k=10)
features2 = ['words-system', 'asr-conf']
exp2.generateResults(features2, cvMethod='kfolds', k=10)
# output results
exp.printResults(['model', 'accuracy', 'r2'])
exp2.printResults(['model', 'accuracy', 'r2'])
| phihes/sds-models | examples/all_models_different_features__plain.py | Python | mit | 1,051 | [
"Gaussian"
] | 538735de047779d2cbc145e5125a59805731f7b6285a4ab53c7ce87b3b1cfed9 |
# changelog bisection for mercurial
#
# Copyright 2007 Matt Mackall
# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
#
# Inspired by git bisect, extension skeleton taken from mq.py.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os, error
from i18n import _
from node import short, hex
import util
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.
    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """
    clparents = changelog.parentrevs
    skip = set([changelog.rev(n) for n in state['skip']])
    def buildancestors(bad, good):
        # Returns (badrev, ancestors) where ancestors[rev] encodes membership
        # in the good->bad range: [] means "descends from a good rev",
        # None means "outside the range".  Returns (badrev, None) when the
        # earliest bad rev does not descend from any good rev.
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        goodrev = min(goodrevs)
        # build visit array
        ancestors = [None] * (len(changelog) + 1) # an extra for [-1]
        # set nodes descended from goodrevs
        for rev in goodrevs:
            ancestors[rev] = []
        for rev in changelog.revs(goodrev + 1):
            for prev in clparents(rev):
                if ancestors[prev] == []:
                    ancestors[rev] = []
        # clear good revs from array
        for rev in goodrevs:
            ancestors[rev] = None
        # sweep backwards: anything whose descendant chain reaches a cleared
        # rev is also cleared
        for rev in changelog.revs(len(changelog), goodrev):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None
        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors
    good = False
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors: # looking for bad to good transition?
        good = True
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors: # now we're confused
        if len(state['bad']) == 1 and len(state['good']) == 1:
            raise util.Abort(_("starting revisions are not directly related"))
        raise util.Abort(_("inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))
    # build children dict
    children = {}
    visit = util.deque([badrev])
    candidates = []
    while visit:
        rev = visit.popleft()
        if ancestors[rev] == []:
            candidates.append(rev)
        for prev in clparents(rev):
            if prev != -1:
                if prev in children:
                    children[prev].append(rev)
                else:
                    children[prev] = [rev]
                visit.append(prev)
    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' have been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot // 2
    # find the best node to test: the one splitting the candidate set most
    # evenly into ancestors and non-ancestors
    best_rev = None
    best_len = -1
    poison = set()
    for rev in candidates:
        if rev in poison:
            # poison children
            poison.update(children.get(rev, []))
            continue
        a = ancestors[rev] or [rev]
        ancestors[rev] = None
        x = len(a) # number of ancestors
        y = tot - x # number of non-ancestors
        value = min(x, y) # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect: # found a perfect candidate? quit early
                break
        if y < perfect and rev not in skip: # all downhill from here?
            # poison children
            poison.update(children.get(rev, []))
            continue
        # propagate this rev's ancestor set to its children
        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]
    assert best_rev is not None
    best_node = changelog.node(best_rev)
    return ([best_node], tot, good)
def load_state(repo):
    """Read the persisted bisect state from .hg/bisect.state.

    Returns a dict mapping each state kind ('current', 'good', 'bad',
    'skip') to a list of binary nodes; all lists are empty when no state
    file exists.  Raises util.Abort on an unknown kind.
    """
    state = {'current': [], 'good': [], 'bad': [], 'skip': []}
    if os.path.exists(repo.join("bisect.state")):
        # BUGFIX: the file object returned by repo.opener was previously
        # iterated without ever being closed; close it deterministically
        f = repo.opener("bisect.state")
        try:
            for l in f:
                # each line is "<kind> <hex node>\n"
                kind, node = l[:-1].split()
                node = repo.lookup(node)
                if kind not in state:
                    raise util.Abort(_("unknown bisect kind %s") % kind)
                state[kind].append(node)
        finally:
            f.close()
    return state
def save_state(repo, state):
    """Atomically persist the bisect state dict to .hg/bisect.state.

    BUGFIX: the working-directory lock is now taken *before* the state file
    is opened, so a concurrent writer cannot interleave with the write; the
    original opened the file first and locked afterwards.
    """
    wlock = repo.wlock()
    try:
        f = repo.opener("bisect.state", "w", atomictemp=True)
        for kind in sorted(state):
            for node in state[kind]:
                f.write("%s %s\n" % (kind, hex(node)))
        # closing the atomictemp file renames it into place; only do so once
        # every record has been written successfully
        f.close()
    finally:
        wlock.release()
def get(repo, status):
    """
    Return a list of revision(s) that match the given status:
    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected

    Raises error.ParseError for any other status string.
    """
    state = load_state(repo)
    if status in ('good', 'bad', 'skip', 'current'):
        # explicit marks: just translate stored nodes to revision numbers
        return map(repo.changelog.rev, state[status])
    else:
        # In the following sets, we do *not* call 'bisect()' with more
        # than one level of recursion, because that can be very, very
        # time consuming. Instead, we always develop the expression as
        # much as possible.
        # 'range' is all csets that make the bisection:
        # - have a good ancestor and a bad descendant, or conversely
        # that's because the bisection can go either way
        # NOTE: 'range' shadows the builtin of the same name within this
        # function; it is only ever used as a revset-expression string here
        range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'
        _t = repo.revs('bisect(good)::bisect(bad)')
        # The sets of topologically good or bad csets
        if len(_t) == 0:
            # Goods are topologically after bads
            goods = 'bisect(good)::' # Pruned good csets
            bads = '::bisect(bad)' # Pruned bad csets
        else:
            # Goods are topologically before bads
            goods = '::bisect(good)' # Pruned good csets
            bads = 'bisect(bad)::' # Pruned bad csets
        # 'pruned' is all csets whose fate is already known: good, bad, skip
        skips = 'bisect(skip)' # Pruned skipped csets
        pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips)
        # 'untested' is all cset that are- in 'range', but not in 'pruned'
        untested = '( (%s) - (%s) )' % (range, pruned)
        # 'ignored' is all csets that were not used during the bisection
        # due to DAG topology, but may however have had an impact.
        # E.g., a branch merged between bads and goods, but whose branch-
        # point is out-side of the range.
        iba = '::bisect(bad) - ::bisect(good)' # Ignored bads' ancestors
        iga = '::bisect(good) - ::bisect(bad)' # Ignored goods' ancestors
        ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range)
        if status == 'range':
            return repo.revs(range)
        elif status == 'pruned':
            return repo.revs(pruned)
        elif status == 'untested':
            return repo.revs(untested)
        elif status == 'ignored':
            return repo.revs(ignored)
        elif status == "goods":
            return repo.revs(goods)
        elif status == "bads":
            return repo.revs(bads)
        else:
            raise error.ParseError(_('invalid bisect state'))
def label(repo, node):
    """Return a localized, human-readable bisect status for *node*, or
    None when the changeset has no bisect status.  Explicit marks take
    precedence over the implicit (topological) sets.

    The comments immediately above each _() call are extraction hints
    for translators and must stay adjacent to the calls.
    """
    rev = repo.changelog.rev(node)
    # Try explicit sets
    if rev in get(repo, 'good'):
        # i18n: bisect changeset status
        return _('good')
    if rev in get(repo, 'bad'):
        # i18n: bisect changeset status
        return _('bad')
    if rev in get(repo, 'skip'):
        # i18n: bisect changeset status
        return _('skipped')
    if rev in get(repo, 'untested') or rev in get(repo, 'current'):
        # i18n: bisect changeset status
        return _('untested')
    if rev in get(repo, 'ignored'):
        # i18n: bisect changeset status
        return _('ignored')
    # Try implicit sets
    if rev in get(repo, 'goods'):
        # i18n: bisect changeset status
        return _('good (implicit)')
    if rev in get(repo, 'bads'):
        # i18n: bisect changeset status
        return _('bad (implicit)')
    return None
def shortlabel(label):
    """Return the uppercased first letter of *label* (e.g. 'G'), or None."""
    return label[0].upper() if label else None
| iaddict/mercurial.rb | vendor/mercurial/mercurial/hbisect.py | Python | mit | 9,226 | [
"VisIt"
] | 82d9a1cdbfe930cd84b89b75c1c811bf9709fa8ad803688cf5019a79a67279d7 |
from django.test import TestCase
from django.core import mail
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from model_mommy import mommy
from .models import Location
class DetailTest(TestCase):
    """Tests for the location detail page (locations:detail)."""

    def setUp(self):
        # The created user is looked up again by name in the tests; the
        # original bound it to an unused local, kept here for side effect only.
        User.objects.create_user('test', 'no@no.no', 'password')
        mommy.make_recipe('locations.san_francisco')

    def test_location_detail(self):
        """An anonymous GET renders the location name."""
        response = self.client.get(
            reverse('locations:detail', args=("san-francisco",))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "San Francisco")

    def test_location_detail_related(self):
        """A registered attendee with a team sees a link to their project."""
        mommy.make('projects.team', user=User.objects.get(username='test'))
        self.client.login(username='test', password='password')
        self.client.get(reverse('registration:base') + 'san-francisco/')
        response = self.client.get(
            reverse('locations:detail', args=("san-francisco",))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Visit Project")

    def test_location_detail_sponsor(self):
        """Sponsors attached to the location are rendered on the page."""
        mommy.make(
            'locations.sponsor',
            location=Location.objects.get(name='San Francisco'),
            name='John Glenn'
        )
        response = self.client.get(
            reverse('locations:detail', args=("san-francisco",))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "John Glenn")

    def test_location_detail_lead(self):
        """A location lead's contact e-mail link is rendered."""
        mommy.make(
            'locations.lead',
            location=Location.objects.get(name='San Francisco'),
            lead=User.objects.get(username='test'),
        )
        response = self.client.get(
            reverse('locations:detail', args=("san-francisco",))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Email")

    def test_location_detail_resource(self):
        """Resources attached to the location are rendered on the page."""
        mommy.make(
            'locations.resource',
            location=Location.objects.get(name='San Francisco'),
            name='John Glenn'
        )
        response = self.client.get(
            reverse('locations:detail', args=("san-francisco",))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "John Glenn")
class ListTest(TestCase):
    """Tests for the location index page (locations:list)."""

    def setUp(self):
        mommy.make_recipe('locations.san_francisco')

    def test_list_locations(self):
        """The index page lists the seeded location."""
        response = self.client.get(reverse('locations:list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "San Francisco")
class EditTest(TestCase):
    """Access control and form handling for locations:edit."""

    def setUp(self):
        User.objects.create_user('test', 'no@no.no', 'password')
        User.objects.create_superuser('super', 'no@no.no', 'password')
        mommy.make_recipe('locations.san_francisco')

    def test_can_edit_location_unauthenticated(self):
        """Anonymous users cannot reach the edit page."""
        response = self.client.get(
            reverse('locations:edit', args=('san-francisco',))
        )
        self.assertNotEqual(response.status_code, 200)

    def test_can_edit_location_unprivileged(self):
        """A plain authenticated user is forbidden (403)."""
        self.client.login(username='test', password='password')
        response = self.client.get(
            reverse('locations:edit', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 403)

    def test_can_edit_location_as_lead(self):
        """A location lead may open the edit form."""
        mommy.make(
            'locations.lead',
            location=Location.objects.get(name='San Francisco'),
            lead=User.objects.get(username='test'),
        )
        self.client.login(username='test', password='password')
        response = self.client.get(
            reverse('locations:edit', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "San Francisco")

    def test_can_edit_location_as_super(self):
        """A superuser may open the edit form."""
        self.client.login(username='super', password='password')
        response = self.client.get(
            reverse('locations:edit', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 200)

    def test_edit_location(self):
        """Posting a valid form updates the location."""
        self.client.login(username='super', password='password')
        # BUG FIX: the original had a stray trailing comma after the post()
        # call, which bound `response` to a one-element tuple.
        response = self.client.post(
            reverse('locations:edit', args=('san-francisco',)), {
                'name': 'San Francisco',
                'slug': 'san-francisco',
                'description': 'Description',
                'timezone': '-8.0',
                'city': 'San Francisco',
                'country': 'US',
                'continent': 'NA',
                'lat': '0',
                'lon': '0',
                'capacity': '100',
                'sponsor-TOTAL_FORMS': '0',
                'sponsor-INITIAL_FORMS': '0',
                'lead-TOTAL_FORMS': '0',
                'lead-INITIAL_FORMS': '0',
                'localaward-TOTAL_FORMS': '0',
                'localaward-INITIAL_FORMS': '0',
                'nomination-TOTAL_FORMS': '0',
                'nomination-INITIAL_FORMS': '0',
                'resource-TOTAL_FORMS': '0',
                'resource-INITIAL_FORMS': '0',
            })
        self.assertTrue(Location.objects.get(description='Description'))
class AttendeesTest(TestCase):
    """Access control for the attendee listing (locations:attendees)."""

    def setUp(self):
        User.objects.create_user('test', 'no@no.no', 'password')
        User.objects.create_superuser('super', 'no@no.no', 'password')
        mommy.make_recipe('locations.san_francisco')

    def test_view_attendees_unauthenticated(self):
        """Anonymous users are rejected."""
        url = reverse('locations:attendees', args=('san-francisco',))
        self.assertNotEqual(self.client.get(url).status_code, 200)

    def test_view_attendees_unprivilged(self):
        """A plain authenticated user is rejected."""
        self.client.login(username='test', password='password')
        url = reverse('locations:attendees', args=('san-francisco',))
        self.assertNotEqual(self.client.get(url).status_code, 200)

    def test_view_attendees_as_lead(self):
        """A location lead can see registered attendee e-mails."""
        mommy.make(
            'locations.lead',
            location=Location.objects.get(name='San Francisco'),
            lead=User.objects.get(username='test'),
        )
        self.client.login(username='test', password='password')
        self.client.get(reverse('registration:base') + 'san-francisco/')
        response = self.client.get(
            reverse('locations:attendees', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "no@no.no")

    def test_view_attendees_as_super(self):
        """A superuser can see registered attendee e-mails."""
        self.client.login(username='super', password='password')
        self.client.get(reverse('registration:base') + 'san-francisco/')
        response = self.client.get(
            reverse('locations:attendees', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "no@no.no")
class CSVTest(TestCase):
    """Access control for the attendee CSV export (locations:export)."""

    def setUp(self):
        User.objects.create_user('test', 'no@no.no', 'password')
        User.objects.create_superuser('super', 'no@no.no', 'password')
        mommy.make_recipe('locations.san_francisco')
        # Register the plain user as an attendee of the location.
        self.client.login(username='test', password='password')
        self.client.get(reverse('registration:base') + 'san-francisco/')

    def test_export_attendees_unauthenticated(self):
        """Anonymous users are rejected."""
        url = reverse('locations:export', args=('san-francisco',))
        self.assertNotEqual(self.client.get(url).status_code, 200)

    def test_export_attendees_unprivilged(self):
        """A plain authenticated user is rejected."""
        self.client.login(username='test', password='password')
        url = reverse('locations:export', args=('san-francisco',))
        self.assertNotEqual(self.client.get(url).status_code, 200)

    def test_export_attendees_as_lead(self):
        """A location lead may download the attendee e-mail export."""
        mommy.make(
            'locations.lead',
            location=Location.objects.get(name='San Francisco'),
            lead=User.objects.get(username='test'),
        )
        self.client.login(username='test', password='password')
        response = self.client.get(
            reverse('locations:export', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "no@no.no")

    def test_export_attendees_as_super(self):
        """A superuser may download the attendee e-mail export."""
        self.client.login(username='super', password='password')
        self.client.get(reverse('registration:base') + 'san-francisco/')
        response = self.client.get(
            reverse('locations:export', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "no@no.no")
class RelatedTest(TestCase):
    """Related-projects listing for a location."""

    def setUp(self):
        User.objects.create_user('test', 'no@no.no', 'password')
        mommy.make_recipe('locations.san_francisco')
        self.client.login(username='test', password='password')
        self.client.get(
            reverse('registration:register', args=('san-francisco',))
        )
        mommy.make('projects.team', user=User.objects.get(username='test'))

    def test_related_projects(self):
        """The related view lists the attendee's project."""
        # Removed a leftover debug `print response` statement (Python 2
        # print syntax, and noise in test output).
        response = self.client.get(
            reverse('locations:related', args=('san-francisco',))
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "project")
"VisIt"
] | 020efcb62d43a687f605dbed282484e27cf4f2a92cd7799a084535068735a619 |
from ase.lattice.surface import fcc100, add_adsorbate
from gpaw import GPAW
from gpaw.poisson import PoissonSolver
from gpaw.dipole_correction import DipoleCorrection

# Na adsorbed on one side of a 2x2x2 Al(100) slab, centered along z.
slab = fcc100('Al', (2, 2, 2), a=4.05, vacuum=7.5)
add_adsorbate(slab, 'Na', 4.0)
slab.center(axis=2)

# 1) Reference calculation with the default (non-periodic z) boundary
#    conditions; results written to zero.txt / zero.gpw.
slab.calc = GPAW(txt='zero.txt',
                 xc='PBE',
                 setups={'Na': '1'},
                 kpts=(4, 4, 1))
e1 = slab.get_potential_energy()
slab.calc.write('zero.gpw')

# 2) Same system with the cell fully periodic in all three directions.
slab.pbc = True
slab.calc.set(txt='periodic.txt')
e2 = slab.get_potential_energy()
slab.calc.write('periodic.gpw')

# 3) Periodic in-plane only, with a dipole-correcting Poisson solver
#    applied along axis 2 (the surface normal).
slab.pbc = (True, True, False)
slab.calc.set(poissonsolver=DipoleCorrection(PoissonSolver(), 2),
              txt='corrected.txt')
e3 = slab.get_potential_energy()
slab.calc.write('corrected.gpw')
| robwarm/gpaw-symm | doc/tutorials/dipole_correction/dipole.py | Python | gpl-3.0 | 777 | [
"ASE",
"GPAW"
] | 728fbf431b1cb91ed371ba23dd912a012752e8412554bf46d40246ed33b31e0f |
"""Copyleft 2010 Forrest Sheng Bao http://fsbao.net
PyEEG, a Python module to extract EEG features, v 0.02_r2
Project homepage: http://pyeeg.org
**Data structure**
PyEEG only uses standard Python and numpy data structures,
so you need to import numpy before using it.
For numpy, please visit http://numpy.scipy.org
**Naming convention**
I follow "Style Guide for Python Code" to code my program
http://www.python.org/dev/peps/pep-0008/
Constants: UPPER_CASE_WITH_UNDERSCORES, e.g., SAMPLING_RATE, LENGTH_SIGNAL.
Function names: lower_case_with_underscores, e.g., spectrum_entropy.
Variables (global and local): CapitalizedWords or CapWords, e.g., Power.
If a variable name consists of one letter, I may use lower case, e.g., x, y.
Functions listed alphabetically
--------------------------------------------------
"""
from numpy.fft import fft
from numpy import zeros, floor, log10, log, mean, array, sqrt, vstack, cumsum, \
ones, log2, std
from numpy.linalg import svd, lstsq
import time
######################## Functions contributed by Xin Liu #################
def hurst(X):
    """Compute the Hurst exponent of X via rescaled-range (R/S) analysis.

    If H == 0.5 the series behaves like a random walk; H < 0.5 means the
    series covers less "distance" than a random walk, and vice versa.

    Parameters
    ----------
    X : list
        a time series

    Returns
    -------
    H : float
        Hurst exponent
    """
    N = len(X)
    T = array([float(i) for i in range(1, N + 1)])
    Y = cumsum(X)
    Ave_T = Y / T  # running mean of X

    S_T = zeros((N))
    R_T = zeros((N))
    for i in range(N):  # was xrange: removed in Python 3
        S_T[i] = std(X[:i + 1])
        X_T = Y - T * Ave_T[i]
        R_T[i] = max(X_T[:i + 1]) - min(X_T[:i + 1])

    # Index 0 is 0/0 (nan); it is discarded by the [1:] slices below.
    R_S = R_T / S_T
    R_S = log(R_S)
    n = log(T).reshape(N, 1)
    # Slope of log(R/S) vs log(T) (no intercept term), as in the original.
    H = lstsq(n[1:], R_S[1:])[0]
    return H[0]
######################## Begin function definitions #######################
def embed_seq(X, Tau, D):
    """Build the set of embedding sequences of X with lag Tau and dimension D.

    For X = [x(1), ..., x(N)], row i of the returned matrix is
    [x(i), x(i + Tau), ..., x(i + (D - 1) * Tau)].

    Parameters
    ----------
    X : list
        a time series
    Tau : integer
        the lag or delay when building the embedding sequence
    D : integer
        the embedding dimension

    Returns
    -------
    Y : 2-D numpy array
        the embedding matrix

    Examples
    --------
    >>> embed_seq(range(0, 9), 2, 3)
    array([[ 0.,  2.,  4.],
           [ 1.,  3.,  5.],
           [ 2.,  4.,  6.],
           [ 3.,  5.,  7.],
           [ 4.,  6.,  8.]])
    """
    N = len(X)
    # Python 2/3 compatible print calls (the originals were py2-only print
    # statements); the print-and-exit contract is deliberately kept.
    if D * Tau > N:
        print("Cannot build such a matrix, because D * Tau > N")
        exit()
    if Tau < 1:
        print("Tau has to be at least 1")
        exit()

    Y = zeros((N - (D - 1) * Tau, D))
    for i in range(0, N - (D - 1) * Tau):
        for j in range(0, D):
            Y[i][j] = X[i + j * Tau]
    return Y
def in_range(Template, Scroll, Distance):
    """Return True iff every component of Template is within Distance of the
    corresponding component of Scroll.

    The two vectors are assumed to have equal length; Scroll is indexed by
    Template's positions, so a shorter Scroll raises IndexError (as before).
    """
    for idx, t in enumerate(Template):
        if abs(t - Scroll[idx]) > Distance:
            return False
    return True
""" Desperate code, but do not delete
def bit_in_range(Index):
if abs(Scroll[Index] - Template[Bit]) <= Distance :
print "Bit=", Bit, "Scroll[Index]", Scroll[Index], "Template[Bit]",\
Template[Bit], "abs(Scroll[Index] - Template[Bit])",\
abs(Scroll[Index] - Template[Bit])
return Index + 1 # move
Match_No_Tail = range(0, len(Scroll) - 1) # except the last one
# print Match_No_Tail
# first compare Template[:-2] and Scroll[:-2]
for Bit in xrange(0, len(Template) - 1): # every bit of Template is in range of Scroll
Match_No_Tail = filter(bit_in_range, Match_No_Tail)
print Match_No_Tail
# second and last, check whether Template[-1] is in range of Scroll and
# Scroll[-1] in range of Template
# 2.1 Check whether Template[-1] is in the range of Scroll
Bit = - 1
Match_All = filter(bit_in_range, Match_No_Tail)
# 2.2 Check whether Scroll[-1] is in the range of Template
# I just write a loop for this.
for i in Match_All:
if abs(Scroll[-1] - Template[i] ) <= Distance:
Match_All.remove(i)
return len(Match_All), len(Match_No_Tail)
"""
def bin_power(X, Band, Fs):
    """Compute spectral power of X in the frequency bins given by Band.

    Parameters
    ----------
    Band : list
        boundary frequencies (in Hz) of the bins; they may be unequal,
        e.g. [0.5, 4, 7, 12, 30] for delta/theta/alpha/beta. No boundary
        may exceed the Nyquist frequency (Fs / 2).
    X : list
        a 1-D real time series.
    Fs : integer
        the sampling rate in physical frequency

    Returns
    -------
    Power : numpy array
        spectral power (sum of FFT magnitudes) in each bin.
    Power_Ratio : numpy array
        per-bin power normalized by the total power over all bins.
    """
    C = fft(X)
    C = abs(C)
    Power = zeros(len(Band) - 1)
    for Freq_Index in range(0, len(Band) - 1):
        Freq = float(Band[Freq_Index])
        Next_Freq = float(Band[Freq_Index + 1])
        # BUG FIX: floor() returns a float, which is not a valid slice
        # index on Python 3 — cast the bin boundaries to int.
        lo = int(floor(Freq / Fs * len(X)))
        hi = int(floor(Next_Freq / Fs * len(X)))
        Power[Freq_Index] = sum(C[lo:hi])
    Power_Ratio = Power / sum(Power)
    return Power, Power_Ratio
def first_order_diff(X):
    """Return the first order difference of time series X as a list.

    For X = [x(1), x(2), ..., x(N)] the result is
    [x(2) - x(1), x(3) - x(2), ..., x(N) - x(N-1)].
    An empty or single-element series yields [].
    """
    # Comprehension instead of append loop; range() is Python 2/3 safe.
    return [X[i] - X[i - 1] for i in range(1, len(X))]
def pfd(X, D=None):
    """Compute the Petrosian Fractal Dimension of a time series.

    Either pass the series X (D is then derived with first_order_diff),
    or pass the precomputed first-order difference D to speed things up.

    PFD = log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
    where n = len(X) and N_delta is the number of sign changes in D.
    """
    if D is None:
        D = first_order_diff(X)
    N_delta = 0  # number of sign changes in the derivative of the signal
    for i in range(1, len(D)):
        if D[i] * D[i - 1] < 0:
            N_delta += 1
    n = len(X)
    # BUG FIX: the original computed log10(n/n + 0.4*N_delta); operator
    # precedence made that log10(1 + 0.4*N_delta), which is not the
    # Petrosian formula — n must be divided by (n + 0.4*N_delta).
    return log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
def hfd(X, Kmax):
    """Compute the Higuchi Fractal Dimension of time series X.

    Kmax is the maximum reconstruction interval; curve lengths are
    computed for k = 1 .. Kmax-1 and the dimension is the slope of
    log(L(k)) vs log(1/k).  (The original docstring said "Hjorth", but
    this is Higuchi's algorithm.)
    """
    L = []
    x = []
    N = len(X)
    for k in range(1, Kmax):  # was xrange: removed in Python 3
        Lk = []
        for m in range(0, k):
            # Average absolute increment of the k-decimated series
            # starting at offset m.
            Lmk = 0
            for i in range(1, int(floor((N - m) / k))):
                Lmk += abs(X[m + i * k] - X[m + i * k - k])
            Lmk = Lmk * (N - 1) / floor((N - m) / float(k)) / k
            Lk.append(Lmk)
        L.append(log(mean(Lk)))
        x.append([log(float(1) / k), 1])

    (p, r1, r2, s) = lstsq(x, L)
    return p[0]
def hjorth(X, D=None):
    """Compute Hjorth mobility and complexity of a time series.

    Either pass the series X alone (its first-order difference is derived
    with first_order_diff), or additionally pass the precomputed
    difference D to speed things up.

    Returns
    -------
    (mobility, complexity) : tuple of floats
        mobility  = sqrt(M2 / TP)
        complexity = sqrt(M4 * TP / M2**2)
    """
    if D is None:
        D = first_order_diff(X)
    else:
        # BUG FIX: the original inserted into the caller's list, mutating
        # the argument; work on a copy instead.
        D = list(D)
    D.insert(0, X[0])  # pad the first difference
    D = array(D)

    n = len(X)
    M2 = float(sum(D ** 2)) / n
    TP = sum(array(X) ** 2)
    M4 = 0
    for i in range(1, len(D)):  # was xrange: removed in Python 3
        M4 += (D[i] - D[i - 1]) ** 2
    M4 = M4 / n

    return sqrt(M2 / TP), sqrt(float(M4) * TP / M2 / M2)
def spectral_entropy(X, Band, Fs, Power_Ratio=None):
    """Compute the spectral entropy of a time series.

    Either pass the series X with bin boundaries Band and sampling rate Fs
    (Power_Ratio is then derived via bin_power), or pass the precomputed
    normalized per-bin power Power_Ratio directly to speed things up.

    Parameters
    ----------
    Band : list
        boundary frequencies (in Hz) of the bins; no boundary may exceed
        the Nyquist frequency (Fs / 2).
    X : list
        a 1-D real time series.
    Fs : integer
        the sampling rate in physical frequency

    See Also
    --------
    bin_power : computes spectral power in frequency bins
    """
    if Power_Ratio is None:
        Power, Power_Ratio = bin_power(X, Band, Fs)

    Spectral_Entropy = 0
    # NOTE(review): the last bin is excluded from the sum; this reproduces
    # the original behaviour, but textbook spectral entropy sums over all
    # bins — confirm whether the exclusion is intentional.
    for i in range(0, len(Power_Ratio) - 1):
        Spectral_Entropy += Power_Ratio[i] * log(Power_Ratio[i])
    Spectral_Entropy /= log(len(Power_Ratio))  # normalize by log(#bins)
    return -1 * Spectral_Entropy
def svd_entropy(X, Tau, DE, W=None):
    """Compute SVD entropy.

    Either pass a time series X with lag Tau and embedding dimension DE,
    or pass W, the precomputed normalized singular spectrum of the
    embedding matrix (recommended to speed up):

        M = embed_seq(X, Tau, DE)
        W = svd(M, compute_uv=0)
        W /= sum(W)
    """
    if W is None:
        # BUG FIX: the original referenced undefined names
        # EmbedSeq / tau / dE, raising NameError on this code path.
        Y = embed_seq(X, Tau, DE)
        W = svd(Y, compute_uv=0)
        W /= sum(W)  # normalize singular values

    return -1 * sum(W * log(W))
def fisher_info(X, Tau, DE, W=None):
    """Compute the Fisher information of a time series.

    Either pass a time series X with lag Tau and embedding dimension DE,
    or pass W, the precomputed normalized singular spectrum of the
    embedding matrix (recommended to speed up):

        M = embed_seq(X, Tau, DE)
        W = svd(M, compute_uv=0)
        W /= sum(W)

    Returns
    -------
    FI : float
        Fisher information, sum_i (W[i+1]-W[i])**2 / W[i].

    See Also
    --------
    embed_seq : embed a time series into a matrix
    """
    if W is None:
        M = embed_seq(X, Tau, DE)
        W = svd(M, compute_uv=0)
        W /= sum(W)

    FI = 0
    for i in range(0, len(W) - 1):  # was xrange: removed in Python 3
        FI += ((W[i + 1] - W[i]) ** 2) / (W[i])
    return FI
def ap_entropy(X, M, R):
    """Compute the approximate entropy (ApEn) of series X.

    Embedding matrices Em (dimension M) and Emp (dimension M + 1) are
    built with lag 1; two rows "match" when every component differs by at
    most the tolerance R (typically 20-30% of std(X)).  ApEn is
    Phi_m - Phi_mp where Phi is the average log match probability.

    Notes
    -----
    #. Self-matches are counted, as ApEn requires.
    #. O(N^2 * M) pure-Python loops: very slow on large series.

    References
    ----------
    Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of
    biological signals, Physical Review E, 71:021906, 2005
    """
    N = len(X)

    Em = embed_seq(X, 1, M)
    Emp = embed_seq(X, 1, M + 1)

    Cm, Cmp = zeros(N - M + 1), zeros(N - M)
    for i in range(0, N - M):  # was xrange: removed in Python 3
        for j in range(i, N - M):  # start from i: self-match counts in ApEn
            if in_range(Em[i], Em[j], R):
                Cm[i] += 1
                Cm[j] += 1
                # Emp rows share the first M components with Em rows, so
                # only the trailing element needs checking.
                if abs(Emp[i][-1] - Emp[j][-1]) <= R:
                    Cmp[i] += 1
                    Cmp[j] += 1
        # Compare against the final Em row, which has no Emp counterpart.
        if in_range(Em[i], Em[N - M], R):
            Cm[i] += 1
            Cm[N - M] += 1
    Cm[N - M] += 1  # the final row's self-match

    Cm /= (N - M + 1)
    Cmp /= (N - M)

    Phi_m, Phi_mp = sum(log(Cm)), sum(log(Cmp))
    Ap_En = (Phi_m - Phi_mp) / (N - M)
    return Ap_En
def samp_entropy(X, M, R):
    """Compute the sample entropy (SampEn) of series X.

    Very close to ApEn, but self-matches are excluded and the result is
    log(sum(Cm) / sum(Cmp)) over the match counts of the dimension-M and
    dimension-(M+1) embeddings.  R is the match tolerance, typically
    20-30% of std(X).

    References
    ----------
    Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of
    biological signals, Physical Review E, 71:021906, 2005

    See also
    --------
    ap_entropy : approximate entropy of a time series

    Notes
    -----
    O(N^2 * M) pure-Python loops: very slow on large series.
    """
    N = len(X)

    Em = embed_seq(X, 1, M)
    Emp = embed_seq(X, 1, M + 1)

    # The tiny epsilon keeps log() defined when no matches are found.
    Cm, Cmp = zeros(N - M - 1) + 1e-100, zeros(N - M - 1) + 1e-100
    for i in range(0, N - M):  # was xrange: removed in Python 3
        for j in range(i + 1, N - M):  # no self-match in SampEn
            if in_range(Em[i], Em[j], R):
                Cm[i] += 1
                # Emp rows share the first M components, so only the
                # trailing element needs checking.
                if abs(Emp[i][-1] - Emp[j][-1]) <= R:
                    Cmp[i] += 1

    Samp_En = log(sum(Cm) / sum(Cmp))
    return Samp_En
def dfa(X, Ave=None, L=None):
    """Compute Detrended Fluctuation Analysis of time series X.

    The signal is integrated as y(k) = sum_{i<=k} (x(i) - Ave), sliced
    into boxes of the sizes in L, least-squares detrended per box, and
    the slope of log(F(n)) vs log(n) is returned (Alpha).  White noise
    gives Alpha ~ 0.5; higher complexity gives higher Alpha.

    Parameters
    ----------
    X : 1-D list or numpy array
        a time series
    Ave : number, optional
        mean of the series (computed if omitted)
    L : 1-D list of integers, optional
        box sizes in ascending order; by default from 1/16th down through
        powers of two, so the result depends strongly on this choice.

    Returns
    -------
    Alpha : float
        the DFA scaling exponent

    Reference
    ---------
    Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of
    scaling exponents and crossover phenomena in nonstationary heartbeat
    time series. Chaos 1995;5:82-87
    """
    X = array(X)
    if Ave is None:
        Ave = mean(X)

    # BUG FIX: the original did cumsum(X) and then subtracted Ave once
    # from every element; the documented/standard DFA integrates the
    # mean-removed signal, i.e. y(k) = sum_{i<=k} (x(i) - Ave).
    Y = cumsum(X - Ave)

    if L is None:
        L = floor(len(X) * 1 / (2 ** array(range(4, int(log2(len(X))) - 4))))

    F = zeros(len(L))  # F(n) for each box length n
    for i in range(0, len(L)):  # was xrange: removed in Python 3
        n = int(L[i])
        if n == 0:
            print("time series is too short while the box length is too big")
            print("abort")
            exit()
        for j in range(0, len(X), n):  # each box of length n
            if j + n < len(X):
                c = range(j, j + n)
                c = vstack([c, ones(n)]).T  # time coordinates in the box
                y = Y[j:j + n]  # data in the box
                F[i] += lstsq(c, y)[1]  # residue of the linear fit
        # Average over the samples covered by full boxes; // keeps the
        # original Python 2 integer-division semantics.
        F[i] /= ((len(X) // n) * n)
    F = sqrt(F)

    Alpha = lstsq(vstack([log(L), ones(len(L))]).T, log(F))[0][0]
    return Alpha
| fergalbyrne/nupic.kaggle-eeg | tools/pyeeg.py | Python | gpl-3.0 | 22,998 | [
"VisIt"
] | a90365b66d25d616adae31f318bf04119218b4b5dba944eb91773790ad10725b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import logging as log
from synergetics.config import config as cfg
from synergetics.neurons.neurons import Neuron
class LIF(Neuron):
    """Leaky integrate-and-fire neuron (currently a stub).

    NOTE(review): ``Default_update`` and ``Default_discovery`` are not
    imported in this module, so evaluating the default arguments would
    raise NameError — confirm where these names are meant to come from.
    Also note the defaults are evaluated once at class-definition time,
    so all LIF instances would share the same rule objects.
    """
    def __init__(self, position, excitatory=True,
                 update_rule= Default_update(),
                 discover_rule=Default_discovery()):
        # All state handling is delegated to the Neuron base class.
        super(LIF, self).__init__(position, excitatory, update_rule, discover_rule)

    def excite(self, excitor, excitement, timestamp):
        # Not implemented yet: the LIF membrane dynamics would go here.
        pass
| synergetics/neuron | src/neurons/LIF/LIF.py | Python | mit | 488 | [
"NEURON"
] | c1a3d3749bde63f978ea36b3ff3c103f9ca6facfac838cb7ce1dc610565749dc |
"""
idnest
"""
__author__ = "Brian Balsamo"
__email__ = "balsamo@uchicago.edu"
__version__ = "0.0.1"
from uuid import uuid4
from abc import ABCMeta, abstractmethod
import logging
from flask import Blueprint, jsonify, abort, Response
from flask_restful import Resource, Api, reqparse
import redis
from pymongo import MongoClient, ASCENDING
from .exceptions import Error, ImproperConfigurationError
# Flask blueprint exposing the idnest REST API.
BLUEPRINT = Blueprint('idnest', __name__)

# Per-blueprint configuration dict, populated by the hosting application.
BLUEPRINT.config = {}

# flask_restful API bound to the blueprint.
API = Api(BLUEPRINT)

log = logging.getLogger(__name__)
@BLUEPRINT.errorhandler(Error)
def handle_errors(error):
    """Serialize an application Error into a JSON response with its status."""
    payload = jsonify(error.to_dict())
    payload.status_code = error.status_code
    return payload
class IStorageBackend(metaclass=ABCMeta):
"""
_Abstracts_
* mint_container
* rm_container
* ls_containers
* add_member
* rm_member
* ls_members
_Provided Convenience_
(Over-ride these if there is a faster way to do it in your implementation)
* mint_containers
* rm_containers
* container_exists
* add_members
* rm_members
* member_exists
"""
@abstractmethod
def mint_container(self):
pass
def mint_containers(self, num):
return [self.mint_container() for _ in range(num)]
@abstractmethod
def rm_container(self, c_id):
pass
def rm_containers(self, c_ids):
return [self.rm_container(c_id) for c_id in c_ids]
@abstractmethod
def ls_containers(self, cursor, limit):
pass
@abstractmethod
def container_exists(self, qc_id):
pass
@abstractmethod
def add_member(self, c_id, m_id):
pass
def add_members(self, c_id, m_ids):
return [self.add_member(c_id, m_id) for m_id in m_ids]
@abstractmethod
def ls_members(self, c_id, cursor, limit):
pass
@abstractmethod
def rm_member(self, c_id, m_id):
pass
def rm_members(self, c_id, m_ids):
return [self.rm_member(c_id, m_id) for m_id in m_ids]
@abstractmethod
def member_exists(self, c_id, qm_id):
pass
class RAMStorageBackend(IStorageBackend):
def __init__(self, bp):
self.data = {}
def mint_container(self):
new_c_id = uuid4().hex
self.data[new_c_id] = []
return new_c_id
def rm_container(self, c_id):
try:
del self.data[c_id]
except KeyError:
pass
return c_id
def ls_containers(self, cursor, limit):
def peek(cursor, limit):
try:
sorted(self.data.keys())[cursor + limit]
return str(cursor + limit)
except IndexError:
return None
cursor = int(cursor)
return peek(cursor, limit), list(sorted(self.data.keys()))[cursor:cursor + limit]
def add_member(self, c_id, m_id):
self.data[c_id].append(m_id)
return m_id
def rm_member(self, c_id, m_id):
try:
self.data[c_id].remove(m_id)
except ValueError:
pass
return m_id
def ls_members(self, c_id, cursor, limit):
def peek(cursor, limit):
try:
self.data[c_id][cursor + limit]
return str(cursor + limit)
except IndexError:
return None
cursor = int(cursor)
return peek(cursor, limit), self.data[c_id][cursor:cursor + limit]
def container_exists(self, c_id):
return c_id in self.data.keys()
def member_exists(self, c_id, m_id):
try:
return m_id in self.data[c_id]
except KeyError:
return False
class MongoStorageBackend(IStorageBackend):
def __init__(self, bp):
client = MongoClient(bp.config["MONGO_HOST"],
bp.config.get("MONGO_PORT", 27017))
self.db = client[bp.config["MONGO_DB"]]
def mint_container(self):
id = uuid4().hex
self.db.containers.insert_one({'members': [], '_id': id})
return id
def rm_container(self, c_id):
self.db.containers.delete_one({'_id': c_id})
return c_id
def ls_containers(self, cursor, limit):
def peek(cursor, limit):
if len([str(x['_id']) for x in
self.db.containers.find().sort('_id', ASCENDING).
skip(cursor + limit).limit(1)]) > 0:
return str(cursor + limit)
else:
return None
cursor = int(cursor)
return peek(cursor, limit), \
[str(x['_id']) for x in self.db.containers.find()
.sort('_id', ASCENDING)
.skip(cursor).limit(limit)]
def add_member(self, c_id, m_id):
r = self.db.containers.update_one({'_id': c_id}, {'$push': {'members': m_id}})
if r.modified_count < 1:
raise KeyError
return m_id
def rm_member(self, c_id, m_id):
self.db.containers.update_one({'_id': c_id}, {'$pull': {'members': m_id}})
return m_id
def ls_members(self, c_id, cursor, limit):
def peek(cursor, limit, members):
try:
members[cursor + limit]
return str(cursor + limit)
except IndexError:
return None
cursor = int(cursor)
c = self.db.containers.find_one({'_id': c_id})
return peek(cursor, limit, c['members']), c['members'][cursor:cursor + limit]
def container_exists(self, c_id):
return bool(self.db.containers.find_one({'_id': c_id}))
def member_exists(self, c_id, m_id):
c = self.db.containers.find_one({'_id': c_id})
if c is None:
return False
return m_id in c['members']
class RedisStorageBackend(IStorageBackend):
def __init__(self, bp):
self.r = redis.StrictRedis(
host=bp.config["REDIS_HOST"],
port=bp.config.get("REDIS_PORT", 6379),
db=bp.config["REDIS_DB"]
)
def mint_container(self):
c_id = uuid4().hex
self.r.lpush(c_id, 0)
return c_id
def rm_container(self, c_id):
self.r.delete(c_id)
return c_id
def ls_containers(self, cursor, limit):
results = []
# Using count here is weird, as redis doesn't garuntee that
# the number of returned elements is _exactly_ the count
# argument. See https://redis.io/commands/scan. Thus
# This implementation gets a little fuzzy, and things may actually
# return a # of elements slightly greater than limit at the moment
while cursor != 0 and limit > 0:
cursor, data = self.r.scan(cursor=cursor)
if limit:
limit = limit - len(data)
for item in data:
results.append(item)
return None if cursor == 0 else str(cursor), [x.decode("utf-8") for x in results]
def container_exists(self, c_id):
return c_id in self.r
def add_member(self, c_id, m_id):
if not self.container_exists(c_id):
raise KeyError(
"Can't put a member in a container that doesn't exist. c_id: {}".format(
c_id
)
)
self.r.rpush(c_id, m_id)
return m_id
def ls_members(self, c_id, cursor, limit):
def peek(c_id, cursor, limit):
if len([x for x in self.r.lrange(c_id, cursor + limit, cursor + limit)]) > 0:
return str(cursor + limit)
else:
return None
cursor = int(cursor)
# Skip the 0 we're using to keep Redis from deleting our key
if cursor == 0:
cursor = 1
return peek(c_id, cursor, limit), \
[x.decode("utf-8") for x in self.r.lrange(c_id, cursor, cursor + limit - 1)]
def rm_member(self, c_id, m_id):
self.r.lrem(c_id, 1, m_id)
return m_id
def member_exists(self, c_id, m_id):
return m_id in (x.decode("utf-8") for x in self.r.lrange(c_id, 1, -1))
def output_html(data, code, headers=None):
# https://github.com/flask-restful/flask-restful/issues/124
resp = Response(data, mimetype='text/html', headers=headers)
resp.status_code = code
return resp
def check_limit(limit):
if limit > BLUEPRINT.config.get("MAX_LIMIT", 1000):
log.warning(
"Received request above MAX_LIMIT (or 1000 if undefined), capping.")
limit = BLUEPRINT.config.get("MAX_LIMIT", 1000)
return limit
pagination_args_parser = reqparse.RequestParser()
pagination_args_parser.add_argument(
'cursor', type=str, default="0"
)
pagination_args_parser.add_argument(
'limit', type=int, default=1000
)
class Root(Resource):
def post(self):
log.info("Received POST @ root endpoint")
log.debug("Parsing arguments")
parser = reqparse.RequestParser()
parser.add_argument('num', type=int,
help="How many containers to mint.",
default=1)
args = parser.parse_args()
args['num'] = check_limit(args['num'])
log.debug("Arguments parsed")
return {
"Minted": [{"identifier": x, "_link": API.url_for(Container, container_id=x)} for
x in BLUEPRINT.config['storage'].mint_containers(args['num'])],
"_self": {"identifier": None, "_link": API.url_for(Root)}
}
def get(self):
log.info("Received GET @ root endpoint")
log.debug("Parsing args")
parser = pagination_args_parser.copy()
args = parser.parse_args()
args['limit'] = check_limit(args['limit'])
next_cursor, paginated_ids = BLUEPRINT.config['storage'].ls_containers(
cursor=args['cursor'], limit=args['limit'])
return {
"Containers": [{"identifier": x, "_link": API.url_for(Container, container_id=x)} for
x in paginated_ids],
"pagination": {
"cursor": args['cursor'],
"limit": args['limit'],
"next_cursor": next_cursor
},
"_self": {"identifier": None, "_link": API.url_for(Root)}
}
class HTMLMint(Resource):
def get(self):
log.info("Received GET @ HTML mint endpoint")
resp = """<html>
<body>
<h1>
Mint a Container Identifier
</h1>
<form action="."
method="post">
<p>
<div>
<input type="submit" value="Mint">
</div>
</form>
</body>
</html>"""
return output_html(resp, 200)
class HTMLMemberAdd(Resource):
def get(self, container_id):
log.info("Received GET @ HTML member add endpoint")
resp = """<html>
<body>
<h1>
Add a member to Container {}
</h1>
<form action="./{}/"
method="post">
<p>
Member Identifier:<br>
<input type="text" name="member" size="30">
</p>
<div>
<input type="submit" value="Add">
</div>
</form>
</body>
</html>""".format(container_id, container_id)
return output_html(resp, 200)
class Container(Resource):
def post(self, container_id):
log.info("Received POST @ Container endpoint")
log.debug("Parsing args")
parser = reqparse.RequestParser()
parser.add_argument('member', type=str, help="The member id to add",
action="append", required=True)
args = parser.parse_args()
log.debug("Args parsed")
try:
return {
"Added": [
{
"identifier": x,
"_link": API.url_for(Member, container_id=container_id, member_id=x)
} for x in BLUEPRINT.config['storage'].add_members(container_id, args['member'])
],
"_self": {
"identifier": container_id,
"_link": API.url_for(Container, container_id=container_id)
}
}
except KeyError:
log.critical("Container with id {} not found".format(container_id))
abort(404)
def get(self, container_id):
log.info("Received GET @ Container endpoint")
parser = pagination_args_parser.copy()
args = parser.parse_args()
args['limit'] = check_limit(args['limit'])
try:
if not BLUEPRINT.config['storage'].container_exists(container_id):
raise KeyError
next_cursor, paginated_ids = BLUEPRINT.config['storage'].ls_members(
container_id, cursor=args['cursor'], limit=args['limit'])
return {
"Members": [
{
"identifier": x,
"_link": API.url_for(Member, container_id=container_id, member_id=x)
} for x in paginated_ids
],
"pagination": {
"cursor": args['cursor'],
"limit": args['limit'],
"next_cursor": next_cursor
},
"_self": {
"identifier": container_id,
"_link": API.url_for(Container, container_id=container_id)
}
}
except KeyError:
log.critical("Container with id {} not found".format(container_id))
abort(404)
def delete(self, container_id):
log.info("Received DELETE @ Container endpoint")
BLUEPRINT.config['storage'].rm_container(container_id)
return {
"Deleted": True,
"_self": {
"identifier": container_id,
"_link": API.url_for(Container, container_id=container_id)
}
}
class Member(Resource):
def get(self, container_id, member_id):
log.info("Received GET @ Member endpoint")
try:
if BLUEPRINT.config['storage'].member_exists(container_id, member_id):
return {
"_self": {
"identifier": member_id,
"_link": API.url_for(
Member, container_id=container_id, member_id=member_id
)
},
"Container": {
"identifier": container_id,
"_link": API.url_for(Container, container_id=container_id)}
}
else:
raise KeyError()
except KeyError:
log.critical("Container with id {} ".format(container_id) +
"or member with id {} ".format(member_id) +
"not found")
abort(404)
def delete(self, container_id, member_id):
log.info("Received DELETE @ Member endpoint")
BLUEPRINT.config['storage'].rm_member(container_id, member_id)
return {
"Deleted": True,
"_self": {
"identifier": member_id,
"_link": API.url_for(Member, container_id=container_id, member_id=member_id)
},
"Container": {
"identifier": container_id,
"_link": API.url_for(Container, container_id=container_id)
}
}
class Version(Resource):
def get(self):
return {"version": __version__}
@BLUEPRINT.record
def handle_configs(setup_state):
app = setup_state.app
BLUEPRINT.config.update(app.config)
if BLUEPRINT.config.get('DEFER_CONFIG'):
log.debug("DEFER_CONFIG set, skipping configuration")
return
storage_choice = BLUEPRINT.config.get("STORAGE_BACKEND")
if storage_choice is None:
raise RuntimeError(
"Missing required configuration value 'STORAGE_BACKEND'"
)
supported_backends = {
"mongodb": MongoStorageBackend,
"redis": RedisStorageBackend,
"ram": RAMStorageBackend,
"noerror": None
}
if storage_choice.lower() not in supported_backends:
raise RuntimeError(
"Unsupported STORAGE_BACKEND: {}\n".format(storage_choice) +
"Supported storage backends include: " +
"{}".format(", ".join(supported_backends.keys()))
)
else:
BLUEPRINT.config['storage'] = supported_backends.get(storage_choice.lower())(BLUEPRINT)
if BLUEPRINT.config.get("VERBOSITY"):
log.debug("Setting verbosity to {}".format(str(BLUEPRINT.config['VERBOSITY'])))
logging.basicConfig(level=BLUEPRINT.config['VERBOSITY'])
else:
log.debug("No verbosity option set, defaulting to WARN")
logging.basicConfig(level="WARN")
@BLUEPRINT.before_request
def before_request():
# Check to be sure all our pre-request configuration has been done.
if not isinstance(BLUEPRINT.config.get('storage'), IStorageBackend):
raise ImproperConfigurationError()
API.add_resource(Root, "/")
API.add_resource(HTMLMint, "/mint")
API.add_resource(HTMLMemberAdd, "/<string:container_id>/add")
# Trailing slash as a reminder that this is "directory-esque"
API.add_resource(Container, "/<string:container_id>/")
API.add_resource(Member, "/<string:container_id>/<string:member_id>")
API.add_resource(Version, "/version")
| uchicago-library/idnest | idnest/blueprint/__init__.py | Python | gpl-3.0 | 17,401 | [
"Brian"
] | ee64f4e864887143e30ff4ec618823216fdfaa59d49f78e2badea8770ea9812b |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-18 17:09
from __future__ import unicode_literals
import advisornotes.models
import autoslug.fields
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('advisornotes', '0005_autoslug'),
]
operations = [
migrations.AlterField(
model_name='advisornote',
name='advisor',
field=models.ForeignKey(editable=False, help_text='The advisor that created the note', on_delete=django.db.models.deletion.PROTECT, related_name='advisor', to='coredata.Person'),
),
migrations.AlterField(
model_name='advisornote',
name='file_attachment',
field=models.FileField(blank=True, max_length=500, null=True, storage=django.core.files.storage.FileSystemStorage(base_url=None, location='submitted_files'), upload_to=advisornotes.models.attachment_upload_to),
),
migrations.AlterField(
model_name='advisornote',
name='nonstudent',
field=models.ForeignKey(editable=False, help_text='The non-student that the note is about', null=True, on_delete=django.db.models.deletion.PROTECT, to='advisornotes.NonStudent'),
),
migrations.AlterField(
model_name='advisornote',
name='student',
field=models.ForeignKey(editable=False, help_text='The student that the note is about', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='student', to='coredata.Person'),
),
migrations.AlterField(
model_name='advisornote',
name='text',
field=models.TextField(help_text='Note about a student', verbose_name='Contents'),
),
migrations.AlterField(
model_name='advisornote',
name='unit',
field=models.ForeignKey(help_text='The academic unit that owns this note', on_delete=django.db.models.deletion.PROTECT, to='coredata.Unit'),
),
migrations.AlterField(
model_name='advisorvisit',
name='advisor',
field=models.ForeignKey(editable=False, help_text='The advisor that created the note', on_delete=django.db.models.deletion.PROTECT, related_name='+', to='coredata.Person'),
),
migrations.AlterField(
model_name='advisorvisit',
name='nonstudent',
field=models.ForeignKey(blank=True, help_text='The non-student that visited', null=True, on_delete=django.db.models.deletion.PROTECT, to='advisornotes.NonStudent'),
),
migrations.AlterField(
model_name='advisorvisit',
name='program',
field=models.ForeignKey(blank=True, help_text='The unit of the program the student is in', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='coredata.Unit'),
),
migrations.AlterField(
model_name='advisorvisit',
name='student',
field=models.ForeignKey(blank=True, help_text='The student that visited the advisor', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='coredata.Person'),
),
migrations.AlterField(
model_name='advisorvisit',
name='unit',
field=models.ForeignKey(help_text='The academic unit that owns this visit', on_delete=django.db.models.deletion.PROTECT, to='coredata.Unit'),
),
migrations.AlterField(
model_name='artifact',
name='category',
field=models.CharField(choices=[('INS', 'Institution'), ('PRO', 'Program'), ('OTH', 'Other')], max_length=3),
),
migrations.AlterField(
model_name='artifact',
name='name',
field=models.CharField(help_text='The name of the artifact', max_length=140),
),
migrations.AlterField(
model_name='artifact',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='autoslug', unique=True),
),
migrations.AlterField(
model_name='artifact',
name='unit',
field=models.ForeignKey(help_text='The academic unit that owns this artifact', on_delete=django.db.models.deletion.PROTECT, to='coredata.Unit'),
),
migrations.AlterField(
model_name='artifactnote',
name='advisor',
field=models.ForeignKey(editable=False, help_text='The advisor that created the note', on_delete=django.db.models.deletion.PROTECT, to='coredata.Person'),
),
migrations.AlterField(
model_name='artifactnote',
name='artifact',
field=models.ForeignKey(blank=True, help_text='The artifact that the note is about', null=True, on_delete=django.db.models.deletion.PROTECT, to='advisornotes.Artifact'),
),
migrations.AlterField(
model_name='artifactnote',
name='best_before',
field=models.DateField(blank=True, help_text='The effective date for this note', null=True),
),
migrations.AlterField(
model_name='artifactnote',
name='category',
field=models.CharField(choices=[('EXC', 'Exceptions'), ('WAI', 'Waivers'), ('REQ', 'Requirements'), ('TRA', 'Transfers'), ('MIS', 'Miscellaneous')], max_length=3),
),
migrations.AlterField(
model_name='artifactnote',
name='course',
field=models.ForeignKey(blank=True, help_text='The course that the note is about', null=True, on_delete=django.db.models.deletion.PROTECT, to='coredata.Course'),
),
migrations.AlterField(
model_name='artifactnote',
name='course_offering',
field=models.ForeignKey(blank=True, help_text='The course offering that the note is about', null=True, on_delete=django.db.models.deletion.PROTECT, to='coredata.CourseOffering'),
),
migrations.AlterField(
model_name='artifactnote',
name='file_attachment',
field=models.FileField(blank=True, max_length=500, null=True, storage=django.core.files.storage.FileSystemStorage(base_url=None, location='submitted_files'), upload_to=advisornotes.models.attachment_upload_to),
),
migrations.AlterField(
model_name='artifactnote',
name='text',
field=models.TextField(help_text='Note about a student', verbose_name='Contents'),
),
migrations.AlterField(
model_name='artifactnote',
name='unit',
field=models.ForeignKey(help_text='The academic unit that owns this note', on_delete=django.db.models.deletion.PROTECT, to='coredata.Unit'),
),
migrations.AlterField(
model_name='nonstudent',
name='email_address',
field=models.EmailField(blank=True, help_text='Needed only if you want to copy the student on notes', max_length=254, null=True),
),
migrations.AlterField(
model_name='nonstudent',
name='notes',
field=models.TextField(blank=True, help_text='Any general information for the student'),
),
migrations.AlterField(
model_name='nonstudent',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='autoslug', unique=True),
),
migrations.AlterField(
model_name='nonstudent',
name='start_year',
field=models.IntegerField(blank=True, help_text='The predicted/potential start year', null=True),
),
migrations.AlterField(
model_name='nonstudent',
name='unit',
field=models.ForeignKey(blank=True, help_text='The potential academic unit for the student', null=True, on_delete=django.db.models.deletion.PROTECT, to='coredata.Unit'),
),
]
| sfu-fas/coursys | advisornotes/migrations/0006_on_delete.py | Python | gpl-3.0 | 8,094 | [
"VisIt"
] | fad20dc48f8d817b4a24e9b1f5e10cf7dbcda9ad8e9047b8c03e1c0b2a4165aa |
#!/home/scollis/anaconda/bin/python
from __future__ import print_function
import netCDF4
#import time
def grab_hrrr_slice(outname, reftime_index, time1_index):
""" Grab select variables and reftime/time1 slice from HRRR file. """
ncep_hrrr = netCDF4.Dataset('http://thredds-jumbo.unidata.ucar.edu/thredds/dodsC/grib/NCEP/HRRR/CONUS_2p5km/TwoD')
# create the output NetCDF file, open for writing
out_dset = netCDF4.Dataset(outname, 'w')
# create the dimensions in the output netCDF file
for dim in ['x', 'y', 'isobaric']:
out_dset.createDimension(dim, len(ncep_hrrr.dimensions[dim]))
# copy selected data for select variables to output NetCDF file
var_names = ['Planetary_boundary_layer_height_surface',
'Dewpoint_temperature_isobaric',
'Temperature_isobaric',
'Pressure_reduced_to_MSL_msl',
'u-component_of_wind_isobaric',
'v-component_of_wind_isobaric']
for var_name in var_names:
print(var_name)
in_var = ncep_hrrr.variables[var_name]
in_units = ncep_hrrr.variables['time'].units
print(in_units)
out_var = out_dset.createVariable(var_name, in_var.dtype, in_var.dimensions[2:])
out_units = out_dset.createVariable(var_name + 'timestamp', 'S1')
out_units[:] = in_units[:]
out_var[:] = in_var[reftime_index, time1_index][:]
out_var.units = ncep_hrrr.variables['time'].units
pblh = out_dset.variables['Planetary_boundary_layer_height_surface'][0, 0:20]
print(pblh)
out_dset.timestamp = str(ncep_hrrr.variables['time'][reftime_index, time1_index]) + " :: " + ncep_hrrr.variables['time'].units
out_dset.close()
if __name__ == "__main__":
file_num = open('/data/san_store/dap_hrrr/current.txt', 'r+')
cnum = file_num.readlines()
num = int(cnum[-1])
print('hello world')
# reftime index to copy to output file
time1_index = 0 # time1 index to copy to output file
for time1_index in range(10):
grab_hrrr_slice('/data/san_store/dap_hrrr/selected_data_%03d_%03d.nc' %(num, time1_index), -1, time1_index)
#time.sleep(3600)
num += 1
file_num.write(str(num) + '\n')
file_num.close()
| scollis/SULI-2015 | code/get_hrrr.py | Python | bsd-2-clause | 2,256 | [
"NetCDF"
] | e6b80e9bcc67d11670b71e0bcac19f8f5e429f079c0b4634f53046c116215222 |
from bs4 import BeautifulSoup
import requests
from operator import itemgetter
class TopMembers:
def __init__(self, username, password):
self.username = username
self.password = password
self.members = []
#Start session
self.session = requests.Session()
def __popupulate_list(self):
if len(self.members) > 0:
return
soup = self.__getbs("http://care-tags.org/memberlist.php?start=0")
num_members = int(soup.find(class_="pagination").text.split()[0]) # Gets the thread count
curr_members = 0
while curr_members < num_members:
print "Working, page", curr_members/25 + 1, "of", num_members/25 + 1
soup = self.__getbs("http://care-tags.org/memberlist.php?start=" + str(curr_members))
memberlist = soup.find(id="memberlist").tbody
for member in memberlist.children:
if member.string is None: # Gets rid of the newlines
# Store username and post count pair in the list
user = member.td.a.string
postcount = int(member.find(class_="posts").string)
repcount = int(member.find_all(class_="posts")[1].string)
if postcount > 0:
rep_per = float(repcount)/float(postcount)
else:
rep_per = -1
self.members.append({"user": user, "posts": postcount, "rep": repcount, "rep_per":rep_per})
curr_members += 25
# Returns list containing top 10 users (sorted by post count) and their post count as a tuple
def gettopposters(self, return_count):
self.__popupulate_list()
postcountlist = []
for entry in self.members:
postcountlist.append((entry["user"], entry["posts"]))
postcountlist.sort(key=itemgetter(1), reverse=True)
# Take the top 10 users
return postcountlist[:return_count]
# Returns list containing top 10 users (sorted by reputation) and their reputation as a tuple
def mostreppedusers(self, return_count):
self.__popupulate_list()
userreplist = []
for entry in self.members:
userreplist.append((entry["user"], entry["rep"]))
userreplist.sort(key=itemgetter(1), reverse=True)
# Take top 10 users
return userreplist[:return_count]
# Returns list containing top return_count users (sorted by rep per post count) in the form
# (User, rep/post, posts, rep)
def most_rep_per_post_users(self, return_count):
self.__popupulate_list()
users = []
for entry in self.members:
users.append((entry["user"], entry["rep_per"]))
users.sort(key=itemgetter(1), reverse=True)
return users[:return_count]
def __getbs(self, url):
# Initial variables
username = self.username
password = self.password
website = "http://care-tags.org/ucp.php?mode=login"
# Login with provided credentials
sess = self.session
payload = {"username": username, "password": password,
"autologin": "on", "login": "login"}
sess.post(website, data=payload)
# Visit desired url
response = sess.get(url)
# Pull page
bs = BeautifulSoup(response.text)
return bs
if __name__ == "__main__":
# from optparse import OptionParser
# import sys
#
# parser = OptionParser()
#
# usage = "Usage: %prog -u <USERNAME> -p <PASSWORD> -n <USERNUMBER TO DISPLAY>"
# parser = OptionParser(usage)
#
# parser.add_option(
# "-u",
# "--username",
# dest="user",
# help="Needs a username to login with",
# default=None)
#
# parser.add_option(
# "-p",
# "--password",
# dest="pw",
# help="Need the password to login user",
# default=None)
#
# parser.add_option(
# "-n",
# "--number",
# dest="usernum",
# help="The user number to look up",
# type="int",
# default=2)
#
# if not len(sys.argv) == 7:
# parser.print_help()
# sys.exit()
#
# (options, args) = parser.parse_args()
hakuna = TopMembers("pythonbot", "autonomous")
print "Top posters by post count:"
for poster in hakuna.gettopposters(10):
print poster[0], poster[1]
print "Top posters by reputation:"
for poster in hakuna.mostreppedusers(10):
print poster[0], poster[1]
print "Top repped per post:"
for poster in hakuna.most_rep_per_post_users(10):
print poster[0], poster[1] | rbonick/care-tags_scripts | care_tags_topposts/topmembers.py | Python | mit | 4,751 | [
"VisIt"
] | 58ccdbf5d79ff7903e688f54d517e194b5b0d22d3a344cbe25a4c90d57ae2aa2 |
from ase.atoms import Atom, Atoms
from ase.calculators.singlepoint import SinglePointDFTCalculator
def read_gpaw_text(fileobj, index=-1):
if isinstance(fileobj, str):
fileobj = open(fileobj)
def index_startswith(lines, string):
for i, line in enumerate(lines):
if line.startswith(string):
return i
raise ValueError
lines = fileobj.readlines()
images = []
while True:
try:
i = lines.index('Unit Cell:\n')
except ValueError:
pass
else:
cell = []
pbc = []
for line in lines[i + 3:i + 6]:
words = line.split()
if len(words) == 5: # old format
cell.append(float(words[2]))
pbc.append(words[1] == 'yes')
else: # new format with GUC
cell.append([float(word) for word in words[3:6]])
pbc.append(words[2] == 'yes')
try:
i = lines.index('Positions:\n')
except ValueError:
break
atoms = Atoms(cell=cell, pbc=pbc)
for line in lines[i + 1:]:
words = line.split()
if len(words) != 5:
break
n, symbol, x, y, z = words
symbol = symbol.split('.')[0]
atoms.append(Atom(symbol, [float(x), float(y), float(z)]))
lines = lines[i + 5:]
try:
i = lines.index('-------------------------\n')
except ValueError:
e = None
else:
line = lines[i + 9]
assert line.startswith('Zero Kelvin:')
e = float(line.split()[-1])
try:
ii = index_startswith(lines, 'Fermi Level:')
except ValueError:
eFermi = None
else:
try:
eFermi = float(lines[ii].split()[2])
except ValueError: # we have two Fermi levels
fields = lines[ii].split()
def strip(string):
for rubbish in '[],':
string = string.replace(rubbish, '')
return string
eFermi = [float(strip(fields[2])),
float(strip(fields[3])) ]
try:
ii = index_startswith(lines, 'Total Charge:')
except ValueError:
q = None
else:
q = float(lines[ii].split()[2])
try:
ii = index_startswith(lines, 'Local Magnetic Moments')
except ValueError:
magmoms = None
else:
magmoms = []
for i in range(ii + 1, ii + 1 + len(atoms)):
iii, magmom = lines[i].split()[:2]
magmoms.append(float(magmom))
try:
ii = lines.index('Forces in eV/Ang:\n')
except ValueError:
f = None
else:
f = []
for i in range(ii + 1, ii + 1 + len(atoms)):
try:
x, y, z = lines[i].split()[-3:]
f.append((float(x), float(y), float(z)))
except (ValueError, IndexError), m:
raise IOError('Malformed GPAW log file: %s' % m)
if len(images) > 0 and e is None:
break
if e is not None or f is not None:
calc = SinglePointDFTCalculator(e, f, None, magmoms, atoms, eFermi)
atoms.set_calculator(calc)
if q is not None:
n = len(atoms)
atoms.set_charges([q / n] * n)
images.append(atoms)
lines = lines[i:]
if len(images) == 0:
raise IOError('Corrupted GPAW-text file!')
return images[index]
| slabanja/ase | ase/io/gpawtext.py | Python | gpl-2.0 | 3,757 | [
"ASE",
"GPAW"
] | d7df74c9b8c8c2c3b2d7628d0d9084f315f69df1fbb4f11dd0ccd082fd5d4b68 |
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Checks (rule) patterns associated with rows in tables, and adds an
additional column to each row (in each table) which captures
constraints in rule pattern.
"""
import dgen_core
# If true, print traces of how patterns are added.
# Useful to trace how patterns are generated for one (or more) tables,
# depending on the value of _restrict_to_tables.
_trace = False
# If defined, do a detailed trace of optimizing the given pattern
# Note: This flag is used to discover the cause of a "Row not reachable"
# or a "Table XXX malformed for pattern YYY" exception. It also can be
# used to see how the $pattern test was generated in the generated decoder
# state.
_trace_detailed_pattern = None
# If defined, only optimize patterns only in the given list of table names
_restrict_to_tables = None
def add_rule_pattern_constraints(decoder):
"""Adds an additional column to each table, defining additional
constraints assumed by rule patterns in rows.
"""
for table in decoder.tables():
_add_rule_pattern_constraints_to_table(decoder, table)
return decoder
def _process_table(table):
global _restrict_to_tables
return table.name in _restrict_to_tables if _restrict_to_tables else True
def _add_rule_pattern_constraints_to_table(decoder, table):
"""Adds an additional column to the given table, defining
additional constraints assumed by rule patterns in rows.
"""
global _trace
if _trace and _process_table(table):
print "*** processing table: %s ***" % table.name
constraint_col = len(table.columns())
table.add_column(dgen_core.BitField('$pattern', 31, 0))
for row in table.rows():
_add_rule_pattern_constraints_to_row(
decoder, table, row, constraint_col)
def _add_rule_pattern_constraints_to_row(decoder, table, row, constraint_col):
"""Adds an additional (constraint) colum to the given row,
defining additional constraints assumed by the rule
pattern in the row.
"""
global _trace
if _trace and _process_table(table):
print "consider: %s" % repr(row)
action = row.action
if action and action.__class__.__name__ == 'DecoderAction':
pattern = action.pattern()
if pattern:
rule_pattern = table.define_pattern(pattern, constraint_col)
if _process_table(table):
# Figure out what bits in the pattern aren't tested when
# reaching this row, and add a pattern to cover those bits.
reaching_pattern = RulePatternLookup.reaching_pattern(
decoder, table, row, pattern, constraint_col)
row.add_pattern(reaching_pattern)
else:
row.add_pattern(table.define_pattern(pattern, constraint_col))
return
# If reached, no explicit pattern defined, so add default pattern
row.add_pattern(table.define_pattern('-', constraint_col))
class RulePatternLookup(object):
"""Lookup state for finding what parts of an instruction rule pattern
survive to the corresponding row of a table. This information is
use to optimize how rule patterns are added.
Note: Implements a table stack so that a depth-first
search can be used. The stack is used to detect cycles,
and report the problem if detected.
Note: This data structure also implements a row stack. This
stack is not really needed. However, when debugging, it can
be very useful in describing how the current state was reached.
Hence, it is included for that capability.
"""
@staticmethod
def reaching_pattern(decoder, table, row, pattern_text, pattern_column):
"""Given a rule in the given row, of the given table, of the
given decoder, return the set of bit patterns not already
handled.
"""
# Create a look up state and then do a depth-first walk of possible
# matches, to find possible (unmatched) patterns reaching the
# given table and row.
state = RulePatternLookup(decoder, table, row,
pattern_text, pattern_column)
if state._trace_pattern():
print "*** Tracing pattern: %s ***" % pattern_text
print " table: %s" % table.name
print " row: %s" % repr(row)
# Do a depth-first walk of possible matches, to find
# possible (unmatched) patterns reaching the given table and
# row.
state._visit_table(decoder.primary)
# Verify that the row can be reached!
if not state.is_reachable:
raise Exception("Row not reachable: %s : %s"
% (table.name, repr(row)))
# Return the pattern of significant bits that could not
# be ruled out by table (parse) patterns.
return state.reaching_pattern
def _trace_pattern(self):
global _trace_detailed_pattern
if _trace_detailed_pattern:
return (_trace_detailed_pattern and
self.pattern_text == _trace_detailed_pattern)
def __init__(self, decoder, table, row, pattern_text, pattern_column):
"""Create a rule pattern lookup. Arguments are:
decoder - The decoder being processed.
table - The table in the decoder the row appears in.
row - The row we are associating a pattern with.
pattern - The (rule) pattern associated with a row.
Uses a depth-first search to find all possible paths
that can reach the given row in the given table, and
what bits were already tested in that path.
"""
self.decoder = decoder
self.table = table
self.row = row
self.pattern_text = pattern_text
# Define the corresponding pattern for the pattern text.
self.pattern = table.define_pattern(pattern_text, pattern_column)
# The following holds the stack of tables visited.
self.visited_tables = []
# The following holds the stack of rows (between tables) visited.
self.visited_rows = []
# The following holds the significant bits that have been shown
# as possibly unmatched. Initially, we assume no bits are significant,
# and let the lookup fill in bits found to be potentially significant.
self.reaching_pattern = dgen_core.BitPattern.always_matches(
self.pattern.column)
# The following holds the part of the current pattern that is still
# unmatched, or at least only partially matched, and therefore can't
# be removed.
self.unmatched_pattern = self.pattern
# The following defines if the pattern is reachable!
self.is_reachable = False
def _visit_table(self, table):
"""Visits the given table, trying to match all rows in the table."""
if self._trace_pattern():
print "-> visit %s" % table.name
if table in self.visited_tables:
# cycle found, quit.
raise Exception("Table %s malformed for pattern %s" %
(table.name, repr(self.pattern)))
return
self.visited_tables.append(table)
for row in table.rows():
self._visit_row(row)
self.visited_tables.pop()
if self._trace_pattern():
print "<- visit %s" % table.name
def _visit_row(self, row):
"""Visits the given row of a table, and updates the reaching pattern
if there are unmatched bits for the (self) row being processed.
"""
global _trace
self.visited_rows.append(row)
if self._trace_pattern():
print 'row %s' % row
# Before processing the row, use a copy of the unmatched pattern so
# that we don't pollute other path searches through the tables.
previous_unmatched = self.unmatched_pattern
self.unmatched_pattern = self.unmatched_pattern.copy()
matched = True # Assume true till proven otherwise.
# Try to match each pattern in the row, removing matched significant
# bits from the unmatched pattern.
for row_pattern in row.patterns:
match = self.unmatched_pattern.categorize_match(row_pattern)
if self._trace_pattern():
print ('match %s : %s => %s' %
(repr(self.unmatched_pattern), repr(row_pattern), match))
if match == 'match':
# Matches, i.e. all significant bits were used in the match.
self.unmatched_pattern = (
self.unmatched_pattern.remove_overlapping_bits(row_pattern))
if self._trace_pattern():
print ' unmatched = %s' % repr(self.unmatched_pattern)
elif match == 'consistent':
# Can't draw conclusion if any bits of pattern
# affect the unmatched pattern. Hence, ignore this
# pattern and continue matching remaining patterns
# in the row.
continue
elif match == 'conflicts':
# This row can't be followed because it conflicts with
# the unmatched pattern. Give up.
matched = False
break
else:
# This should not happen!
raise Exception("Error matching %s and %s!"
% (repr(row_pattern), repr(self.unmatched_pattern)))
if matched:
# Row (may) apply. Continue search for paths that can match
# the pattern.
if self._trace_pattern():
print "row matched!"
print "row: %s" % repr(row)
if row == self.row:
# We've reached the row in the table that we are trying to
# reach. Ssignificant bits remaining in unmatched_pattern
# still need to be tested. Union them into the reaching pattern.
old_reaching = self.reaching_pattern.copy()
self.reaching_pattern = self.reaching_pattern.union_mask_and_value(
self.unmatched_pattern)
if self._trace_pattern():
print (" reaching pattern: %s => %s" %
(repr(old_reaching), repr(self.reaching_pattern)))
self.is_reachable = True
if _trace:
print "*** pattern inference ***"
self._print_trace()
print ("implies: %s => %s" %
(repr(self.pattern), repr(self.unmatched_pattern)))
print ("resulting in: %s => %s" %
(repr(old_reaching), repr(self.reaching_pattern)))
else:
# if action is to call another table, continue search with that table.
if row.action and row.action.__class__.__name__ == 'DecoderMethod':
tbl = self.decoder.get_table(row.action.name)
if tbl:
self._visit_table(tbl)
else:
raise Exception("Error: action -> %s used, but not defined" %
row.action.name)
# Restore state back to before matching the row.
self.visited_rows.pop()
self.unmatched_pattern = previous_unmatched
def _print_trace(self):
for i in range(0, len(self.visited_tables)):
print "Table %s:" % self.visited_tables[i].name
if i < len(self.visited_rows):
print " %s" % self.visited_rows[i].patterns
| wilsonianb/nacl_contracts | src/trusted/validator_arm/dgen_add_patterns.py | Python | bsd-3-clause | 10,951 | [
"VisIt"
] | 4b705b7111d254ce9bfce323b81be7d48167f43c51a7a90752d7ef76bd61eae4 |
# Public names exported by this module (the table-operation filters).
__all__ = [
    'CombineTables',
    'ReshapeTable',
    'ExtractArray',
    'SplitTableOnArray',
    'AppendTableToCellData',
]

# Grouping label used when these filters are displayed (e.g. in a GUI menu).
__displayname__ = 'Table Operations'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista as pv
from .. import _helpers, interface
from ..base import FilterBase, FilterPreserveTypeBase
###############################################################################
###############################################################################
class CombineTables(FilterBase):
    """Merge the columns of two ``vtkTable`` inputs into one table.

    Both inputs must have the same number of rows. Time-varying tables
    are not handled, since differing timestep values between the two
    inputs would make the merge ambiguous.
    """

    __displayname__ = 'Combine Tables'
    __category__ = 'filter'

    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=2,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # This filter has no parameters.

    # CRITICAL for multiple input ports
    def FillInputPortInformation(self, port, info):
        """Used by pipeline. Necessary when dealing with multiple input ports"""
        # Every port accepts a table, so `port` needs no inspection here.
        info.Set(self.INPUT_REQUIRED_DATA_TYPE(), "vtkTable")
        return 1

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Fetch both port inputs and the output table.
        left = self.GetInputData(inInfo, 0, 0)
        right = self.GetInputData(inInfo, 1, 0)
        output = self.GetOutputData(outInfo, 0)
        # Start from a copy of the first table, then append the second's columns.
        output.DeepCopy(left)
        if left.GetNumberOfRows() != right.GetNumberOfRows():
            raise AssertionError('Tables must have the same number of rows')
        right_rows = right.GetRowData()
        for idx in range(right_rows.GetNumberOfArrays()):
            output.GetRowData().AddArray(right_rows.GetArray(idx))
        return 1

    def apply(self, table0, table1):
        """Run the algorithm on the two input tables"""
        self.SetInputDataObject(0, table0)
        self.SetInputDataObject(1, table1)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
# ---- Reshape Table ----#
class ReshapeTable(FilterBase):
    """This filter will take a ``vtkTable`` object and reshape it. This filter
    essentially treats ``vtkTable``s as 2D matrices and reshapes them using
    ``numpy.reshape`` with a configurable ordering (``'C'`` or ``'F'``;
    default ``'F'``). Unfortunately, data fields will be renamed arbitrarily
    because VTK data arrays require a name.
    """

    __displayname__ = 'Reshape Table'
    __category__ = 'filter'

    def __init__(self, **kwargs):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # Parameters (all overridable via keyword arguments):
        # target number of rows/columns for the reshaped table
        self.__nrows = kwargs.get('nrows', 1)
        self.__ncols = kwargs.get('ncols', 1)
        # output column names (padded with generated names if too few)
        self.__names = kwargs.get('names', [])
        # numpy.reshape ordering: 'C' (row-major) or 'F' (column-major)
        self.__order = kwargs.get('order', 'F')

    def _reshape(self, pdi, pdo):
        """Internal helper to perform the reshape of `pdi` into `pdo`."""
        # Get number of columns
        cols = pdi.GetNumberOfColumns()
        # Get number of rows
        rows = pdi.GetColumn(0).GetNumberOfTuples()
        # Resolve output column names: pad with generated names if too few
        # were given, error if too many, or generate all if none were given.
        if len(self.__names) != 0:
            num = len(self.__names)
            if num < self.__ncols:
                for i in range(num, self.__ncols):
                    self.__names.append('Field %d' % i)
            elif num > self.__ncols:
                raise _helpers.PVGeoError(
                    'Too many array names. `ncols` specified as %d and %d names given.'
                    % (self.__ncols, num)
                )
        else:
            self.__names = ['Field %d' % i for i in range(self.__ncols)]
        # Make a 2D numpy array and fill with data from input table
        data = np.empty((rows, cols))
        for i in range(cols):
            c = pdi.GetColumn(i)
            data[:, i] = interface.convert_array(c)
        # The reshape must conserve the total element count.
        if (self.__ncols * self.__nrows) != (cols * rows):
            raise _helpers.PVGeoError(
                'Total number of elements must remain %d. Check reshape dimensions.'
                % (cols * rows)
            )
        # Use numpy.reshape() to reshape data NOTE: only 2D because its a table
        # NOTE: column access of this reshape is not contiguous
        data = np.array(
            np.reshape(data.flatten(), (self.__nrows, self.__ncols), order=self.__order)
        )
        pdo.SetNumberOfRows(self.__nrows)
        # Add new array to output table and assign incremental names (e.g. Field0)
        for i in range(self.__ncols):
            # Make a contiguous array from the column we want
            col = np.array(data[:, i])
            # allow type to be determined by input
            # VTK arrays need a name. Set arbitrarily
            insert = interface.convert_array(
                col, name=self.__names[i]
            )  # array_type=vtk.VTK_FLOAT
            # pdo.AddColumn(insert) # these are not getting added to the output table
            # ... work around:
            pdo.GetRowData().AddArray(insert)  # NOTE: this is in the FieldData
        return pdo

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Perform the reshape directly into the output table
        self._reshape(pdi, pdo)
        return 1

    #### Setters and Getters ####

    def set_names(self, names):
        """Set names using a semicolon (;) separated string or a list of strings

        Args:
            names (string): a string of data array names for the reshaped table
                using a semicolon (;) to separate
        """
        # parse the names (a semicolon separated list of names)
        if isinstance(names, str):
            names = names.split(';')
        if self.__names != names:
            self.__names = names
            self.Modified()

    def add_name(self, name):
        """Use to append a name to the list of data array names for the output
        table.
        """
        self.__names.append(name)
        self.Modified()

    def get_names(self):
        """Returns a list of the names given to the new arrays"""
        return self.__names

    def set_number_of_columns(self, ncols):
        """Set the number of columns for the output ``vtkTable``"""
        # GUI widgets may hand over floats; coerce to int first.
        if isinstance(ncols, float):
            ncols = int(ncols)
        if self.__ncols != ncols:
            self.__ncols = ncols
            self.Modified()

    def set_number_of_rows(self, nrows):
        """Set the number of rows for the output ``vtkTable``"""
        if isinstance(nrows, float):
            nrows = int(nrows)
        if self.__nrows != nrows:
            self.__nrows = nrows
            self.Modified()

    def set_order(self, order):
        """Set the reshape order (``'C'`` or ``'F'``)"""
        if self.__order != order:
            self.__order = order
            self.Modified()
###############################################################################
class ExtractArray(FilterBase):
    """Pull a single data array out of a ``vtkDataSet`` and expose it as
    the sole column of a ``vtkTable``.
    """

    __displayname__ = 'Extract Array'
    __category__ = 'filter'

    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkDataSet',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # [field association, array name] selected for extraction.
        self.__input_array = [None, None]

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Grab the source dataset and the output table.
        source = self.GetInputData(inInfo, 0, 0)
        out_table = self.GetOutputData(outInfo, 0)
        # Note user has to select a single array to save out
        field, name = self.__input_array
        out_table.GetRowData().AddArray(
            _helpers.get_vtk_array(source, field, name))
        return 1

    def SetInputArrayToProcess(self, idx, port, connection, field, name):
        """Used to set the input array(s)

        Args:
            idx (int): the index of the array to process
            port (int): input port (use 0 if unsure)
            connection (int): the connection on the port (use 0 if unsure)
            field (int): the array field (0 for points, 1 for cells, 2 for
                field, and 6 for row)
            name (int): the name of the array
        """
        # Only mark the filter modified when a selection actually changes.
        for slot, value in ((0, field), (1, name)):
            if self.__input_array[slot] != value:
                self.__input_array[slot] = value
                self.Modified()
        return 1

    def apply(self, input_data_object, array_name):
        """Run the algorithm on the input data object, specifying the array name
        to extract.
        """
        self.SetInputDataObject(input_data_object)
        _, field = _helpers.search_for_array(input_data_object, array_name)
        self.SetInputArrayToProcess(0, 0, 0, field, array_name)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class SplitTableOnArray(FilterBase):
    """Partition a table's rows by the unique values of a chosen data
    array, producing one block per unique value in a
    ``vtkMultiBlockDataSet``.
    """

    __displayname__ = 'Split Table On Array'
    __category__ = 'filter'

    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkMultiBlockDataSet',
        )
        # [field association, array name] used as the split key.
        self.__input_array = [None, None]

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        table = self.GetInputData(inInfo, 0, 0)
        output = vtk.vtkMultiBlockDataSet.GetData(outInfo, 0)
        # Fetch the array selected as the split key.
        field, name = self.__input_array
        wrapped = dsa.WrapDataObject(table)
        split_values = _helpers.get_numpy_array(wrapped, field, name)
        unique_values = np.unique(split_values)
        # Route the split through pandas so row subsets are easy to take.
        df = interface.table_to_data_frame(table)
        output.SetNumberOfBlocks(len(unique_values))
        for blk, val in enumerate(unique_values):
            subset = interface.data_frame_to_table(df[df[name] == val])
            output.SetBlock(blk, subset)
            output.GetMetaData(blk).Set(
                vtk.vtkCompositeDataSet.NAME(), '{}{}'.format(name, val)
            )
        return 1

    def SetInputArrayToProcess(self, idx, port, connection, field, name):
        """Used to set the input array(s)

        Args:
            idx (int): the index of the array to process
            port (int): input port (use 0 if unsure)
            connection (int): the connection on the port (use 0 if unsure)
            field (int): the array field (0 for points, 1 for cells, 2 for
                field, and 6 for row)
            name (int): the name of the array
        """
        # Only mark the filter modified when a selection actually changes.
        for slot, value in ((0, field), (1, name)):
            if self.__input_array[slot] != value:
                self.__input_array[slot] = value
                self.Modified()
        return 1

    def apply(self, input_data_object, array_name):
        """Run the algorithm on the input data object, specifying the array name
        to use for the split.
        """
        self.SetInputDataObject(input_data_object)
        _, field = _helpers.search_for_array(input_data_object, array_name)
        self.SetInputArrayToProcess(0, 0, 0, field, array_name)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class AppendTableToCellData(FilterPreserveTypeBase):
    """Append the columns of a table onto a dataset's CellData.

    Port 0 is the dataset whose type is preserved; port 1 is a
    ``vtkTable`` whose row arrays are copied into the output's CellData.
    The table must have exactly one row per cell of the input dataset.
    """

    __displayname__ = 'Append Table to Cell Data'
    __category__ = 'filter'

    def __init__(self):
        FilterPreserveTypeBase.__init__(self, nInputPorts=2)
        self._preserve_port = 0  # ensure port 0's type is preserved
        self.__timesteps = None

    def _update_time_steps(self):
        """For internal use only: appropriately sets the timesteps."""
        # Merge the timestep values of both inputs and register them.
        merged = _helpers.get_combined_input_time_steps(self)
        self.__timesteps = _helpers.update_time_steps(self, merged, explicit=True)
        return 1

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        dataset = self.GetInputData(inInfo, 0, 0)  # Keep me!
        table = self.GetInputData(inInfo, 1, 0)  # add my data to the input
        output = self.GetOutputData(outInfo, 0)  # The output
        output.DeepCopy(dataset)
        # One table row must map onto one cell of the dataset.
        nrows = table.GetNumberOfRows()
        ncells = output.GetNumberOfCells()
        if nrows != ncells:
            raise _helpers.PVGeoError(
                'Number rows in table ({}) does not match number of cells ({})'.format(
                    nrows, ncells
                )
            )
        row_data = table.GetRowData()
        cell_data = output.GetCellData()
        for idx in range(row_data.GetNumberOfArrays()):
            cell_data.AddArray(row_data.GetArray(idx))
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to handle time variance"""
        self._update_time_steps()
        return 1

    def apply(self, dataset, table):
        """Update the algorithm and get the output data object

        Args:
            dataset (vtkDataSet): Any dataset with CellData
            table (vtkTable): table of data values that will be appended to
                ``dataset``'s CellData

        Return:
            vtkDataSet: The appended dataset as a new object
        """
        self.SetInputDataObject(0, dataset)
        self.SetInputDataObject(1, table)
        self.Update()
        return pv.wrap(self.GetOutput())

    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps."""
        if self.__timesteps is None:
            # Force at least one attempt to populate the timesteps.
            self._update_time_steps()
        if self.__timesteps is None:
            return None
        return self.__timesteps
| banesullivan/ParaViewGeophysics | PVGeo/filters/tables.py | Python | bsd-3-clause | 15,362 | [
"ParaView",
"VTK"
] | dba7903a7ca9da3207b669327448361341c39f8ce6bf3b78cf408154026d26a2 |
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for gdmlogin."""
from .script import Script
| ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/scripts/apps/gdmlogin/__init__.py | Python | gpl-3.0 | 845 | [
"ORCA"
] | 7c252a75f4e7db2bc99d67fcc96997a21279067d3dce0600833bdb3f21ccdced |
# KSparseVector.py
#
# This python script uses distributed-memory library
# Elemental.
#
# Copyright 2012, Hatef Monajemi (monajemi@stanford.edu)
# http://www.stanford.edu/~monajemi
def KSparseVector(k, N, field='R'):
    """Build a random k-sparse N-by-1 distributed vector.

    Places +/-1 coefficients (signs of Gaussian draws) at k randomly
    chosen positions of an Elemental ``DistMultiVec``.

    :param int k: number of nonzero entries to place.
    :param int N: length of the vector.
    :param str field: 'R' for real entries (the only supported field).
    :return: ``El.DistMultiVec`` of size N-by-1 with (at most) k nonzeros.
    :raises SystemExit: if ``field`` is not 'R'.
    """
    # NOTE(review): this module defines no imports yet uses `np`, `El`
    # and `sys` -- confirm the importing context provides them, or add
    # `import numpy as np`, `import El`, `import sys` at the top.
    #
    # Draw k support indices uniformly from [0, N-1].
    # (`np.random.random_integers` was removed from numpy; `randint`'s
    # upper bound is exclusive, so N keeps the same inclusive range.)
    # NOTE: duplicate indices are possible, so the vector may end up with
    # fewer than k distinct nonzeros.
    j0 = np.random.randint(0, N, k)
    if field == 'R':
        coef = El.Matrix()
        El.Gaussian(coef, k, 1)
        # BUG FIX: numpy has no `np.sgn`; the sign function is `np.sign`.
        El.EntrywiseMap(coef, lambda x: np.sign(x))  # k entries: -1 or 1 randomly
        # Scatter the signed coefficients into the distributed vector.
        x0 = El.DistMultiVec()
        x0.Resize(N, 1)
        for i in range(k):
            x0.Set(j0[i], coef.Get(i, 0))
    else:
        sys.exit('Not Implemented yet')
    return x0
| monajemi/TensorPT | Python/DEP/kSparseVector.py | Python | bsd-3-clause | 788 | [
"Gaussian"
] | 34155c7335bab69f4285050f4fc154bc26409aae2d4094d4007d38fafe742400 |
# (c) 2015 - Jaguar Land Rover.
#
# Mozilla Public License 2.0
#
# Library to process updates
import json
import os
import subprocess
import dbus
import swm
#
# Software operation
# Contains a single software operation
# loaded from a manifest file.
#
class SoftwareOperation:
    """A single software update operation loaded from a manifest.

    Maps the manifest's "operation" name to the GENIVI DBUS service
    path, method name and ordered call arguments needed to execute it,
    and can dispatch the resulting call as an SWM transaction.
    """

    def __init__(self, manifest, op_obj):
        """Parse one software-operation object from the manifest.

        Args:
            manifest: The enclosing manifest; supplies blacklists, the
                image mount point, and the allow_downgrade flag used as
                argument defaults.
            op_obj: Dict describing this operation; must contain 'id'
                and 'operation' keys plus the operation's mandatory
                arguments.
        Raises:
            Exception: if 'id' or 'operation' is missing, the operation
                is unknown, or a mandatory argument is absent.
        """
        # Maps operation name -> (DBUS path, method, argument descriptors).
        # Each descriptor is (element_name, default); a default of None
        # marks that element as mandatory in the manifest.
        self.operation_descriptor = {
            'installPackage': (
                # Path to DBUS and object.
                "org.genivi.PackageManager",
                # Method to call
                "installPackage",
                # Elements to extract from software operations object and to
                # provide as DBUS call arguments.
                # Second element in tuple is default value. None -> Mandatory
                [ ("image", None),
                  ("blacklistedPackages", dbus.Array(manifest.blacklisted_packages, "s"))
            ]),

            'upgradePackage': ( "org.genivi.PackageManager",
                                "upgradePackage",
                                [ ("image", None),
                                  ("blacklisted_packages", dbus.Array(manifest.blacklisted_packages, "s")),
                                  ("allow_downgrade", manifest.allow_downgrade)
                              ]),

            'removePackage': ( "org.genivi.PackageManager",
                               "removePackage",
                               [ ("package_id", None) ]),

            'startComponents': ( "org.genivi.LifecycleManager",
                                 "startComponents",
                                 [ ("components", None) ]),

            'stopComponents': ( "org.genivi.LifecycleManager",
                                "stopComponents",
                                [ ("components", None) ]),

            'reboot': ( "org.genivi.LifecycleManager",
                        "reboot",
                        [ ("bootParameters", "") ]),

            'createDiskPartition': ( "org.genivi.PartitionManager",
                                     "createDiskPartition",
                                     [ ("disk", None), ("partition_number", None),
                                       ("type", None), ("start", None), ("size", None),
                                       ("guid", ""), ("name", "") ]),

            'resizeDiskPartition': ( "org.genivi.PartitionManager", "resizeDiskPartition",
                                     [ ("disk", None), ("partition_number", None),
                                       ("start", None), ("size", None) ]),

            'deleteDiskPartition': ( "org.genivi.PartitionManager", "deleteDiskPartition",
                                     [ ("disk", None), ("partition_number", None) ]),

            'writeDiskPartition': ( "org.genivi.PartitionManager", "writeDiskPartition",
                                    [ ("disk", None), ("partition_number", None),
                                      ("image", None),
                                      ("blacklisted_partitions", dbus.Array(manifest.blacklisted_partitions, "s"))
                                  ]),

            'patchDiskPartition': ( "org.genivi.PartitionManager", "patchDiskPartition",
                                    [ ("disk", None), ("partition_number", None),
                                      ("image", None),
                                      ("blacklisted_partitions", dbus.Array(manifest.blacklisted_partitions, "s"))
                                  ]),

            # FIXME: We need to find a specific module loader
            #        that handles the target module.
            #        org.genivi.module_loader needs to be replaced
            #        by org.genivi.module_loader_ecu1
            #        This should be done programmatically
            'flashModuleFirmwareEcu1': ( "org.genivi.ModuleLoaderEcu1", "flashModuleFirmware",
                                         [ ("image", None),
                                           ("blacklisted_firmware", dbus.Array(manifest.blacklisted_firmware, "s")),
                                           ("allow_downgrade", manifest.allow_downgrade)
                                         ])
        }
        print "  SoftwareOperation(): Called: {}".format(op_obj)

        # Retrieve unique id for sofware operation
        if not 'id' in op_obj:
            raise Exception("SoftwareOperation(): 'id' not defined in: {}".format(op_obj))

        self.operation_id = op_obj['id']
        self.arguments = []
        # Optional bookkeeping fields with their documented defaults.
        self.time_estimate = op_obj.get('time_estimate', 0)
        self.description = op_obj.get('description', '')
        self.hmi_message = op_obj.get('hmi_message', '')
        self.on_failure = op_obj.get('on_failure', 'continue')

        # Retrieve operation
        if not 'operation' in op_obj:
            raise Exception("'operation' not defined in operation {}.".format(self.operation_id))

        operation = op_obj['operation']

        # Retrieve the operation descriptor
        if operation not in self.operation_descriptor:
            raise Exception("operation {} not supported.".format(operation))

        # Store the DBUS path (org.genivi.xxx), method, and elements from
        # software operations to provide with DBUS call.
        (self.path, self.method, arguments) = self.operation_descriptor[operation]

        print "  SoftwareOperation(): operation_id:  {}".format(self.operation_id)
        print "  SoftwareOperation(): operation:     {}".format(operation)
        print "  SoftwareOperation(): time_estimate: {}".format(self.time_estimate)
        print "  SoftwareOperation(): description:   {}".format(self.description)
        print "  SoftwareOperation(): on_failure:    {}".format(self.on_failure)
        print "  SoftwareOperation(): dbus path:     {}".format(self.path)
        print "  SoftwareOperation(): dbus method:   {}".format(self.method)

        # Go through the list of arguments and extract them
        # from the manifest's software operation object
        # These arguments will be provided, in order, to the DBUS call
        for (argument, default_value) in arguments:
            if not argument in op_obj:
                # Argument was not present as element in software operation
                # and default was None, specifying that the argument
                # is mandatory.
                if default_value == None:
                    print "  SoftwareOperation(): Mandatory element {} not defined in operation".format(argument)
                    raise Exception("Element {} not defined in operation: {}".format(argument,self.operation_id))
                else:
                    # Argument not found in software operation, but
                    # we have a default value
                    value = default_value
                    print "  SoftwareOperation(): method_arg {} = {} (default)".format(argument, value)
            else:
                value = op_obj[argument]
                print "  SoftwareOperation(): method_arg {} = {} (from manifest)".format(argument, value)

            #
            # Ugly workaround.
            # We need to prepend the image path with
            # the mount point so that the recipient (partition_manager, etc)
            # can open it.
            #
            if argument == "image":
                self.arguments.append("{}/{}".format(manifest.mount_point, value))
            else:
                self.arguments.append(value)

        print "  ----"

    def send_transaction(self, transaction_id):
        """Dispatch this operation as a DBUS call tagged with the given
        SWM transaction id.

        Returns:
            True if the DBUS call was issued, False if it raised.
        """
        try:
            swm.dbus_method(self.path, self.method, transaction_id, *self.arguments)
        except Exception as e:
            print "SoftwareOperation.send_transaction({}): Exception: {}".format(self.operation_id, e)
            return False

        return True
| magnusfeuer/genivi_software_management | software_loading_manager/software_operation.py | Python | mpl-2.0 | 7,694 | [
"Jaguar"
] | 63ab654a7a202fa608593f580618ef3aad915cb48045535ada11f35d4e27e3aa |
"""Miscellaneous utilities."""
import numpy as np
import logging
class Normalizer():
    """Abstract interface for mapping simulation inputs to and from a
    normalized coordinate system.

    Subclasses must implement both `normalize` and `unnormalize`.
    """

    def normalize(self, X):
        """Map the input points `X` into normalized coordinates."""
        raise NotImplementedError()

    def unnormalize(self, X):
        """Map normalized points `X` back to the original coordinates."""
        raise NotImplementedError()
class BoundedNormalizer(Normalizer):
    """Normalizer for simulation inputs constrained to a box domain.

    Maps points between the hyperrectangle [lb, ub] and [-1, 1]^m.

    :cvar ndarray lb: 1-by-m matrix of lower bounds on the simulation
        inputs.
    :cvar ndarray ub: 1-by-m matrix of upper bounds on the simulation
        inputs.

    **See Also**

    utils.misc.UnboundedNormalizer
    """
    lb, ub = None, None

    def __init__(self, lb, ub):
        """Store the bounds, reshaped to 1-by-m row vectors.

        :param ndarray lb: m-by-1 matrix of lower bounds on the
            simulation inputs.
        :param ndarray ub: m-by-1 matrix of upper bounds on the
            simulation inputs.
        """
        m = lb.size
        self.lb = lb.reshape((1, m))
        self.ub = ub.reshape((1, m))

    def normalize(self, X):
        """Shift and scale points from [lb, ub] into [-1, 1]^m.

        :param ndarray X: M-by-m array of points with components between
            `lb` and `ub`.
        :return: M-by-m array with components between -1 and 1.
        :rtype: ndarray
        """
        X, M, m = process_inputs(X)
        # Affine map sending lb -> -1 and ub -> +1, broadcast row-wise.
        span = self.ub - self.lb
        return 2.0 * (X - self.lb) / span - 1.0

    def unnormalize(self, X):
        """Shift and scale points from [-1, 1]^m back into [lb, ub].

        :param ndarray X: M-by-m array of points with components between
            -1 and 1.
        :return: M-by-m array with components between `lb` and `ub`.
        :rtype: ndarray
        """
        X, M, m = process_inputs(X)
        # Inverse of the affine map used by `normalize`.
        span = self.ub - self.lb
        return span * (X + 1.0) / 2.0 + self.lb
class UnboundedNormalizer(Normalizer):
    """Normalizer for Gaussian-distributed simulation inputs.

    Maps draws from N(mu, C) to draws from the standard multivariate
    normal (and back) via the Cholesky factor of C.

    :cvar ndarray mu: 1-by-m mean of the Gaussian simulation inputs.
    :cvar ndarray L: m-by-m lower-triangular Cholesky factor of the
        covariance matrix of the Gaussian simulation inputs.

    **See Also**

    utils.misc.BoundedNormalizer

    **Notes**

    The Gaussian weight function associated with the inputs must have a
    full-rank covariance so that the Cholesky factorization exists.
    """
    mu, L = None, None

    def __init__(self, mu, C):
        """Store the mean and the Cholesky factor of the covariance.

        :param ndarray mu: m-by-1 mean of the Gaussian simulation
            inputs.
        :param ndarray C: m-by-m covariance matrix of the Gaussian
            simulation inputs (positive definite).
        """
        self.mu = mu.reshape((1, mu.size))
        self.L = np.linalg.cholesky(C)

    def normalize(self, X):
        """Transform draws from N(mu, C) into standard-normal draws.

        :param ndarray X: M-by-m array whose rows are draws from a
            Gaussian with mean `mu` and covariance `C`.
        :return: M-by-m array whose rows are standard multivariate
            normal draws.
        :rtype: ndarray
        """
        X, M, m = process_inputs(X)
        # Whiten: solve L z = (x - mu)^T for each row of X.
        centered = X - self.mu
        return np.linalg.solve(self.L, centered.T).T

    def unnormalize(self, X):
        """Transform standard-normal draws into draws from N(mu, C).

        :param ndarray X: M-by-m array whose rows are standard
            multivariate normal draws.
        :return: M-by-m array whose rows are draws from a Gaussian with
            mean `mu` and covariance `C`.
        :rtype: ndarray
        """
        X, M, m = process_inputs(X)
        # Color: x = z L^T + mu.
        return np.dot(X, self.L.T) + self.mu
def process_inputs(X):
    """Validate that `X` is a 2-d array of input points.

    :param ndarray X: M-by-m array of input points.
    :return: X, the (reshaped) input array.
    :rtype: ndarray
    :return: M, number of rows in `X`.
    :rtype: int
    :return: m, number of columns in `X`.
    :rtype: int
    :raises ValueError: if `X` is not two dimensional.
    """
    shape = X.shape
    if len(shape) != 2:
        raise ValueError('The inputs X should be a two-d numpy array.')
    M, m = shape
    return X.reshape((M, m)), M, m
def process_inputs_outputs(X, f):
    """Validate a matrix of inputs and its matching column of outputs.

    :param ndarray X: M-by-m array of input points.
    :param ndarray f: M-by-1 array of scalar outputs.
    :return: X, the (reshaped) input array.
    :rtype: ndarray
    :return: f, the (reshaped) output array.
    :rtype: ndarray
    :return: M, number of rows in `X`.
    :rtype: int
    :return: m, number of columns in `X`.
    :rtype: int
    :raises ValueError: if `X` or `f` is not two dimensional.
    :raises Exception: if the row counts disagree or `f` is not a
        single column.
    """
    X, M, m = process_inputs(X)
    if len(f.shape) != 2:
        raise ValueError('The outputs f should be a two-d numpy array.')
    Mf, mf = f.shape
    # Inputs and outputs must pair up row-for-row.
    if Mf != M:
        raise Exception('Different number of inputs and outputs.')
    # Only scalar-valued functions are supported.
    if mf != 1:
        raise Exception('Only scalar-valued functions.')
    return X, f.reshape((M, 1)), M, m
def conditional_expectations(f, ind):
    """Compute conditional expectations and variances for function values.

    :param ndarray f: An ndarray of function evaluations.
    :param ndarray[int] ind: Index array that tells which values of `f`
        correspond to the same value for the active variable.
    :return: (Ef, Vf) -- conditional expectations and conditional variances,
        each of shape (n, 1) where n is the number of distinct indices.
    :rtype: tuple of ndarray

    **Notes**

    All entries of `f` sharing the same entry in `ind` are averaged
    together; the indices correspond to values of the active variables.
    """
    num_groups = int(np.amax(ind)) + 1
    samples_per_group = np.sum(ind == 0)
    logging.getLogger(__name__).debug(
        'Computing {:d} conditional averages with {:d} MC samples.'.format(
            num_groups, samples_per_group))
    Ef = np.zeros((num_groups, 1))
    Vf = np.zeros((num_groups, 1))
    for g in range(num_groups):
        group_values = f[ind == g]
        Ef[g], Vf[g] = np.mean(group_values), np.var(group_values)
    return Ef, Vf
# thanks to Trent for these functions!!!
def atleast_2d_col(A):
    """Return the input `A` as a 2d column array."""
    return atleast_2d(A, 'col')

def atleast_2d_row(A):
    """Return the input `A` as a 2d row array."""
    return atleast_2d(A, 'row')

def atleast_2d(A, oned_as='row'):
    """Ensure the array `A` is at least two dimensions.

    :param ndarray A: matrix (scalars, lists and tuples are converted)
    :param str oned_as: Should be either 'row' or 'col'. It determines
        whether the array `A` should be expanded as a 2d row or 2d column.
    :return: `A` with at least two dimensions.
    :rtype: ndarray
    :raises Exception: if `oned_as` is neither 'row' nor 'col'.
    """
    # Coerce scalars, lists and tuples into an ndarray first.
    if not isinstance(A, (np.ndarray, np.matrixlib.defmatrix.matrix)):
        if not isinstance(A, (list, tuple)):
            A = [A]
        A = np.array(A)
    # Expand a 1-d array along the requested axis.
    if np.ndim(A) < 2:
        if oned_as == 'row':
            A = A[None, :]
        elif oned_as == 'col':
            A = A[:, None]
        else:
            # Fixed: the original used the Python-2-only statement form
            # `raise Exception , "..."`, which is a SyntaxError on Python 3.
            raise Exception("oned_as must be 'row' or 'col' ")
    return A
| meyersw3476/active_subspaces | active_subspaces/utils/misc.py | Python | mit | 8,530 | [
"Gaussian"
] | 48e603242765287601ce9c883e491ce1aac2736d2edff8c865d54f64e82bb859 |
##########################################################################
#
# Copyright (c) 2015, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferImage
# Command suitable for use with `NodeMenu.append()`.
def nodeMenuCreateCommand( menu ) :
	"""Creates a Blur node with its radius components ganged together."""
	node = GafferImage.Blur()
	node["radius"].gang()
	return node
Gaffer.Metadata.registerNode(
GafferImage.Blur,
"description",
"""
Applies a gaussian blur to the image.
""",
plugs = {
"radius" : [
"description",
"""
The size of the blur in pixels. This can be varied independently
in the x and y directions, and fractional values are supported for
fine control.
""",
],
"boundingMode" : [
"description",
"""
The method used when the filter references pixels outside the
input data window.
""",
"preset:Black", GafferImage.Sampler.BoundingMode.Black,
"preset:Clamp", GafferImage.Sampler.BoundingMode.Clamp,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"expandDataWindow" : [
"description",
"""
Expands the data window to include the external pixels
which the blur will bleed onto.
"""
]
}
)
| lucienfostier/gaffer | python/GafferImageUI/BlurUI.py | Python | bsd-3-clause | 2,831 | [
"Gaussian"
] | dd00b6b1baa72750a3997454297dea605ed4dd18ccf7932f69d65e7c6fad9bdf |
#!/usr/bin/env phenix.python
"""
xds_plot_integrate.py
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
"""
TODO: plot differences in direct beam and rotation axis
"""
import sys
import re
import collections
from cctbx import sgtbx
class IntegrateLp:
    """Parser for an XDS INTEGRATE.LP file.

    After parsing, per-frame statistics are available in the parallel lists
    `frames`, `scales`, `overloads`, `rejecteds`, `sigmads`, `sigmars`, and
    per-block refined parameters in `blockparams`, an OrderedDict keyed by
    the tuple of image numbers belonging to each processing block.
    """
    def __init__(self, lpin):
        # `lpin` may be None to construct an empty object without parsing.
        if lpin is not None:
            self.parse(lpin)
    # __init__()

    def parse(self, int_lp):
        """Read the file named `int_lp` and populate the attributes."""
        # Per-frame statistics row:
        # image, scale, nbkg, novl, newald, nstrong, nrej, sigmaD, sigmaR
        re_im = re.compile("^ (.....) 0 +([0-9\.]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9\.]+) +([0-9\.]+)")
        re_cell = re.compile("^ UNIT CELL PARAMETERS *([0-9\.]+) *([0-9\.]+) *([0-9\.]+) *([0-9\.]+) *([0-9\.]+) *([0-9\.]+)")
        re_rotation = re.compile("^ CRYSTAL ROTATION OFF FROM INITIAL ORIENTATION *([-0-9\.]+) *([-0-9\.]+) *([-0-9\.]+)") #
        # NOTE(review): the next three patterns are compiled but never
        # searched below -- see the module-level TODO about plotting direct
        # beam and rotation axis differences.
        re_mosaicity = re.compile("^ CRYSTAL MOSAICITY \(DEGREES\) *([0-9\.]+)") #
        re_axis = re.compile("^ LAB COORDINATES OF ROTATION AXIS *([-0-9\.]+) *([-0-9\.]+) *([-0-9\.]+)") #
        re_beam = re.compile("^ DIRECT BEAM COORDINATES \(REC\. ANGSTROEM\) *([-0-9\.]+) *([-0-9\.]+) *([-0-9\.]+)") #
        re_dist = re.compile("^ CRYSTAL TO DETECTOR DISTANCE \(mm\) *([-0-9\.]+)")
        re_dev_spot = re.compile("^ STANDARD DEVIATION OF SPOT POSITION \(PIXELS\) *([0-9\.]+)")
        re_dev_spindle = re.compile("^ STANDARD DEVIATION OF SPINDLE POSITION \(DEGREES\) *([0-9\.]+)")
        re_orig = re.compile("^ DETECTOR ORIGIN \(PIXELS\) AT *([0-9\.]+) *([0-9\.]+)")
        images = [] # as key of params; image numbers of the current block
        self.cell_changes = []
        self.blockparams = collections.OrderedDict()
        # clear_flag is raised once block-level parameters have been seen,
        # so the next per-frame line starts a fresh block of images.
        clear_flag = False
        self.frames = []
        self.scales, self.overloads, self.rejecteds, self.sigmads, self.sigmars = [], [], [], [], []
        self.space_group = None
        # Read INTEGRATE.LP file
        for l in open(int_lp):
            r_im = re_im.search(l)
            r_cell = re_cell.search(l)
            r_rotation = re_rotation.search(l)
            r_dist = re_dist.search(l)
            r_spot = re_dev_spot.search(l)
            r_spindle = re_dev_spindle.search(l)
            r_orig = re_orig.search(l)
            if l.startswith(" SPACE_GROUP_NUMBER="):
                sgnum = int(l.strip().split()[-1])
                # 0 means "unknown" in XDS; only build a group for valid numbers.
                if sgnum > 0:
                    self.space_group = sgtbx.space_group_info(sgnum).group()
            if r_im:
                if clear_flag:
                    images = []
                    clear_flag = False
                image, scale, nbkg, novl, newald, nstrong, nrej, sigmad, sigmar = r_im.groups()
                images.append(int(image))
                # for plot
                self.frames.append(int(image))
                self.scales.append(scale)
                self.overloads.append(int(novl))
                self.rejecteds.append(int(nrej))
                self.sigmads.append(sigmad)
                self.sigmars.append(sigmar)
            if r_cell:
                #a, b, c, alpha, beta, gamma = r_cell.groups()
                self.blockparams.setdefault(tuple(images), {})["cell"] = r_cell.groups()
                self.cell_changes.append((images, r_cell.groups()))
                clear_flag = True
            if r_rotation:
                self.blockparams.setdefault(tuple(images), {})["rotation"] = r_rotation.groups()
                clear_flag = True
            if r_dist:
                self.blockparams.setdefault(tuple(images), {})["dist"] = r_dist.group(1)
                clear_flag = True
            if r_spot:
                self.blockparams.setdefault(tuple(images), {})["spot"] = r_spot.group(1)
                clear_flag = True
            if r_spindle:
                self.blockparams.setdefault(tuple(images), {})["spindle"] = r_spindle.group(1)
                clear_flag = True
            if r_orig:
                self.blockparams.setdefault(tuple(images), {})["orig"] = r_orig.groups()
                clear_flag = True
            # The 9-area sigma tables are taken as the last 9 whitespace-
            # separated fields of their respective lines.
            if l.startswith(" SIGMAB (degree)"):
                self.blockparams.setdefault(tuple(images), {})["sigmab9"] = l.strip().split()[-9:]
                clear_flag = True
            if l.startswith(" SIGMAR (degree)"):
                self.blockparams.setdefault(tuple(images), {})["sigmar9"] = l.strip().split()[-9:]
                clear_flag = True
    # parse_integrate_lp()
# class IntegrateLp
class CellConstraints:
    """Answers which unit cell parameters are fixed by the crystal system."""

    def __init__(self, space_group):
        # Cache the crystal system name (e.g. "Monoclinic", "Cubic").
        self.cs = space_group.crystal_system()
    # __init__()

    def is_b_equal_a(self):
        """True when symmetry forces b == a."""
        return self.cs in ("Tetragonal", "Hexagonal", "Trigonal", "Cubic")

    def is_c_equal_a_b(self):
        """True when symmetry forces c == a == b."""
        return self.cs == "Cubic"

    def is_angle_constrained(self, angle):
        """True when the named cell angle is fixed by symmetry."""
        assert angle in ("alpha", "beta", "gamma")
        if self.cs == "Triclinic":
            return False
        if self.cs == "Monoclinic":
            # Only beta is free in a monoclinic lattice.
            return angle != "beta"
        return True
    # is_angle_constrained()
# class CellConstraints
def make_plot(lp, log_out):
    """Write CCP4 loggraph ($TABLE) plots of INTEGRATE.LP statistics.

    Three tables are written: per-frame statistics, per-block refined
    parameters, and the 9-area SIGMAB/SIGMAR values per block.

    :param lp: a parsed IntegrateLp object
    :param str log_out: output file name for the loggraph tables

    Fixed: the original used the Python-2-only ``print >>ofs, ...``
    statement (a SyntaxError on Python 3); the equivalent ``ofs.write``
    calls below produce byte-identical output. The output file is now
    also closed explicitly.
    """
    ofs = open(log_out, "w")
    # ---- Table 1: per-frame statistics -------------------------------
    ofs.write("$TABLE: Parameters estimated for each frame:\n")
    ofs.write("$GRAPHS\n")
    ofs.write(":scales")
    ofs.write(":A:1,2:\n")
    ofs.write(":number of overloaded reflections")
    ofs.write(":A:1,3:\n")
    ofs.write(":number of unexpected reflections")
    ofs.write(":A:1,4:\n")
    ofs.write(":SIGMAB (beam divergence e.s.d.)")
    ofs.write(":A:1,5:\n")
    ofs.write(":SIGMAR (reflecting range e.s.d.)")
    ofs.write(":A:1,6:\n")
    ofs.write("$$\n")
    ofs.write("Frame scale overlods nrej sigmaD sigmaM $$\n$$\n")
    for f, scale, novl, nrej, sd, sm in zip(lp.frames, lp.scales, lp.overloads, lp.rejecteds, lp.sigmads, lp.sigmars):
        ofs.write("%5d %s %d %d %s %s\n" % (f, scale, novl, nrej, sd, sm))
    ofs.write("$$\n")
    ofs.write("\n\n\n")
    # ---- Table 2: per-block refined parameters -----------------------
    ofs.write("$TABLE: Parameters estimated for each block:\n")
    ofs.write("$GRAPHS\n")
    ofs.write(":unit cell length a")
    ofs.write(":A:1,2:\n")
    # Only plot the cell parameters that are free in this crystal system.
    cellconstr = CellConstraints(lp.space_group)
    if not cellconstr.is_b_equal_a():
        ofs.write(":unit cell length b")
        ofs.write(":A:1,3:\n")
    if not cellconstr.is_c_equal_a_b():
        ofs.write(":unit cell length c")
        ofs.write(":A:1,4:\n")
    if not cellconstr.is_angle_constrained("alpha"):
        ofs.write(":unit cell angle alpha")
        ofs.write(":A:1,5:\n")
    if not cellconstr.is_angle_constrained("beta"):
        ofs.write(":unit cell angle beta")
        ofs.write(":A:1,6:\n")
    if not cellconstr.is_angle_constrained("gamma"):
        ofs.write(":unit cell angle gamma")
        ofs.write(":A:1,7:\n")
    ofs.write(":rotations off from initial orientation")
    ofs.write(":A:1,8,9,10:\n")
    ofs.write(":distance")
    ofs.write(":A:1,11:\n")
    ofs.write(":deviations from predicted positions")
    ofs.write(":A:1,12,13:\n")
    ofs.write(":beam center")
    ofs.write(":A:1,14,15:\n")
    ofs.write("$$\n")
    ofs.write("#image a b c alpha beta gamma rotx roty rotz dist spot spindle orgx orgy$$\n$$\n")
    for images, param in sorted(lp.blockparams.items()):
        for i in images:
            # "D" marks parameters not reported for this block.
            ofs.write(" ".join(["%4d " % i,
                                " ".join(param.get("cell", ["D"] * 6)),
                                " ".join(param.get("rotation", ["D"] * 3)),
                                param.get("dist", "D"),
                                param.get("spot", "D"),
                                param.get("spindle", "D"),
                                " ".join(param.get("orig", ["D"] * 2))]) + "\n")
    ofs.write("$$\n")
    ofs.write("\n\n\n")
    # ---- Table 3: 9-area sigmaB / sigmaR per block -------------------
    ofs.write("$TABLE: sigmaB and sigmaR on 9 areas for each block:\n")
    ofs.write("$GRAPHS\n")
    ofs.write(":SIGMAB")
    ofs.write(":A:1,2,3,4,5,6,7,8,9,10:\n")
    ofs.write(":SIGMAR")
    ofs.write(":A:1,11,12,13,14,15,16,17,18,19:\n")
    ofs.write("$$\n")
    ofs.write("#image %s %s$$\n$$\n" % (" ".join(["sigmab%d"%x for x in range(1,10)]), " ".join(["sigmar%d"%x for x in range(1,10)])))
    for images, param in sorted(lp.blockparams.items()):
        for i in images:
            ofs.write(" ".join(["%4d " % i,
                                " ".join(param["sigmab9"]),
                                " ".join(param["sigmar9"])]) + "\n")
    ofs.write("$$\n")
    ofs.write("\n\n\n")
    ofs.close()
# make_plot()
def run(int_lp, log_out="plot_integrate.log"):
    """Parse `int_lp` (an INTEGRATE.LP file) and write loggraph plots to `log_out`."""
    parsed = IntegrateLp(int_lp)
    make_plot(parsed, log_out)
# run()
if __name__ == "__main__":
    import sys
    # Default to INTEGRATE.LP in the current directory unless a path is given.
    if len(sys.argv) > 1:
        int_lp = sys.argv[1]
    else:
        int_lp = "INTEGRATE.LP"
    log_out = "plot_integrate.log"
    run(int_lp, log_out)
    # Fixed: the bare `print` statements were Python-2-only syntax; the
    # parenthesised single-argument forms below print identical output on
    # both Python 2 and Python 3.
    print("")
    print("Run:")
    print("loggraph " + log_out)
| keitaroyam/yam_scripts | xds_plot_integrate.py | Python | bsd-3-clause | 8,422 | [
"CRYSTAL"
] | 85742eedb368bd7fbe8b6e48fa1d89cfade5593a7b752fdb6364f06783c74b2d |
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
import datetime
import ddt
import random
import pytz
import io
import json
import os
import requests
import shutil
import tempfile
from urllib import quote
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.utils.timezone import utc
from mock import Mock, patch
from nose.tools import raises
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from course_modes.models import CourseMode
from courseware.models import StudentModule
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_MOCK_MODULESTORE
from courseware.tests.helpers import LoginEnrollmentTestCase
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from microsite_configuration import microsite
from shoppingcart.models import (
RegistrationCodeRedemption, Order, CouponRedemption,
PaidCourseRegistration, Coupon, Invoice, CourseRegistrationCode, CourseRegistrationCodeInvoiceItem
)
from shoppingcart.pdf import PDFInvoice
from student.models import (
CourseEnrollment, CourseEnrollmentAllowed, NonExistentCourseError
)
from student.tests.factories import UserFactory, CourseModeFactory
from student.roles import CourseBetaTesterRole, CourseSalesAdminRole, CourseFinanceAdminRole, CourseInstructorRole
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
import instructor_task.api
import instructor.views.api
from instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from instructor.views.api import generate_unique_password
from instructor.views.api import _split_input_list, common_exceptions_400
from instructor_task.api_helper import AlreadyRunningError
from .test_tools import msk_from_problem_urlname
from ..views.tools import get_extended_due
# Expected CSV header rows for the registration-code and coupon report
# downloads (asserted by tests in this module).
EXPECTED_CSV_HEADER = (
    '"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
    '"customer_reference_number","internal_reference"'
)
EXPECTED_COUPON_CSV_HEADER = '"code","course_id","percentage_discount","code_redeemed_count","description"'

# ddt data for test cases involving reports
REPORTS_DATA = (
    {
        'report_type': 'grade',
        'instructor_api_endpoint': 'calculate_grades_csv',
        'task_api_endpoint': 'instructor_task.api.submit_calculate_grades_csv',
        'extra_instructor_api_kwargs': {}
    },
    {
        'report_type': 'enrolled student profile',
        'instructor_api_endpoint': 'get_students_features',
        'task_api_endpoint': 'instructor_task.api.submit_calculate_students_features_csv',
        'extra_instructor_api_kwargs': {'csv': '/csv'}
    }
)
# Dummy views wrapped by the decorator under test (common_exceptions_400).
@common_exceptions_400
def view_success(request):  # pylint: disable=unused-argument
    """A dummy view for testing that returns a simple HTTP response"""
    return HttpResponse('success')


@common_exceptions_400
def view_user_doesnotexist(request):  # pylint: disable=unused-argument
    """A dummy view that raises a User.DoesNotExist exception"""
    raise User.DoesNotExist()


@common_exceptions_400
def view_alreadyrunningerror(request):  # pylint: disable=unused-argument
    """A dummy view that raises an AlreadyRunningError exception"""
    raise AlreadyRunningError()
class TestCommonExceptions400(TestCase):
    """
    Testing the common_exceptions_400 decorator.
    """
    def setUp(self):
        super(TestCommonExceptions400, self).setUp()
        # Minimal mocked HttpRequest; META is set so attribute access on it
        # behaves like a real request.
        self.request = Mock(spec=HttpRequest)
        self.request.META = {}

    def test_happy_path(self):
        # A view that raises nothing passes through the decorator unchanged.
        resp = view_success(self.request)
        self.assertEqual(resp.status_code, 200)

    def test_user_doesnotexist(self):
        # Non-AJAX: User.DoesNotExist becomes a 400 with a plain-text body.
        self.request.is_ajax.return_value = False
        resp = view_user_doesnotexist(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(resp.status_code, 400)
        self.assertIn("User does not exist", resp.content)

    def test_user_doesnotexist_ajax(self):
        # AJAX: same error, but the message is wrapped in a JSON payload.
        self.request.is_ajax.return_value = True
        resp = view_user_doesnotexist(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(resp.status_code, 400)
        result = json.loads(resp.content)
        self.assertIn("User does not exist", result["error"])

    def test_alreadyrunningerror(self):
        # Non-AJAX: AlreadyRunningError becomes a 400 with a plain-text body.
        self.request.is_ajax.return_value = False
        resp = view_alreadyrunningerror(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(resp.status_code, 400)
        self.assertIn("Task is already running", resp.content)

    def test_alreadyrunningerror_ajax(self):
        # AJAX: same error, delivered as JSON.
        self.request.is_ajax.return_value = True
        resp = view_alreadyrunningerror(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(resp.status_code, 400)
        result = json.loads(resp.content)
        self.assertIn("Task is already running", result["error"])
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Ensure that users cannot access endpoints they shouldn't be able to.

    Checks three access levels against two endpoint groups: endpoints open
    to staff-and-above, and endpoints open to instructors only.
    """
    def setUp(self):
        super(TestInstructorAPIDenyLevels, self).setUp()
        self.course = CourseFactory.create()
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course.id)
        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()
        # Student state with recorded attempts for the problem above, used
        # by the reset/rescore endpoints exercised below.
        _module = StudentModule.objects.create(
            student=self.user,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
        # Endpoints that only Staff or Instructors can access
        self.staff_level_endpoints = [
            ('students_update_enrollment', {'identifiers': 'foo@example.org', 'action': 'enroll'}),
            ('get_grading_config', {}),
            ('get_students_features', {}),
            ('get_distribution', {}),
            ('get_student_progress_url', {'unique_student_identifier': self.user.username}),
            ('reset_student_attempts',
             {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
            ('update_forum_role_membership',
             {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
            ('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
            ('proxy_legacy_analytics', {'aname': 'ProblemGradeDistribution'}),
            ('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
            ('list_instructor_tasks', {}),
            ('list_background_email_tasks', {}),
            ('list_report_downloads', {}),
            ('calculate_grades_csv', {}),
            ('get_students_features', {}),
        ]
        # Endpoints that only Instructors can access
        self.instructor_level_endpoints = [
            ('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
            ('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
            ('list_course_role_members', {'rolename': 'beta'}),
            ('rescore_problem',
             {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
        ]

    def _access_endpoint(self, endpoint, args, status_code, msg):
        """
        Asserts that accessing the given `endpoint` gets a response of `status_code`.

        endpoint: string, endpoint for instructor dash API
        args: dict, kwargs for `reverse` call
        status_code: expected HTTP status code response
        msg: message to display if assertion fails.
        """
        url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
        # These three endpoints expect POST; everything else is a GET.
        if endpoint in ['send_email', 'students_update_enrollment', 'bulk_beta_modify_access']:
            response = self.client.post(url, args)
        else:
            response = self.client.get(url, args)
        self.assertEqual(
            response.status_code,
            status_code,
            msg=msg
        )

    def test_student_level(self):
        """
        Ensure that an enrolled student can't access staff or instructor endpoints.
        """
        self.client.login(username=self.user.username, password='test')
        for endpoint, args in self.staff_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )

    def test_staff_level(self):
        """
        Ensure that a staff member can't access instructor endpoints.
        """
        staff_member = StaffFactory(course_key=self.course.id)
        CourseEnrollment.enroll(staff_member, self.course.id)
        self.client.login(username=staff_member.username, password='test')
        # Try to promote to forums admin - not working
        # update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
        for endpoint, args in self.staff_level_endpoints:
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'list_forum_members']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Staff member should be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Staff member should not be allowed to access endpoint " + endpoint
            )

    def test_instructor_level(self):
        """
        Ensure that an instructor member can access all endpoints.
        """
        inst = InstructorFactory(course_key=self.course.id)
        CourseEnrollment.enroll(inst, self.course.id)
        self.client.login(username=inst.username, password='test')
        for endpoint, args in self.staff_level_endpoints:
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            # TODO: make this work
            if endpoint in ['rescore_problem']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test Bulk account creation and enrollment from csv file.

    Each CSV row is expected to carry exactly four columns:
    email, username, full name, country.
    """
    def setUp(self):
        super(TestInstructorAPIBulkAccountCreationAndEnrollment, self).setUp()
        self.request = RequestFactory().request()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.url = reverse('register_and_enroll_students', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # An existing account that is NOT enrolled in self.course.
        self.not_enrolled_student = UserFactory(
            username='NotEnrolledStudent',
            email='nonenrolled@test.com',
            first_name='NotEnrolled',
            last_name='Student'
        )

    @patch('instructor.views.api.log.info')
    def test_account_creation_and_enrollment_with_csv(self, info_log):
        """
        Happy path test to create a single new user
        """
        csv_content = "test_student@example.com,test_student_1,tester1,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        # test the log for email that's send to new created user.
        info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')

    @patch('instructor.views.api.log.info')
    def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
        """
        Happy path test to create a single new user
        """
        # Leading/trailing blank lines must be ignored, not reported as errors.
        csv_content = "\ntest_student@example.com,test_student_1,tester1,USA\n\n"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        # test the log for email that's send to new created user.
        info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')

    @patch('instructor.views.api.log.info')
    def test_email_and_username_already_exist(self, info_log):
        """
        If the email address and username already exists
        and the user is enrolled in the course, do nothing (including no email gets sent out)
        """
        csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
                      "test_student@example.com,test_student_1,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        # test the log for email that's send to new created user.
        info_log.assert_called_with(
            u"user already exists with username '%s' and email '%s'",
            'test_student_1',
            'test_student@example.com'
        )

    def test_file_upload_type_not_csv(self):
        """
        Try uploading some non-CSV file and verify that it is rejected
        """
        uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['general_errors']), 0)
        self.assertEquals(data['general_errors'][0]['response'], 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.')

    def test_bad_file_upload_type(self):
        """
        Try uploading a file with a .csv name but unreadable binary content
        and verify that it is rejected
        """
        uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['general_errors']), 0)
        self.assertEquals(data['general_errors'][0]['response'], 'Could not read uploaded file.')

    def test_insufficient_data(self):
        """
        Try uploading a CSV file which does not have the exact four columns of data
        """
        csv_content = "test_student@example.com,test_student_1\n"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 1)
        self.assertEquals(data['general_errors'][0]['response'], 'Data in row #1 must have exactly four columns: email, username, full name, and country')

    def test_invalid_email_in_csv(self):
        """
        Test failure case of a poorly formatted email field
        """
        csv_content = "test_student.example.com,test_student_1,tester1,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        data = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'Invalid email {0}.'.format('test_student.example.com'))

    @patch('instructor.views.api.log.info')
    def test_csv_user_exist_and_not_enrolled(self, info_log):
        """
        If the email address and username already exists
        and the user is not enrolled in the course, enrolled him/her and iterate to next one.
        """
        csv_content = "nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        info_log.assert_called_with(
            u'user %s enrolled in the course %s',
            u'NotEnrolledStudent',
            self.course.id
        )

    def test_user_with_already_existing_email_in_csv(self):
        """
        If the email address already exists, but the username is different,
        assume it is the correct user and just register the user in the course.
        """
        csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
                      "test_student@example.com,test_student_2,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        warning_message = 'An account with email {email} exists but the provided username {username} ' \
                          'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2')
        self.assertNotEquals(len(data['warnings']), 0)
        self.assertEquals(data['warnings'][0]['response'], warning_message)
        user = User.objects.get(email='test_student@example.com')
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))

    def test_user_with_already_existing_username_in_csv(self):
        """
        If the username already exists (but not the email),
        assume it is a different user and fail to create the new account.
        """
        csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
                      "test_student2@example.com,test_student_1,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))

    def test_csv_file_not_attached(self):
        """
        Test when the user does not attach a file
        """
        csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
                      "test_student2@example.com,test_student_1,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        # Posted under the wrong form field name on purpose.
        response = self.client.post(self.url, {'file_not_found': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['general_errors']), 0)
        self.assertEquals(data['general_errors'][0]['response'], 'File is not attached.')

    def test_raising_exception_in_auto_registration_and_enrollment_case(self):
        """
        Test that exceptions are handled well
        """
        csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
                      "test_student2@example.com,test_student_1,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        with patch('instructor.views.api.create_and_enroll_user') as mock:
            mock.side_effect = NonExistentCourseError()
            response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'NonExistentCourseError')

    def test_generate_unique_password(self):
        """
        generate_unique_password should generate a unique password string that excludes certain characters.
        """
        password = generate_unique_password([], 12)
        self.assertEquals(len(password), 12)
        # Easily-confused characters must never appear in generated passwords.
        for letter in password:
            self.assertNotIn(letter, 'aAeEiIoOuU1l')

    def test_users_created_and_enrolled_successfully_if_others_fail(self):
        # The second row reuses test_student_1's username, so it must fail
        # while the first and third rows are still created and enrolled.
        csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
                      "test_student3@example.com,test_student_1,tester3,CA\n" \
                      "test_student2@example.com,test_student_2,tester2,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
        self.assertTrue(User.objects.filter(username='test_student_1', email='test_student1@example.com').exists())
        self.assertTrue(User.objects.filter(username='test_student_2', email='test_student2@example.com').exists())
        self.assertFalse(User.objects.filter(email='test_student3@example.com').exists())

    @patch.object(instructor.views.api, 'generate_random_string',
                  Mock(side_effect=['first', 'first', 'second']))
    def test_generate_unique_password_no_reuse(self):
        """
        generate_unique_password should generate a unique password string that hasn't been generated before.
        """
        generated_password = ['first']
        password = generate_unique_password(generated_password, 12)
        self.assertNotEquals(password, 'first')

    @patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
    def test_allow_automated_signups_flag_not_set(self):
        # With the feature flag off, the endpoint must refuse access.
        csv_content = "test_student1@example.com,test_student_1,tester1,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEquals(response.status_code, 403)
@ddt.ddt
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test enrollment modification endpoint.

    This test does NOT exhaustively test state changes, that is the
    job of test_enrollment. This tests the response and action switch.
    """

    def setUp(self):
        """Create a course, an instructor client, and enrolled/not-enrolled/invited fixtures."""
        super(TestInstructorAPIEnrollment, self).setUp()
        self.request = RequestFactory().request()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

        self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
        CourseEnrollment.enroll(
            self.enrolled_student,
            self.course.id
        )
        self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
                                               last_name='Student')

        # Create invited, but not registered, user
        cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
        cea.save()
        self.allowed_email = 'robot-allowed@robot.org'

        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)

        # Email URL values
        self.site_name = microsite.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        self.about_path = '/courses/{}/about'.format(self.course.id)
        self.course_path = '/courses/{}/'.format(self.course.id)

        # uncomment to enable enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103(invalid-name))
        # self.maxDiff = None

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 400)

    def test_bad_action(self):
        """ Test with an invalid action. """
        action = 'robot-not-an-action'
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
        self.assertEqual(response.status_code, 400)

    def test_invalid_email(self):
        """A malformed email identifier is reported as invalid, not an error status."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius@',
                    "invalidIdentifier": True,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_invalid_username(self):
        """An unknown username identifier is reported as invalid, not an error status."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius',
                    "invalidIdentifier": True,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_enroll_with_username(self):
        """Enrolling by username reports the before/after enrollment state."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": self.notenrolled_student.username,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_enroll_without_email(self):
        """Enrolling with email_students=False enrolls the user and sends no mail."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': False})
        # (removed leftover debug print of the email's type)
        self.assertEqual(response.status_code, 200)

        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    @ddt.data('http', 'https')
    def test_enroll_with_email(self, protocol):
        """Enrolling a registered user with email_students=True sends the enrollment email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)

        self.assertEqual(response.status_code, 200)

        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been enrolled in {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear NotEnrolled Student\n\nYou have been enrolled in {} "
            "at edx.org by a member of the course staff. "
            "The course should now appear on your edx.org dashboard.\n\n"
            "To start accessing course materials, please visit "
            "{proto}://{site}{course_path}\n\n----\n"
            "This email was automatically sent from edx.org to NotEnrolled Student".format(
                self.course.display_name,
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )

    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered(self, protocol):
        """Enrolling an unregistered email sends a registration invitation."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been invited to register for {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the "
            "registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, "
            "visit {proto}://{site}{about_path} to join the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
            )
        )

    @ddt.data('http', 'https')
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_enroll_email_not_registered_mktgsite(self, protocol):
        """With the marketing site enabled, the invitation omits the about-page link."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "You can then enroll in {display_name}.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                display_name=self.course.display_name, proto=protocol, site=self.site_name
            )
        )

    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered_autoenroll(self, protocol):
        """With auto_enroll, the invitation tells the student the course will appear on their dashboard."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        # (removed leftover debug print of the email's type)
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been invited to register for {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, you will see {display_name} listed on your dashboard.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, display_name=self.course.display_name
            )
        )

    def test_unenroll_without_email(self):
        """Unenrolling with email_students=False unenrolls the user and sends no mail."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': False})
        # (removed leftover debug print of the email's type)
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_unenroll_with_email(self):
        """Unenrolling with email_students=True sends the un-enrollment email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': True})
        # (removed leftover debug print of the email's type)
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
            "at edx.org by a member of the course staff. "
            "The course will no longer appear on your edx.org dashboard.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to Enrolled Student".format(
                display_name=self.course.display_name,
            )
        )

    def test_unenroll_with_email_allowed_student(self):
        """Unenrolling an invited-but-unregistered email revokes the invitation and notifies them."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
        # (removed leftover debug print of the email's type)
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.allowed_email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": True,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
            "Please disregard the invitation previously sent.\n\n----\n"
            "This email was automatically sent from edx.org to robot-allowed@robot.org".format(
                display_name=self.course.display_name,
            )
        )

    @ddt.data('http', 'https')
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
        """On a shib course, the invitation points straight at the about page (no /register)."""
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, about_path=self.about_path,
                display_name=self.course.display_name,
            )
        )

    @patch('instructor.enrollment.uses_shib')
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
        # Try with marketing site enabled and shib on
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # The class decorator above already patches ENABLE_MKTG_SITE; the
        # previous nested `with patch.dict(...)` duplicating it was redundant
        # and has been removed.
        response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                self.course.display_name,
            )
        )

    @ddt.data('http', 'https')
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
        """Shib course + auto_enroll: the invitation points at the courseware and asks to log in."""
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        # (removed leftover debug print of the email's type)
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                display_name=self.course.display_name,
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )

    def test_enroll_already_enrolled_student(self):
        """
        Ensure that already enrolled "verified" students cannot be downgraded
        to "honor"
        """
        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        # make this enrollment "verified"
        course_enrollment.mode = u'verified'
        course_enrollment.save()
        self.assertEqual(course_enrollment.mode, u'verified')

        # now re-enroll the student through the instructor dash
        self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')

        # affirm that the student is still in "verified" mode
        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        self.assertEqual(course_enrollment.mode, u"verified")

    def test_unenroll_and_enroll_verified(self):
        """
        Test that unenrolling and enrolling a student from a verified track
        results in that student being in an honor track
        """
        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        # upgrade enrollment
        course_enrollment.mode = u'verified'
        course_enrollment.save()
        self.assertEqual(course_enrollment.mode, u'verified')

        self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
        self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')

        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        self.assertEqual(course_enrollment.mode, u'honor')

    def _change_student_enrollment(self, user, course, action):
        """
        Helper function that posts to 'students_update_enrollment' to change
        a student's enrollment
        """
        url = reverse(
            'students_update_enrollment',
            kwargs={'course_id': course.id.to_deprecated_string()},
        )
        params = {
            'identifiers': user.email,
            'action': action,
            'email_students': True,
        }
        response = self.client.post(url, params)
        self.assertEqual(response.status_code, 200)
        return response
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test bulk beta modify access endpoint.
    """

    def setUp(self):
        """Create a course, an instructor client, a beta tester, and a not-enrolled user."""
        super(TestInstructorAPIBulkBetaEnrollment, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

        self.beta_tester = BetaTesterFactory(course_key=self.course.id)
        CourseEnrollment.enroll(
            self.beta_tester,
            self.course.id
        )
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))

        self.notenrolled_student = UserFactory(username='NotEnrolledStudent')

        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)

        self.request = RequestFactory().request()

        # Email URL values
        self.site_name = microsite.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        self.about_path = '/courses/{}/about'.format(self.course.id)
        self.course_path = '/courses/{}/'.format(self.course.id)

        # uncomment to enable enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103(invalid-name))
        # self.maxDiff = None

    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 400)

    def test_bad_action(self):
        """ Test with an invalid action. """
        action = 'robot-not-an-action'
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
        self.assertEqual(response.status_code, 400)

    def add_notenrolled(self, response, identifier):
        """
        Test Helper Method (not a test, called by other tests)

        Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
        and the student identifier (email or username) given as 'identifiers' in the request.

        Asserts the reponse returns cleanly, that the student was added as a beta tester, and the
        response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
        Additionally asserts no email was sent.
        """
        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": identifier,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_add_notenrolled_email(self):
        """Adding a beta tester by email (no auto-enroll) must not enroll them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_email_autoenroll(self):
        """Adding a beta tester by email with auto_enroll must also enroll them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_username(self):
        """Adding a beta tester by username (no auto-enroll) must not enroll them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_username_autoenroll(self):
        """Adding a beta tester by username with auto_enroll must also enroll them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    @ddt.data('http', 'https')
    def test_add_notenrolled_with_email(self, protocol):
        """Adding a beta tester with email_students=True sends the beta-test invitation."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        self.assertEqual(response.status_code, 200)

        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
        )

        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {student_name}\n\nYou have been invited to be a beta tester "
            "for {display_name} at edx.org by a member of the course staff.\n\n"
            "Visit {proto}://{site}{about_path} to join "
            "the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {student_email}".format(
                display_name=self.course.display_name,
                student_name=self.notenrolled_student.profile.name,
                student_email=self.notenrolled_student.email,
                proto=protocol,
                site=self.site_name,
                about_path=self.about_path
            )
        )

    @ddt.data('http', 'https')
    def test_add_notenrolled_with_email_autoenroll(self, protocol):
        """With auto_enroll, the invitation points at the courseware instead of the about page."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        self.assertEqual(response.status_code, 200)

        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
        )

        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {student_name}\n\nYou have been invited to be a beta tester "
            "for {display_name} at edx.org by a member of the course staff.\n\n"
            "To start accessing course materials, please visit "
            "{proto}://{site}{course_path}\n\n----\n"
            "This email was automatically sent from edx.org to {student_email}".format(
                display_name=self.course.display_name,
                student_name=self.notenrolled_student.profile.name,
                student_email=self.notenrolled_student.email,
                proto=protocol,
                site=self.site_name,
                course_path=self.course_path
            )
        )

    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_add_notenrolled_email_mktgsite(self):
        """With the marketing site enabled, the invitation has no direct course link."""
        # Try with marketing site enabled
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {}\n\nYou have been invited to be a beta tester "
            "for {} at edx.org by a member of the course staff.\n\n"
            "Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {}".format(
                self.notenrolled_student.profile.name,
                self.course.display_name,
                self.notenrolled_student.email,
            )
        )

    def test_enroll_with_email_not_registered(self):
        """Adding an unregistered email reports userDoesNotExist and sends no mail."""
        # User doesn't exist
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notregistered_email,
                    "error": True,
                    "userDoesNotExist": True
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_remove_without_email(self):
        """Removing a beta tester with email_students=False revokes the role silently."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched from the email above which had its cache cleared
        if hasattr(self.beta_tester, '_roles'):
            del self.beta_tester._roles
        self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))

        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_remove_with_email(self):
        """Removing a beta tester with email_students=True sends the removal notice."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True})
        self.assertEqual(response.status_code, 200)

        # Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched from the email above which had its cache cleared
        if hasattr(self.beta_tester, '_roles'):
            del self.beta_tester._roles
        self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))

        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
        )

        self.assertEqual(
            mail.outbox[0].body,
            "Dear {full_name}\n\nYou have been removed as a beta tester for "
            "{display_name} at edx.org by a member of the course staff. "
            "The course will remain on your dashboard, but you will no longer "
            "be part of the beta testing group.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to {email_address}".format(
                display_name=self.course.display_name,
                full_name=self.beta_tester.profile.name,
                email_address=self.beta_tester.email
            )
        )
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change permissions
    of other users.
    This test does NOT test whether the actions had an effect on the
    database, that is the job of test_access.
    This tests the response and action switch.
    Actually, modify_access does not have a very meaningful
    response yet, so only the status code is tested.
    """
    def setUp(self):
        super(TestInstructorAPILevelsAccess, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Additional users whose access levels the tests grant/revoke:
        # a second instructor, a staff member, and a plain user with no role.
        self.other_instructor = InstructorFactory(course_key=self.course.id)
        self.other_staff = StaffFactory(course_key=self.course.id)
        self.other_user = UserFactory()
    def test_modify_access_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
    def test_modify_access_bad_action(self):
        """ Test with an invalid action parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'robot-not-an-action',
        })
        self.assertEqual(response.status_code, 400)
    def test_modify_access_bad_role(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'robot-not-a-roll',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 400)
    def test_modify_access_allow(self):
        """ Granting 'staff' to a user with no prior role succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_user.email,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)
    def test_modify_access_allow_with_uname(self):
        """ The identifier may be a username rather than an email. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_instructor.username,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)
    def test_modify_access_revoke(self):
        """ Revoking 'staff' from an existing staff member succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
    def test_modify_access_revoke_with_username(self):
        """ Revoke also accepts a username as the identifier. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.username,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
    def test_modify_access_with_fake_user(self):
        """ Unknown identifiers return 200 with userDoesNotExist flagged. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': 'GandalfTheGrey',
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': 'GandalfTheGrey',
            'userDoesNotExist': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_modify_access_with_inactive_user(self):
        """ Inactive users return 200 with inactiveUser flagged. """
        self.other_user.is_active = False
        self.other_user.save()  # pylint: disable=no-member
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_user.username,
            'rolename': 'beta',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': self.other_user.username,
            'inactiveUser': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_modify_access_revoke_not_allowed(self):
        """ Test revoking access that a user does not have. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
    def test_modify_access_revoke_self(self):
        """
        Test that an instructor cannot remove instructor privileges from themselves.
        """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.instructor.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        # check response content
        expected = {
            'unique_student_identifier': self.instructor.username,
            'rolename': 'instructor',
            'action': 'revoke',
            'removingSelfAsInstructor': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_list_course_role_members_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
    def test_list_course_role_members_bad_rolename(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'rolename': 'robot-not-a-rolename',
        })
        self.assertEqual(response.status_code, 400)
    def test_list_course_role_members_staff(self):
        """ Listing 'staff' returns the one staff member from setUp. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'rolename': 'staff',
        })
        self.assertEqual(response.status_code, 200)
        # check response content
        expected = {
            'course_id': self.course.id.to_deprecated_string(),
            'staff': [
                {
                    'username': self.other_staff.username,
                    'email': self.other_staff.email,
                    'first_name': self.other_staff.first_name,
                    'last_name': self.other_staff.last_name,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_list_course_role_members_beta(self):
        """ Listing 'beta' is empty since setUp creates no beta testers. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'rolename': 'beta',
        })
        self.assertEqual(response.status_code, 200)
        # check response content
        expected = {
            'course_id': self.course.id.to_deprecated_string(),
            'beta': []
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_update_forum_role_membership(self):
        """
        Test update forum role membership with user's email and username.
        """
        # Seed forum roles for course.
        seed_permissions_roles(self.course.id)
        # Exercise every combination of identifier kind, rolename and action.
        for user in [self.instructor, self.other_user]:
            for identifier_attr in [user.email, user.username]:
                for rolename in ["Administrator", "Moderator", "Community TA"]:
                    for action in ["allow", "revoke"]:
                        self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)
    def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
        """
        Test update forum role membership.
        Get unique_student_identifier, rolename and action and update forum role.
        """
        url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(
            url,
            {
                'unique_student_identifier': identifier,
                'rolename': rolename,
                'action': action,
            }
        )
        # Status code should be 200.
        self.assertEqual(response.status_code, 200)
        # Confirm the role was actually added/removed for this user.
        user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
        if action == 'allow':
            self.assertIn(rolename, user_roles)
        elif action == 'revoke':
            self.assertNotIn(rolename, user_roles)
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints that show data without side effects.
    """
    def setUp(self):
        super(TestInstructorAPILevelsDataDump, self).setUp()
        self.course = CourseFactory.create()
        # A paid course mode (min_price=40) so the e-commerce endpoints
        # (cart/coupon/invoice) have something to report on.
        self.course_mode = CourseMode(course_id=self.course.id,
                                      mode_slug="honor",
                                      mode_display_name="honor cert",
                                      min_price=40)
        self.course_mode.save()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.cart = Order.get_cart_for_user(self.instructor)
        self.coupon_code = 'abcde'
        self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
                             percentage_discount=10, created_by=self.instructor, is_active=True)
        self.coupon.save()
        # Create testing invoice 1.
        self.sale_invoice_1 = Invoice.objects.create(
            total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
            recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
            internal_reference="A", course_id=self.course.id, is_valid=True
        )
        self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=self.sale_invoice_1,
            qty=1,
            unit_price=1234.32,
            course_id=self.course.id
        )
        # Six enrolled students for the student-data endpoints.
        self.students = [UserFactory() for _ in xrange(6)]
        for student in self.students:
            CourseEnrollment.enroll(student, self.course.id)
    def test_invalidate_sale_record(self):
        """
        Testing the sale invalidating scenario.
        """
        for i in range(2):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i),
                course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor,
                invoice=self.sale_invoice_1,
                invoice_item=self.invoice_item,
                mode_slug='honor'
            )
            course_registration_code.save()
        data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
        url = reverse('sale_validation', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url, method="POST", data=data)
        # Now try to fetch data against a non-existing invoice number.
        test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
        self.assert_request_status_code(404, url, method="POST", data=test_data_1)
        # Now invalidate the same invoice number and expect a Bad Request.
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)
        # Now re_validate the invoice number.
        data['event_type'] = "re_validate"
        self.assert_request_status_code(200, url, method="POST", data=data)
        # Now re_validate the same active invoice number and expect a Bad Request.
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("This invoice is already active.", response.content)
        # Missing event_type parameter.
        test_data_2 = {'invoice_number': self.sale_invoice_1.id}
        response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
        self.assertIn("Missing required event_type parameter", response.content)
        # Missing invoice_number parameter.
        test_data_3 = {'event_type': "re_validate"}
        response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
        self.assertIn("Missing required invoice_number parameter", response.content)
        # Submitting invalid (non-integer) invoice number.
        data['invoice_number'] = 'testing'
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)
    def test_get_sale_order_records_features_csv(self):
        """
        Test that the response from get_sale_order_records is in csv format.
        """
        # add the coupon code for the course
        coupon = Coupon(
            code='test_code', description='test_description', course_id=self.course.id,
            percentage_discount='10', created_by=self.instructor, is_active=True
        )
        coupon.save()
        self.cart.order_type = 'business'
        self.cart.save()
        self.cart.add_billing_details(company_name='Test Company', company_contact_name='Test',
                                      company_contact_email='test@123', recipient_name='R1',
                                      recipient_email='', customer_reference_number='PO#23')
        paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        # update the quantity of the cart item paid_course_reg_item
        resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': paid_course_reg_item.id, 'qty': '4'})
        self.assertEqual(resp.status_code, 200)
        # apply the coupon code to the item in the cart
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase()
        # get the updated item
        item = self.cart.orderitem_set.all().select_subclasses()[0]
        # get the redeemed coupon information
        coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=self.cart)
        sale_order_url = reverse('get_sale_order_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(sale_order_url)
        self.assertEqual(response['Content-Type'], 'text/csv')
        # '36' is presumably the 40 list price less the 10% coupon — TODO confirm.
        self.assertIn('36', response.content.split('\r\n')[1])
        self.assertIn(str(item.unit_cost), response.content.split('\r\n')[1],)
        self.assertIn(str(item.list_price), response.content.split('\r\n')[1],)
        self.assertIn(item.status, response.content.split('\r\n')[1],)
        self.assertIn(coupon_redemption[0].coupon.code, response.content.split('\r\n')[1],)
    def test_coupon_redeem_count_in_ecommerce_section(self):
        """
        Test that checks the redeem count in the instructor_dashboard coupon section
        """
        # add the coupon code for the course
        coupon = Coupon(
            code='test_code', description='test_description', course_id=self.course.id,
            percentage_discount='10', created_by=self.instructor, is_active=True
        )
        coupon.save()
        # Coupon Redeem Count only visible for Financial Admins.
        CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
        PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        # apply the coupon code to the item in the cart
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
        self.assertEqual(resp.status_code, 200)
        # URL for instructor dashboard
        instructor_dashboard = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # visit the instructor dashboard page and
        # check that the coupon redeem count should be 0
        resp = self.client.get(instructor_dashboard)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('Redeem Count', resp.content)
        self.assertIn('<td>0</td>', resp.content)
        # now make the payment of your cart items
        self.cart.purchase()
        # visit the instructor dashboard page and
        # check that the coupon redeem count should be 1
        resp = self.client.get(instructor_dashboard)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('Redeem Count', resp.content)
        self.assertIn('<td>1</td>', resp.content)
    def test_get_sale_records_features_csv(self):
        """
        Test that the response from get_sale_records is in csv format.
        """
        for i in range(2):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i),
                course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor,
                invoice=self.sale_invoice_1,
                invoice_item=self.invoice_item,
                mode_slug='honor'
            )
            course_registration_code.save()
        url = reverse(
            'get_sale_records',
            kwargs={'course_id': self.course.id.to_deprecated_string()}
        )
        response = self.client.get(url + '/csv', {})
        self.assertEqual(response['Content-Type'], 'text/csv')
    def test_get_sale_records_features_json(self):
        """
        Test that the response from get_sale_records is in json format.
        """
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i),
                course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor,
                invoice=self.sale_invoice_1,
                invoice_item=self.invoice_item,
                mode_slug='honor'
            )
            course_registration_code.save()
        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('sale', res_json)
        for res in res_json['sale']:
            self.validate_sale_records_response(
                res,
                course_registration_code,
                self.sale_invoice_1,
                0,
                invoice_item=self.invoice_item
            )
    def test_get_sale_records_features_with_multiple_invoices(self):
        """
        Test that the response from get_sale_records is in json format for multiple invoices
        """
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='qwerty{}'.format(i),
                course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor,
                invoice=self.sale_invoice_1,
                invoice_item=self.invoice_item,
                mode_slug='honor'
            )
            course_registration_code.save()
        # Create test invoice 2.
        sale_invoice_2 = Invoice.objects.create(
            total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
            recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
            internal_reference="B", course_id=self.course.id
        )
        invoice_item_2 = CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=sale_invoice_2,
            qty=1,
            unit_price=1234.32,
            course_id=self.course.id
        )
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='xyzmn{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=sale_invoice_2, invoice_item=invoice_item_2, mode_slug='honor'
            )
            course_registration_code.save()
        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('sale', res_json)
        self.validate_sale_records_response(
            res_json['sale'][0],
            course_registration_code,
            self.sale_invoice_1,
            0,
            invoice_item=self.invoice_item
        )
        self.validate_sale_records_response(
            res_json['sale'][1],
            course_registration_code,
            sale_invoice_2,
            0,
            invoice_item=invoice_item_2
        )
    def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes, invoice_item):
        """
        validate sale records attribute values with the response object
        """
        self.assertEqual(res['total_amount'], invoice.total_amount)
        self.assertEqual(res['recipient_email'], invoice.recipient_email)
        self.assertEqual(res['recipient_name'], invoice.recipient_name)
        self.assertEqual(res['company_name'], invoice.company_name)
        self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
        self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
        self.assertEqual(res['internal_reference'], invoice.internal_reference)
        self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
        self.assertEqual(res['invoice_number'], invoice.id)
        self.assertEqual(res['created_by'], course_registration_code.created_by.username)
        self.assertEqual(res['course_id'], invoice_item.course_id.to_deprecated_string())
        self.assertEqual(res['total_used_codes'], used_codes)
        # Callers always create exactly 5 registration codes per invoice.
        self.assertEqual(res['total_codes'], 5)
    def test_get_students_features(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_students_features.
        """
        url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        for student in self.students:
            student_json = [
                x for x in res_json['students']
                if x['username'] == student.username
            ][0]
            self.assertEqual(student_json['username'], student.username)
            self.assertEqual(student_json['email'], student.email)
    @ddt.data(True, False)
    def test_get_students_features_cohorted(self, is_cohorted):
        """
        Test that get_students_features includes cohort info when the course is
        cohorted, and does not when the course is not cohorted.
        """
        url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
        self.course.cohort_config = {'cohorted': is_cohorted}
        self.store.update_item(self.course, self.instructor.id)
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)
    @patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
    @patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
    def test_get_anon_ids(self):
        """
        Test the CSV output for the anonymized user ids.
        """
        url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        # User IDs 3..8 presumably correspond to the six students created in
        # setUp (earlier ids taken by other fixtures) — TODO confirm.
        self.assertTrue(body.startswith(
            '"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
            '\n"3","41","42"\n'
        ))
        self.assertTrue(body.endswith('"8","41","42"\n'))
    def test_list_report_downloads(self):
        """ The endpoint should echo the report store's links as JSON. """
        url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
            mock_links_for.return_value = [
                ('mock_file_name_1', 'https://1.mock.url'),
                ('mock_file_name_2', 'https://2.mock.url'),
            ]
            response = self.client.get(url, {})
        expected_response = {
            "downloads": [
                {
                    "url": "https://1.mock.url",
                    "link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
                    "name": "mock_file_name_1"
                },
                {
                    "url": "https://2.mock.url",
                    "link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
                    "name": "mock_file_name_2"
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected_response)
    @ddt.data(*REPORTS_DATA)
    @ddt.unpack
    def test_calculate_report_csv_success(self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs):
        """ Kicking off a report task should render the success message. """
        kwargs = {'course_id': unicode(self.course.id)}
        kwargs.update(extra_instructor_api_kwargs)
        url = reverse(instructor_api_endpoint, kwargs=kwargs)
        with patch(task_api_endpoint):
            response = self.client.get(url, {})
            success_status = "Your {report_type} report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section.".format(report_type=report_type)
            self.assertIn(success_status, response.content)
    @ddt.data(*REPORTS_DATA)
    @ddt.unpack
    def test_calculate_report_csv_already_running(self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs):
        """ An AlreadyRunningError should render the in-progress message. """
        kwargs = {'course_id': unicode(self.course.id)}
        kwargs.update(extra_instructor_api_kwargs)
        url = reverse(instructor_api_endpoint, kwargs=kwargs)
        with patch(task_api_endpoint) as mock:
            mock.side_effect = AlreadyRunningError()
            response = self.client.get(url, {})
        already_running_status = "{report_type} report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below.".format(report_type=report_type)
        self.assertIn(already_running_status, response.content)
    def test_get_distribution_no_feature(self):
        """
        Test that get_distribution lists available features
        when supplied no feature parameter.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)
        # An empty feature parameter behaves like no parameter at all.
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url + u'?feature=')
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)
    def test_get_distribution_unavailable_feature(self):
        """
        Test that get_distribution fails gracefully with
        an unavailable feature.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
        self.assertEqual(response.status_code, 400)
    def test_get_distribution_gender(self):
        """
        Test that get_distribution returns the gender distribution
        for the enrolled students.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'feature': 'gender'})
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        # All six students from setUp presumably default to gender 'm'
        # via UserFactory — TODO confirm against the factory definition.
        self.assertEqual(res_json['feature_results']['data']['m'], 6)
        self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
        self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
        self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')
    def test_get_student_progress_url(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].email.encode("utf-8"))
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)
    def test_get_student_progress_url_from_uname(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].username.encode("utf-8"))
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)
    def test_get_student_progress_url_noparams(self):
        """ Test that the endpoint 400's without the required query params. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
    def test_get_student_progress_url_nostudent(self):
        """ Test that the endpoint 400's when requesting an unknown email. """
        # NOTE(review): no unique_student_identifier is actually sent, so this
        # duplicates the noparams test above; it likely should pass an unknown
        # email in the query string — confirm and fix with the endpoint owner.
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change student grades.
    This includes resetting attempts and starting rescore tasks.
    This test does NOT test whether the actions had an effect on the
    database, that is the job of task tests and test_enrollment.
    """
    def setUp(self):
        super(TestInstructorAPIRegradeTask, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()
        # Pre-existing student state with 10 attempts, so the reset/delete
        # endpoints have something to act on.
        self.module_to_reset = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
    def test_reset_student_attempts_deletall(self):
        """ Make sure no one can delete all students state on a problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 400)
    def test_reset_student_attempts_single(self):
        """ Test reset single student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        # make sure problem attempts have been reset.
        changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
        self.assertEqual(
            json.loads(changed_module.state)['attempts'],
            0
        )
    # mock out the function which should be called to execute the action.
    @patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
    def test_reset_student_attempts_all(self, act):
        """ Test reset all student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
    def test_reset_student_attempts_missingmodule(self):
        """ Test reset for non-existent problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': 'robot-not-a-real-module',
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)
    def test_reset_student_attempts_delete(self):
        """ Test delete single student state. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 200)
        # make sure the module has been deleted
        self.assertEqual(
            StudentModule.objects.filter(
                student=self.module_to_reset.student,
                course_id=self.module_to_reset.course_id,
                # module_id=self.module_to_reset.module_id,
            ).count(),
            0
        )
    def test_reset_student_attempts_nonsense(self):
        """ Test failure with both unique_student_identifier and all_students. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 400)
    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single(self, act):
        """ Test rescoring of a single student. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single_from_uname(self, act):
        """ Test rescoring of a single student. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.username,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
    @patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
    def test_rescore_problem_all(self, act):
        """ Test rescoring for all students. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
    @patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
    def test_course_has_entrance_exam_in_student_attempts_reset(self):
        """ Test course has entrance exam id set while resetting attempts"""
        # This course has no entrance exam configured, so the request fails.
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'all_students': True,
            'delete_module': False,
        })
        self.assertEqual(response.status_code, 400)
    @patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
    def test_rescore_entrance_exam_with_invalid_exam(self):
        """ Test course has entrance exam id set while re-scoring. """
        url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
class TestEntranceExamInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can rescore student grades,
    reset student attempts and delete state for entrance exam.
    """
    def setUp(self):
        """
        Build a course whose entrance exam is a chapter containing one
        sequential -> one vertical -> two problems, plus an instructor and
        one enrolled student who has saved attempt state on both problems.
        """
        super(TestEntranceExamInstructorAPIRegradeTask, self).setUp()
        self.course = CourseFactory.create(
            org='test_org',
            course='test_course',
            run='test_run',
            # Must point at the entrance-exam chapter created below.
            entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
        )
        # Second course whose entrance_exam_id does not resolve to a real
        # block; used by the invalid-exam failure tests.
        self.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
        self.instructor = InstructorFactory(course_key=self.course.id)
        # Add instructor to invalid ee course
        CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.entrance_exam = ItemFactory.create(
            parent=self.course,
            category='chapter',
            display_name='Entrance exam'
        )
        subsection = ItemFactory.create(
            parent=self.entrance_exam,
            category='sequential',
            display_name='Subsection 1'
        )
        vertical = ItemFactory.create(
            parent=subsection,
            category='vertical',
            display_name='Vertical 1'
        )
        self.ee_problem_1 = ItemFactory.create(
            parent=vertical,
            category="problem",
            display_name="Exam Problem - Problem 1"
        )
        self.ee_problem_2 = ItemFactory.create(
            parent=vertical,
            category="problem",
            display_name="Exam Problem - Problem 2"
        )
        # Pre-existing student state: both problems start with 10 attempts,
        # so the reset endpoints have something to zero out or delete.
        ee_module_to_reset1 = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.ee_problem_1.location,
            state=json.dumps({'attempts': 10}),
        )
        ee_module_to_reset2 = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.ee_problem_2.location,
            state=json.dumps({'attempts': 10}),
        )
        self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]

    def test_reset_entrance_exam_student_attempts_deletall(self):
        """ Make sure no one can delete all students state on entrance exam. """
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        # all_students + delete_module together is an invalid combination.
        response = self.client.get(url, {
            'all_students': True,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 400)

    def test_reset_entrance_exam_student_attempts_single(self):
        """ Test reset single student attempts for entrance exam. """
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        # make sure problem attempts have been reset.
        changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
        for changed_module in changed_modules:
            self.assertEqual(
                json.loads(changed_module.state)['attempts'],
                0
            )

    # mock out the function which should be called to execute the action.
    @patch.object(instructor_task.api, 'submit_reset_problem_attempts_in_entrance_exam')
    def test_reset_entrance_exam_all_student_attempts(self, act):
        """ Test reset all student attempts for entrance exam. """
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    def test_reset_student_attempts_invalid_entrance_exam(self):
        """ Test reset for invalid entrance exam. """
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)

    # NOTE(review): "sttudent" below looks like a typo for "student" in the
    # test name; renaming would change the test id, so it is only flagged here.
    def test_entrance_exam_sttudent_delete_state(self):
        """ Test delete single student entrance exam state. """
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 200)
        # make sure the module has been deleted
        changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
        self.assertEqual(changed_modules.count(), 0)

    def test_entrance_exam_delete_state_with_staff(self):
        """ Test entrance exam delete state failure with staff access. """
        # Staff (as opposed to instructor) must be forbidden from deleting state.
        self.client.logout()
        staff_user = StaffFactory(course_key=self.course.id)
        self.client.login(username=staff_user.username, password='test')
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 403)

    def test_entrance_exam_reset_student_attempts_nonsense(self):
        """ Test failure with both unique_student_identifier and all_students. """
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 400)

    @patch.object(instructor_task.api, 'submit_rescore_entrance_exam_for_student')
    def test_rescore_entrance_exam_single_student(self, act):
        """ Test re-scoring of entrance exam for single student. """
        url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    def test_rescore_entrance_exam_all_student(self):
        """ Test rescoring for all students. """
        url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)

    def test_rescore_entrance_exam_all_student_and_single(self):
        """ Test re-scoring with both all students and single student parameters. """
        url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 400)

    def test_rescore_entrance_exam_with_invalid_exam(self):
        """ Test re-scoring of entrance exam with invalid exam. """
        url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)

    def test_list_entrance_exam_instructor_tasks_student(self):
        """ Test list task history for entrance exam AND student. """
        # create a re-score entrance exam task
        url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        # check response
        tasks = json.loads(response.content)['tasks']
        self.assertEqual(len(tasks), 1)

    def test_list_entrance_exam_instructor_tasks_all_student(self):
        """ Test list task history for entrance exam AND all student. """
        url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        tasks = json.loads(response.content)['tasks']
        self.assertEqual(len(tasks), 0)

    def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
        """ Test list task history for entrance exam failure if course has invalid exam. """
        url = reverse('list_entrance_exam_instructor_tasks',
                      kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
        response = self.client.get(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Checks that only instructors have access to email endpoints, and that
    these endpoints are only accessible with courses that actually exist,
    only with valid email messages.
    """
    def setUp(self):
        """Create a course, log in as its instructor, and prepare a valid payload."""
        super(TestInstructorSendEmail, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Non-ASCII subject and message exercise unicode handling end to end.
        test_subject = u'\u1234 test subject'
        test_message = u'\u6824 test message'
        self.full_test_message = {
            'send_to': 'staff',
            'subject': test_subject,
            'message': test_message,
        }

    def test_send_email_as_logged_in_instructor(self):
        # Happy path: instructor + complete payload -> 200.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 200)

    def test_send_email_but_not_logged_in(self):
        # Anonymous users are forbidden.
        self.client.logout()
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_not_staff(self):
        # A plain enrolled-or-not student is forbidden.
        self.client.logout()
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_course_not_exist(self):
        # Nonexistent course id must not return success.
        url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
        response = self.client.post(url, self.full_test_message)
        self.assertNotEqual(response.status_code, 200)

    def test_send_email_no_sendto(self):
        # Missing 'send_to' field -> 400.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'subject': 'test subject',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_subject(self):
        # Missing 'subject' field -> 400.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': 'staff',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_message(self):
        # Missing 'message' field -> 400.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': 'staff',
            'subject': 'test subject',
        })
        self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
    """
    Stand-in for get_task_completion_info.

    Alternates its return value on successive calls: odd-numbered calls
    report a failure, even-numbered calls report success.
    """
    # Number of times the mock has been invoked (becomes an instance
    # attribute on first call via the augmented assignment below).
    times_called = 0

    def mock_get_task_completion_info(self, *args):  # pylint: disable=unused-argument
        """Mock for get_task_completion_info"""
        self.times_called += 1
        if self.times_called % 2:
            return False, 'Task Errored In Some Way'
        return True, 'Task Completed'
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor task list endpoint.
    """
    class FakeTask(object):
        """ Fake task object """
        # Attribute names mirrored from real instructor task rows; each is
        # initialized to the sentinel string 'expected' and selectively
        # overwritten below.
        FEATURES = [
            'task_type',
            'task_input',
            'task_id',
            'requester',
            'task_state',
            'created',
            'status',
            'task_message',
            'duration_sec'
        ]

        def __init__(self, completion):
            """
            Build a fake task whose status/message come from calling the
            supplied ``completion`` callable (returns (success, message)).
            """
            for feature in self.FEATURES:
                setattr(self, feature, 'expected')
            # created needs to be a datetime
            self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
            # set 'status' and 'task_message' attrs
            success, task_message = completion()
            if success:
                self.status = "Complete"
            else:
                self.status = "Incomplete"
            self.task_message = task_message
            # Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
            self.task_output = '{"duration_ms": 1035000}'
            self.duration_sec = 1035000 / 1000.0

        def make_invalid_output(self):
            """Munge task_output to be invalid json"""
            self.task_output = 'HI MY NAME IS INVALID JSON'
            # This should be given the value of 'unknown' if the task output
            # can't be properly parsed
            self.duration_sec = 'unknown'

        def to_dict(self):
            """ Convert fake task to dictionary representation. """
            attr_dict = {key: getattr(self, key) for key in self.FEATURES}
            # JSON responses carry ISO-formatted timestamps, not datetimes.
            attr_dict['created'] = attr_dict['created'].isoformat()
            return attr_dict

    def setUp(self):
        """
        Create a course with an entrance exam id, an instructor, a student
        with saved problem state, and seven fake tasks (the last with
        deliberately unparseable output).
        """
        super(TestInstructorAPITaskLists, self).setUp()
        self.course = CourseFactory.create(
            entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
        )
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()
        self.module = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
        mock_factory = MockCompletionInfo()
        self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
        self.tasks[-1].make_invalid_output()

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    @patch.object(instructor_task.api, 'get_running_instructor_tasks')
    def test_list_instructor_tasks_running(self, act):
        """ Test list of all running tasks. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # A fresh MockCompletionInfo so the serialized statuses line up with
        # the ones produced in setUp.
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_background_email_tasks(self, act):
        """Test list of background email tasks."""
        act.return_value = self.tasks
        url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem(self, act):
        """ Test list task history for problem. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_location_str': self.problem_urlname,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem_student(self, act):
        """ Test list task history for problem AND student. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_location_str': self.problem_urlname,
                'unique_student_identifier': self.student.email,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
# Class-level patch: every test method below receives the mocked
# get_instructor_task_history as its ``task_history_request`` argument.
@patch.object(instructor_task.api, 'get_instructor_task_history')
class TestInstructorEmailContentList(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test the instructor email content history endpoint.
    """
    def setUp(self):
        """Create a course/instructor and empty per-test fixture registries."""
        super(TestInstructorEmailContentList, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Keyed by email id: fake tasks, fake CourseEmail rows, and the
        # corresponding expected serialized info.
        self.tasks = {}
        self.emails = {}
        self.emails_info = {}

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    def setup_fake_email_info(self, num_emails, with_failures=False):
        """ Initialize the specified number of fake emails """
        for email_id in range(num_emails):
            num_sent = random.randint(1, 15401)
            if with_failures:
                failed = random.randint(1, 15401)
            else:
                failed = 0
            self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
            self.emails[email_id] = FakeEmail(email_id)
            self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)

    def get_matching_mock_email(self, **kwargs):
        """ Returns the matching mock emails for the given id """
        email_id = kwargs.get('id', 0)
        return self.emails[email_id]

    def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
        """ Calls the list_email_content endpoint and returns the response """
        self.setup_fake_email_info(num_emails, with_failures)
        task_history_request.return_value = self.tasks.values()
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
            mock_email_info.side_effect = self.get_matching_mock_email
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        return response

    def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
        """ Tests sending emails with or without failures """
        response = self.get_email_content_response(num_emails, task_history_request, with_failures)
        self.assertTrue(task_history_request.called)
        # NOTE(review): comparing these as ordered lists assumes
        # self.emails_info.values() iterates in the same order as the tasks
        # returned above — holds in practice for these int keys, but verify
        # if keys ever change.
        expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
        actual_email_info = json.loads(response.content)['emails']
        self.assertEqual(len(actual_email_info), num_emails)
        for exp_email, act_email in zip(expected_email_info, actual_email_info):
            self.assertDictEqual(exp_email, act_email)
        self.assertEqual(expected_email_info, actual_email_info)

    def test_content_list_one_email(self, task_history_request):
        """ Test listing of bulk emails when email list has one email """
        response = self.get_email_content_response(1, task_history_request)
        self.assertTrue(task_history_request.called)
        email_info = json.loads(response.content)['emails']
        # Emails list should have one email
        self.assertEqual(len(email_info), 1)
        # Email content should be what's expected
        expected_message = self.emails[0].html_message
        returned_email_info = email_info[0]
        received_message = returned_email_info[u'email'][u'html_message']
        self.assertEqual(expected_message, received_message)

    def test_content_list_no_emails(self, task_history_request):
        """ Test listing of bulk emails when email list empty """
        response = self.get_email_content_response(0, task_history_request)
        self.assertTrue(task_history_request.called)
        email_info = json.loads(response.content)['emails']
        # Emails list should be empty
        self.assertEqual(len(email_info), 0)

    def test_content_list_email_content_many(self, task_history_request):
        """ Test listing of bulk emails sent large amount of emails """
        self.check_emails_sent(50, task_history_request)

    def test_list_email_content_error(self, task_history_request):
        """ Test handling of error retrieving email """
        invalid_task = FakeContentTask(0, 0, 0, 'test')
        invalid_task.make_invalid_input()
        task_history_request.return_value = [invalid_task]
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(task_history_request.called)
        returned_email_info = json.loads(response.content)['emails']
        self.assertEqual(len(returned_email_info), 1)
        returned_info = returned_email_info[0]
        # Every serialized field should be nulled out when the stored email
        # cannot be retrieved.
        for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
            self.assertEqual(returned_info[info], None)

    def test_list_email_with_failure(self, task_history_request):
        """ Test the handling of email task that had failures """
        self.check_emails_sent(1, task_history_request, True)

    def test_list_many_emails_with_failures(self, task_history_request):
        """ Test the handling of many emails with failures """
        self.check_emails_sent(50, task_history_request, True)

    def test_list_email_with_no_successes(self, task_history_request):
        """ Test an email task with zero successful sends and some failures. """
        task_info = FakeContentTask(0, 0, 10, 'expected')
        email = FakeEmail(0)
        email_info = FakeEmailInfo(email, 0, 10)
        task_history_request.return_value = [task_info]
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
            mock_email_info.return_value = email
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(task_history_request.called)
        returned_info_list = json.loads(response.content)['emails']
        self.assertEqual(len(returned_info_list), 1)
        returned_info = returned_info_list[0]
        expected_info = email_info.to_dict()
        self.assertDictEqual(expected_info, returned_info)
@ddt.ddt
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor analytics proxy endpoint.
    """
    class FakeProxyResponse(object):
        """ Fake successful requests response object. """
        def __init__(self):
            self.status_code = requests.status_codes.codes.OK
            self.content = '{"test_content": "robot test content"}'

    class FakeBadProxyResponse(object):
        """ Fake strange-failed requests response object. """
        def __init__(self):
            # Deliberately not an int status code, to exercise the proxy's
            # non-OK handling.
            self.status_code = 'notok.'
            self.content = '{"test_content": "robot test content"}'

    def setUp(self):
        """Create a course and log in as its instructor."""
        super(TestInstructorAPIAnalyticsProxy, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

    @ddt.data((ModuleStoreEnum.Type.mongo, False), (ModuleStoreEnum.Type.split, True))
    @ddt.unpack
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_url(self, store_type, assert_wo_encoding, act):
        """ Test legacy analytics proxy url generation. """
        with modulestore().default_store(store_type):
            course = CourseFactory.create()
            instructor_local = InstructorFactory(course_key=course.id)
            self.client.login(username=instructor_local.username, password='test')
            act.return_value = self.FakeProxyResponse()
            url = reverse('proxy_legacy_analytics', kwargs={'course_id': course.id.to_deprecated_string()})
            response = self.client.get(url, {
                'aname': 'ProblemGradeDistribution'
            })
            self.assertEqual(response.status_code, 200)
            # Make request URL pattern - everything but course id.
            url_pattern = "{url}get?aname={aname}&course_id={course_id}&apikey={api_key}".format(
                url="http://robotanalyticsserver.netbot:900/",
                aname="ProblemGradeDistribution",
                # Left as a placeholder so the pattern can be formatted twice
                # below, with and without URL-encoding.
                course_id="{course_id!s}",
                api_key="robot_api_key",
            )
            if assert_wo_encoding:
                # Format url with no URL-encoding of parameters.
                assert_url = url_pattern.format(course_id=course.id.to_deprecated_string())
                with self.assertRaises(AssertionError):
                    act.assert_called_once_with(assert_url)
            # Format url *with* URL-encoding of parameters.
            expected_url = url_pattern.format(course_id=quote(course.id.to_deprecated_string()))
            act.assert_called_once_with(expected_url)

    @override_settings(ANALYTICS_SERVER_URL="")
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_server_url(self, act):
        """
        Test legacy analytics when empty server url.
        """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        # 501 Not Implemented when the analytics server is unconfigured.
        self.assertEqual(response.status_code, 501)

    @override_settings(ANALYTICS_API_KEY="")
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_api_key(self, act):
        """
        Test legacy analytics when empty server API key.
        """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 501)

    @override_settings(ANALYTICS_SERVER_URL="")
    @override_settings(ANALYTICS_API_KEY="")
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_empty_url_and_api_key(self, act):
        """
        Test legacy analytics when empty server url & API key.
        """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 501)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy(self, act):
        """
        Test legacy analytics content proxying.
        """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_res = {'test_content': "robot test content"}
        self.assertEqual(json.loads(response.content), expected_res)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_reqfailed(self, act):
        """ Test proxy when server responds with failure. """
        act.return_value = self.FakeBadProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 500)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_missing_param(self, act):
        """ Test proxy when missing the aname query parameter. """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response.status_code, 400)
        # The upstream request must never be attempted without 'aname'.
        self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
    """ Test helpers for instructor.api """
    def test_split_input_list(self):
        """Comma/newline/CR-separated identifier strings are split and trimmed."""
        strings = []
        lists = []
        strings.append(
            "Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
        lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus',
                      'ut@lacinia.Sed'])
        for (stng, lst) in zip(strings, lists):
            self.assertEqual(_split_input_list(stng), lst)

    def test_split_input_list_unicode(self):
        """Splitting behaves identically for str and unicode input (Python 2)."""
        self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'),
                         ['robot@robot.edu', 'robot2@robot.edu'])
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
                         ['robot@robot.edu', 'robot2@robot.edu'])
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
                         [u'robot@robot.edu', 'robot2@robot.edu'])
        # Non-BMP-ish code points must pass through unsplit (unichr is Py2-only).
        scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
        self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])

    def test_msk_from_problem_urlname(self):
        """A problem urlname maps to the expected deprecated i4x usage key string."""
        course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
        name = 'L2Node1'
        output = 'i4x://MITx/6.002x/problem/L2Node1'
        self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)

    @raises(ValueError)
    def test_msk_from_problem_urlname_error(self):
        """A malformed course id makes msk_from_problem_urlname raise ValueError."""
        args = ('notagoodcourse', 'L2Node1')
        msk_from_problem_urlname(*args)
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test data dumps for reporting.
"""
def setUp(self):
    """
    Fixtures: a course with three week-sections (week1/week2 due on a fixed
    date, week3 with no due date), a homework unit under week1, and three
    students with saved state — user1 on everything, user2 and user3 on
    week1 + homework only.
    """
    super(TestDueDateExtensions, self).setUp()
    due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
    course = CourseFactory.create()
    week1 = ItemFactory.create(due=due)
    week2 = ItemFactory.create(due=due)
    week3 = ItemFactory.create()  # No due date
    course.children = [week1.location.to_deprecated_string(), week2.location.to_deprecated_string(),
                       week3.location.to_deprecated_string()]
    homework = ItemFactory.create(
        parent_location=week1.location,
        due=due
    )
    week1.children = [homework.location.to_deprecated_string()]
    user1 = UserFactory.create()
    # user1 has state on every section plus the homework.
    StudentModule(
        state='{}',
        student_id=user1.id,
        course_id=course.id,
        module_state_key=week1.location).save()
    StudentModule(
        state='{}',
        student_id=user1.id,
        course_id=course.id,
        module_state_key=week2.location).save()
    StudentModule(
        state='{}',
        student_id=user1.id,
        course_id=course.id,
        module_state_key=week3.location).save()
    StudentModule(
        state='{}',
        student_id=user1.id,
        course_id=course.id,
        module_state_key=homework.location).save()
    user2 = UserFactory.create()
    StudentModule(
        state='{}',
        student_id=user2.id,
        course_id=course.id,
        module_state_key=week1.location).save()
    StudentModule(
        state='{}',
        student_id=user2.id,
        course_id=course.id,
        module_state_key=homework.location).save()
    user3 = UserFactory.create()
    StudentModule(
        state='{}',
        student_id=user3.id,
        course_id=course.id,
        module_state_key=week1.location).save()
    StudentModule(
        state='{}',
        student_id=user3.id,
        course_id=course.id,
        module_state_key=homework.location).save()
    self.course = course
    self.week1 = week1
    self.homework = homework
    self.week2 = week2
    self.week3 = week3
    self.user1 = user1
    self.user2 = user2
    # NOTE(review): user3 is created (with StudentModule rows) but never
    # stored on self — presumably only its aggregate effect is needed; confirm.
    self.instructor = InstructorFactory(course_key=course.id)
    self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
    """Granting an extension records the new extended due date for the student."""
    endpoint = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
    params = {
        'student': self.user1.username,
        'url': self.week1.location.to_deprecated_string(),
        'due_datetime': '12/30/2013 00:00'
    }
    response = self.client.get(endpoint, params)
    self.assertEqual(response.status_code, 200, response.content)
    extended = get_extended_due(self.course, self.week1, self.user1)
    self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc), extended)
def test_change_to_invalid_due_date(self):
    """An extension earlier than allowed is rejected (400) and records nothing."""
    endpoint = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
    params = {
        'student': self.user1.username,
        'url': self.week1.location.to_deprecated_string(),
        'due_datetime': '01/01/2009 00:00'
    }
    response = self.client.get(endpoint, params)
    self.assertEqual(response.status_code, 400, response.content)
    extended = get_extended_due(self.course, self.week1, self.user1)
    self.assertEqual(None, extended)
def test_change_nonexistent_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week3.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week3, self.user1)
)
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_reset_nonexistent_extension(self):
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 400, response.content)
def test_reset_extension_to_deleted_date(self):
"""
Test that we can delete a due date extension after deleting the normal
due date, without causing an error.
"""
self.test_change_due_date()
self.week1.due = None
self.week1 = self.store.update_item(self.week1, self.user1.id)
# Now, week1's normal due date is deleted but the extension still exists.
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(ModuleStoreTestCase):
    """
    Test data dumps for E-commerce Course Registration Codes.
    """

    def setUp(self):
        """
        Fixtures.

        Generates 12 registration codes for the course and marks the first
        five of them as redeemed, so per-test row counts below build on a
        baseline of 12 codes / 5 spent / 7 active.
        """
        super(TestCourseRegistrationCodes, self).setUp()
        self.course = CourseFactory.create()
        CourseModeFactory.create(course_id=self.course.id, min_price=50)
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        CourseSalesAdminRole(self.course.id).add_users(self.instructor)

        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
            'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        # Purchased orders for the redemptions created below.
        for i in range(5):
            order = Order(user=self.instructor, status='purchased')
            order.save()

        # Spent(used) Registration Codes
        # Redeems codes with ids 1..5 (the first five generated above).
        for i in range(5):
            i += 1
            registration_code_redemption = RegistrationCodeRedemption(
                registration_code_id=i,
                redeemed_by=self.instructor
            )
            registration_code_redemption.save()

    @override_settings(FINANCE_EMAIL='finance@example.com')
    def test_finance_email_in_recipient_list_when_generating_registration_codes(self):
        """
        Test to verify that the invoice will also be sent to the FINANCE_EMAIL when
        generating registration codes
        """
        url_reg_code = reverse('generate_registration_codes',
                               kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
        }

        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        # check for the last mail.outbox, The FINANCE_EMAIL has been appended at the
        # very end, when generating registration codes
        self.assertEqual(mail.outbox[-1].to[0], 'finance@example.com')

    def test_user_invoice_copy_preference(self):
        """
        Test to remember user invoice copy preference
        """
        url_reg_code = reverse('generate_registration_codes',
                               kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
        }

        # user invoice copy preference will be saved in api user preference; model
        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')

        # get user invoice copy preference.
        url_user_invoice_preference = reverse('get_user_invoice_preference',
                                              kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url_user_invoice_preference, data)
        result = json.loads(response.content)
        self.assertEqual(result['invoice_copy'], True)

        # updating the user invoice copy preference during code generation flow
        data['invoice'] = ''
        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')

        # get user invoice copy preference.
        url_user_invoice_preference = reverse('get_user_invoice_preference',
                                              kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url_user_invoice_preference, data)
        result = json.loads(response.content)
        self.assertEqual(result['invoice_copy'], False)

    def test_generate_course_registration_codes_csv(self):
        """
        Test to generate a response of all the generated course registration codes
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # 17 = 1 header + 15 code rows + 1 trailing newline.
        self.assertEqual(len(body.split('\n')), 17)

    def test_generate_course_registration_with_redeem_url_codes_csv(self):
        """
        Test to generate a response of all the generated course registration codes
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 17)
        rows = body.split('\n')
        # Skip the header row; every data row's second column must be the
        # redemption URL built from the code in its first column.
        index = 1
        while index < len(rows):
            if rows[index]:
                row_data = rows[index].split(',')
                code = row_data[0].replace('"', '')
                self.assertTrue(row_data[1].startswith('"http')
                                and row_data[1].endswith('/shoppingcart/register/redeem/{0}/"'.format(code)))
            index += 1

    @patch.object(instructor.views.api, 'random_code_generator',
                  Mock(side_effect=['first', 'second', 'third', 'fourth']))
    def test_generate_course_registration_codes_matching_existing_coupon_code(self):
        """
        Test the generated course registration code is already in the Coupon Table
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        # 'first' already exists as a coupon, so the generator must skip it
        # and fall through to the remaining side_effect values.
        coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
        coupon.save()
        data = {
            'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 5)  # 1 for headers, 1 for new line at the end and 3 for the actual data

    @patch.object(instructor.views.api, 'random_code_generator',
                  Mock(side_effect=['first', 'first', 'second', 'third']))
    def test_generate_course_registration_codes_integrity_error(self):
        """
        Test for the Integrity error against the generated code

        The duplicate 'first' forces an IntegrityError on insert; the view
        must retry with the next generated code.
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # 4 = 1 header + 2 code rows + 1 trailing newline.
        self.assertEqual(len(body.split('\n')), 4)

    def test_spent_course_registration_codes_csv(self):
        """
        Test to generate a response of all the spent course registration codes
        """
        url = reverse('spent_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        # Baseline from setUp: 5 spent codes -> header + 5 rows + newline = 7.
        data = {'spent_company_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 7)

        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'unit_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        for i in range(9):
            order = Order(user=self.instructor, status='purchased')
            order.save()

        # Spent(used) Registration Codes
        # The 9 codes generated above follow the 12 from setUp, so they
        # occupy registration-code ids 13..21.
        for i in range(9):
            i += 13
            registration_code_redemption = RegistrationCodeRedemption(
                registration_code_id=i,
                redeemed_by=self.instructor
            )
            registration_code_redemption.save()

        data = {'spent_company_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # 11 = 1 header + 9 spent 'Group Alpha' rows + 1 trailing newline.
        self.assertEqual(len(body.split('\n')), 11)

    def test_active_course_registration_codes_csv(self):
        """
        Test to generate a response of all the active course registration codes
        """
        url = reverse('active_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        # Baseline from setUp: 12 codes - 5 spent = 7 active -> 9 lines.
        data = {'active_company_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 9)

        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        data = {'active_company_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # 11 = 1 header + 9 active 'Group Alpha' rows + 1 trailing newline.
        self.assertEqual(len(body.split('\n')), 11)

    def test_get_all_course_registration_codes_csv(self):
        """
        Test to generate a response of all the course registration codes
        """
        url = reverse(
            'get_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        # Baseline from setUp: all 12 codes -> header + 12 rows + newline = 14.
        data = {'download_company_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 14)

        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        data = {'download_company_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 11)

    def test_pdf_file_throws_exception(self):
        """
        test to mock the pdf file generation throws an exception
        when generating registration codes.
        """
        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )
        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }
        # A PDF failure must not fail the whole request.
        with patch.object(PDFInvoice, 'generate_pdf', side_effect=Exception):
            response = self.client.post(generate_code_url, data)
        self.assertEqual(response.status_code, 200, response.content)

    def test_get_codes_with_sale_invoice(self):
        """
        Test to generate a response of all the course registration codes
        """
        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        url = reverse('get_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'download_company_name': 'Group Invoice'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))

    def test_with_invalid_unit_price(self):
        """
        Test to generate a response of all the course registration codes

        A non-numeric unit price must be rejected with a 400.
        """
        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 10, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 'invalid', 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 400, response.content)
        self.assertIn('Could not parse amount as', response.content)

    def test_get_historical_coupon_codes(self):
        """
        Test to download a response of all the active coupon codes
        """
        get_coupon_code_url = reverse(
            'get_coupon_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )
        for i in range(10):
            coupon = Coupon(
                code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
                percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
            )
            coupon.save()

        # now create coupons with the expiration dates
        for i in range(5):
            coupon = Coupon(
                code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
                percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
                expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
            )
            coupon.save()

        response = self.client.get(get_coupon_code_url)
        self.assertEqual(response.status_code, 200, response.content)
        # filter all the coupons
        # Every coupon must appear as a quoted CSV row in the response body.
        for coupon in Coupon.objects.all():
            self.assertIn('"{code}","{course_id}","{discount}","0","{description}","{expiration_date}","True"'.format(
                code=coupon.code,
                course_id=coupon.course_id,
                discount=coupon.percentage_discount,
                description=coupon.description,
                expiration_date=coupon.display_expiry_date
            ), response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
class TestBulkCohorting(ModuleStoreTestCase):
    """
    Test adding users to cohorts in bulk via CSV upload.
    """

    def setUp(self):
        super(TestBulkCohorting, self).setUp()
        self.course = CourseFactory.create()
        self.staff_user = StaffFactory(course_key=self.course.id)
        self.non_staff_user = UserFactory.create()
        # Scratch directory for uploaded CSV files; removed in tearDown.
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        if os.path.exists(self.tempdir):
            shutil.rmtree(self.tempdir)

    def call_add_users_to_cohorts(self, csv_data, suffix='.csv', method='POST'):
        """
        Call `add_users_to_cohorts` with a file generated from `csv_data`.

        Returns the HTTP response for POST or GET requests.
        NOTE(review): any other `method` falls through and returns None —
        confirm that is intended.
        """
        # this temporary file will be removed in `self.tearDown()`
        __, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
        with open(file_name, 'w') as file_pointer:
            file_pointer.write(csv_data.encode('utf-8'))
        with open(file_name, 'r') as file_pointer:
            url = reverse('add_users_to_cohorts', kwargs={'course_id': unicode(self.course.id)})
            if method == 'POST':
                return self.client.post(url, {'uploaded-file': file_pointer})
            elif method == 'GET':
                return self.client.get(url, {'uploaded-file': file_pointer})

    def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
        """
        Verify that we get the error we expect for a given file input.
        """
        self.client.login(username=self.staff_user.username, password='test')
        response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
        self.assertEqual(response.status_code, 400)
        result = json.loads(response.content)
        self.assertEqual(result['error'], error)

    def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
        """
        Verify that `add_users_to_cohorts` successfully validates the
        file content, uploads the input file, and triggers the
        background task.
        """
        mock_store_upload.return_value = (None, 'fake_file_name.csv')
        self.client.login(username=self.staff_user.username, password='test')
        response = self.call_add_users_to_cohorts(file_content)
        self.assertEqual(response.status_code, 204)
        self.assertTrue(mock_store_upload.called)
        self.assertTrue(mock_cohort_task.called)

    def test_no_cohort_field(self):
        """
        Verify that we get a descriptive verification error when we haven't
        included a cohort field in the uploaded CSV.
        """
        self.expect_error_on_file_content(
            'username,email\n', "The file must contain a 'cohort' column containing cohort names."
        )

    def test_no_username_or_email_field(self):
        """
        Verify that we get a descriptive verification error when we haven't
        included a username or email field in the uploaded CSV.
        """
        self.expect_error_on_file_content(
            'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
        )

    def test_empty_csv(self):
        """
        Verify that we get a descriptive verification error when we haven't
        included any data in the uploaded CSV.
        """
        self.expect_error_on_file_content(
            '', "The file must contain a 'cohort' column containing cohort names."
        )

    def test_wrong_extension(self):
        """
        Verify that we get a descriptive verification error when we haven't
        uploaded a file with a '.csv' extension.
        """
        self.expect_error_on_file_content(
            '', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
        )

    def test_non_staff_no_access(self):
        """
        Verify that we can't access the view when we aren't a staff user.
        """
        self.client.login(username=self.non_staff_user.username, password='test')
        response = self.call_add_users_to_cohorts('')
        self.assertEqual(response.status_code, 403)

    def test_post_only(self):
        """
        Verify that we can't call the view when we aren't using POST.
        """
        self.client.login(username=self.staff_user.username, password='test')
        response = self.call_add_users_to_cohorts('', method='GET')
        self.assertEqual(response.status_code, 405)

    @patch('instructor.views.api.instructor_task.api.submit_cohort_students')
    @patch('instructor.views.api.store_uploaded_file')
    def test_success_username(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call a background task when
        the CSV has username and cohort columns.
        """
        self.verify_success_on_file_content(
            'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('instructor.views.api.instructor_task.api.submit_cohort_students')
    @patch('instructor.views.api.store_uploaded_file')
    def test_success_email(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when the CSV has email and cohort columns.
        """
        self.verify_success_on_file_content(
            'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('instructor.views.api.instructor_task.api.submit_cohort_students')
    @patch('instructor.views.api.store_uploaded_file')
    def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when the CSV has username, email and cohort columns.
        """
        self.verify_success_on_file_content(
            'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('instructor.views.api.instructor_task.api.submit_cohort_students')
    @patch('instructor.views.api.store_uploaded_file')
    def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when lines in the CSV are delimited by carriage returns.
        """
        self.verify_success_on_file_content(
            'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('instructor.views.api.instructor_task.api.submit_cohort_students')
    @patch('instructor.views.api.store_uploaded_file')
    def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when lines in the CSV are delimited by carriage returns and line
        feeds.
        """
        self.verify_success_on_file_content(
            'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
        )
| mtlchun/edx | lms/djangoapps/instructor/tests/test_api.py | Python | agpl-3.0 | 177,188 | [
"VisIt"
] | 2d50eb94961e1024c3cecd79e21a8525e3c61b7f3f1fbc76a8ada9d4eb7adb4a |
from pymol.cgo import *
from pymol import cmd
from random import random, seed
from chempy import cpv

# Display VMD-like coordinate axes as a CGO object: for each axis a solid
# capsule (SAUSAGE) shaft from the origin plus a CONE arrowhead at the tip.
# The three axis definitions were previously triplicated verbatim; they are
# generated from a single (endpoint, color) table instead.
obj = []
scale = 1.0

# (axis endpoint, RGB color): X is red, Y is green, Z is blue.
_AXES = [
    ((scale, 0.0, 0.0), (1.0, 0.3, 0.3)),
    ((0.0, scale, 0.0), (0.3, 1.0, 0.3)),
    ((0.0, 0.0, scale), (0.3, 0.3, 1.0)),
]

for (x, y, z), (r, g, b) in _AXES:
    # Shaft: capsule from the origin to the axis endpoint.
    obj.extend([
        SAUSAGE,
        0.0, 0.0, 0.0,    # XYZ 1
        x, y, z,          # XYZ 2
        0.1 * scale,      # radius
        r, g, b,          # RGB color 1
        r, g, b,          # RGB color 2
    ])
    # Arrowhead: cone tapering to a point from the endpoint to 1.2x the
    # endpoint along the same axis (1.2 * 0.0 == 0.0 for the other two
    # components, matching the original literals).
    obj.extend([
        CONE,
        x, y, z,                      # XYZ 1 (base)
        1.2 * x, 1.2 * y, 1.2 * z,    # XYZ 2 (tip)
        0.3 * scale,                  # radius 1
        0.0,                          # radius 2 (tapers to a point)
        r, g, b,                      # RGB color 1
        r, g, b,                      # RGB color 2
        1.0, 1.0,                     # caps 1 & 2
    ])

cmd.load_cgo(obj, 'cgo_axes')

# rotate the view
cmd.turn('y', -45)
cmd.turn('x', 30)

# zoom out a bit
cmd.zoom('all', 2)

# move the rear clipping plane back a bit to brighten things up
cmd.clip('far', -5)
| gratefulfrog/lib | python/pymol/pymol_path/examples/devel/cgo_axes.py | Python | gpl-2.0 | 2,380 | [
"ChemPy",
"PyMOL",
"VMD"
] | 73382231a5e122be87d3147453fff5868cb582a0fa271c31ec01798b04010d71 |
# Git version - should use this one for now.
print "Welcome to BOBadventure, please enter your name and hit 'Enter' to see more text. To make a choice, please enter the character infront of the option then hit 'Enter'. You might encounter questions that requires you to check on the internet."
name= raw_input("What is your name: ")
if name.lower() == "bob":
raw_input("What an interesting name!")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet...")
raw_input("Even when every channel is reporting about the serial killer who has already killed 29 people, you feel isolated and bored.")
raw_input("You do not see Mr. B walking his dogs toady, they walk pass you every day. You like his dogs, and they also seem to like you.")
raw_input("...")
raw_input("One hour has passed, you feel like you are at the bottom of the sea. You tried to text some friend, who you have not talked to since last year.")
raw_input("...")
raw_input("No reply.")
raw_input("...")
raw_input("You fall asleep.")
raw_input("...")
raw_input("You sit right next to someone, you cannot see her face. But you know you cherish her. She starts to run away from you, you desperately try to grab her hand. Yet, no matter how hard you try you just can't reach her.")
raw_input("...")
# the dream, forshadow something about the true end
raw_input("You are woken up by the buzzing of your phone. Someone is texting you, apparently, that person is really urgent. You have over 99 unread text message.")
for x in range(5):
print "'Help'"
choice1= raw_input("You decide to: a. Ignore b. Reply...")
while choice1 !="b":
print "The text messages do not seem to stop, may be if you reply the stranger, the texting will stop."
choice1= raw_input("You decide to: a. Ignore b. Reply...")
if choice1 == "b":
print "You replied the stranger. What do you want?"
raw_input("...")
raw_input("'Finally someone replied me Help I was trapped in some place by this psycho'")
raw_input("'I stole his phone I can't call the police'")
raw_input("'I can't let him hear me'")
raw_input("'Please help me he is about to find out me he is gonna ki...'")
raw_input("...")
raw_input("The person stop texting you. You are worried, what if it is not a prank. Someone just told you that he or she is about to get killed")
raw_input("You decided to text the stranger. 'Are you alright?'")
raw_input("...")
raw_input("'Everything is going to be fine, " + name + ", as long as you cooperate. My name is BOB, I am not going to hurt anyone if you follow my instructions.'")
raw_input("...")
raw_input("You should be terrified. But you feel really calm, like you know what to do, what is going to happen on you.")
raw_input("Maybe you are just too bored about your peaceful life. Maybe small interlude won't be that bad.")
raw_input("Now let's start with a simple game.")
raw_input("...")
# choice 2 - play the game or not
choice2= raw_input("BOB is going to play a little game with you. You have no reason to accept, at the end, BOB and the victim may just be people who are trying to do those 'social experiment' on you. You decided to: a. Accept b. Reject...")
#while choice 2 != a and b
while choice2.lower() != "b" and choice2.lower()!="reject" and choice2.lower() != "a" and choice2.lower()!= "accept":
print "You cannot always run away from making decison. It is your obligation to decide."
choice2= raw_input("Now, pick one: a. Accept b. Reject...")
#reject to play
if choice2.lower() == "b" or choice2.lower()=="reject":
raw_input("You reject BOB. There is no more text message coming, you are left alone again. You lay back to the couch, no one is going to bother you anymore.")
raw_input("you are back in peace. You are safe again.")
raw_input("...")
raw_input("You fall asleep")
raw_input("...")
raw_input("You sit right next to someone, you cannot see her face. But you know you cherish her. She starts to run away from you, you desperately try to grab her hand. Yet, no matter how hard you try you just can't reach her.")
raw_input("...")
raw_input("Someone opened your door. You can hear it. You walk to your front door.")
raw_input("No one is there.")
raw_input("Just to make sure you open the door. The sunlight is too bright, so weird it is already afternoon. You look down, you find a package.")
raw_input("...")
raw_input("It is soaked in blood.")
raw_input("...")
raw_input("You are sitting in the interrogation room. It looks like the one you have seen in movies. Grey wall, grey wall seems to crush you.")
raw_input("...")
raw_input("'Do you recognize the ring on her finger? Who were you with before you received the package? Do you recognize these faces?'")
raw_input("'These are the works done by that serial killer who already killed 29 people, of course, how could you not know, the news is everywhere!'")
raw_input("'We have to protect you, maybe we could also catch that infamous killer. But the choice is still in your hand, you decide whether we will send people to be around you or not.'")
raw_input("...")
#choice 3- have protection or not
choice3 = raw_input("Now, you know BOB is an extremely dangerous person, your blood is pumping faster, a sense of livliness in your plain life. You decided to: a. Accept b. Reject...")
while choice3.lower() != "b" and choice3.lower()!="reject" and choice3.lower() != "a" and choice3.lower()!= "accept":
print "You cannot always run away from making decison. It is your obligation to decide."
choice3= raw_input("Now, pick one: a. Accept b. Reject...")
# if accept protection
if choice3.lower() == "a" or choice3.lower() == "accept":
raw_input("You accept the protection, of course you need protection, a serial killer just send you a hand!")
raw_input("...")
raw_input("Three days have passed, you are secretly watched and protected by some undercover police. You can still live your normal life, they are really doing a good job not disturbing you.")
raw_input("...")
raw_input("You receive a text message from an unknown number.")
raw_input("'Hi! This is BOB, it has been so long since I last talked to you. So, I decide to visit you! I will bring a huge surprise~'")
raw_input("You tell the police that BOB is going to visit you.")
raw_input("They plan to catch BOB when he arrives at your house. They tell you to not worry about anything, and everything will go with plan.")
raw_input("...")
raw_input("You receive another text message from BOB.")
raw_input("'Hey, go to your bedroom, I am entering your house through your window. Don't tell anyone.")
choice10 =raw_input("You decide to: a. Tell the police b. Go to the bedroom...")
while choice10.lower() != "b" and choice10.lower() != "a":
print "You have to make a decision! Think carefully!"
choice10 =raw_input("You decide to: a. Tell the police b. Go to the bedroom...")
#tell police
if choice10.lower()== "a":
raw_input("You tell the undercover polices about BOB, they are going to hide in your bedroom.")
raw_input("...")
raw_input("You wait in your bedroom.")
raw_input("You hear someone trying to open your bedroom window. It must be BOB.")
raw_input("'Hey " + name + "! I bring you a gift! Open it. Don't worry, it won't hurt you, maybe.'")
raw_input("You are wondering why those polices are not catching BOB. You open the gift, it is a baseball bat. You remember you played baseball in your highschool.")
raw_input("'Does it reminds you of something? Do you still remember when is the last time you swing a bat? Does it feel good to hit something with a bat?'")
raw_input("As BOB is asking you some nonsense, those undercover polices come out. However, they are not planning to catch BOB")
raw_input("They gather around you, they stare at you, all 29 undecover polices.")
raw_input("Then their faces start to change, some grow blisters and become swellen, some even turn to white bones.")
raw_input("'Come on, try to remember! What you have done! What you did to those people! You can't just forget about them!'")
raw_input("The bat in your hand also starts to change.")
import random
# Outcome table for the "gift" fight: True means the transformed weapon
# lets the player win, False means the player is overwhelmed.
giftdirection = {
    "gun": True,
    "bottle of poison": False,
    "knife": False,
}


def game4(chosen_gift):
    """Look up whether the gift the bat turned into wins the fight."""
    return giftdirection[chosen_gift]


def gift():
    """Randomly decide which weapon the gift turns into."""
    return random.choice(["bottle of poison", "gun", "knife"])
gift= gift()
raw_input("It turns to a " + gift+ ".")
ending = game4(gift)
if ending is True:
raw_input("You shoot at those people, now, four of them is dead. You quickly run out of bullets.")
if ending is False:
raw_input("It doesn't help that much when you are trying to fight a group of people, you are swallowed by the crowd.")
raw_input("You can't remember anything. You don't know what have you done. You are killed by the 29 people surrounding you.")
raw_input("...")
raw_input("'Brutal, but he deserves it!'")
raw_input("'We still haven't seen the clue about the 30th victim.'")
raw_input("'The subject did receive their engagment ring though.'")
raw_input("'Restart BOB, we did not choose the right path. Next time, we should try a different path.")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet...")
raw_input("Even when every channel is reporting about the serial killer who has already killed 29 people, you feel isolated and bored.")
# don't tell
if choice10.lower() == "b":
raw_input("You wait in your bedroom.")
raw_input("You hear someone trying to open your bedroom window. It must be BOB.")
raw_input("'Hey " + name + "! I bring you a gift! Open it. Don't worry, it won't hurt you, maybe.'")
raw_input("You open the gift, it is a baseball bat. You remember you played baseball in your highschool.")
raw_input("'Does it reminds you of something? Do you still remember when is the last time you swing a bat? Does it feel good to hit something with a bat?'")
raw_input("'How about these?'")
raw_input("The bat in your hand starts to change into a gun, a knife, a bottle of poison...")
raw_input("Those weapons are covered with blood.")
raw_input("'Come on, try to remember! What you have done! What you did you do with these things! You can't just forget about them!'")
raw_input("You can't remember anything. You don't know what have you done. Yet, it feels so familiar when you are holding these weapons.")
raw_input("BANG")
raw_input("BOB shot you.")
raw_input("'I guess your brain is not ready yet, there is nothing in your brain yet. Maybe next time, you will remember something~'")
raw_input("...")
raw_input("'Brutal, but he deserves it!'")
raw_input("'We still haven't seen the clue about the 30th victim.'")
raw_input("'The subject did receive their engagment ring though.'")
raw_input("'Restart BOB, we did not choose the right path. Next time, we should try a different path.")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet...")
raw_input("Even when every channel is reporting about the serial killer who has already killed 29 people, you feel isolated and bored.")
# if reject protection
if choice3.lower() == "b" or choice3.lower() == "reject":
raw_input("You reject the protection. Deeply in your mind you know that this sense of liveliness is going to lead you into something horrible. You don't want the polices to get involved in your life.")
raw_input("...")
raw_input("You sit down in your cozy couch in your living room. You feel isolated once again. No one is going to intrude your life, you think confidently.")
raw_input("...")
raw_input("Your phone buzzes, you received a text message.")
raw_input("'Hi! This is BOB! We haven't talked for so long, I am starting to miss you. Please come to The Great Park we shall have a face-to-face meeting there.'")
raw_input("'And everything will be clear.'")
raw_input("The Great Park, what a familiar name. Of course, everyone in this area know about this park, but there is something else about this park that triggers your feeling.")
choice4= raw_input("You decide to: a. Accept b. Reject...")
while choice4.lower() != "b" and choice4.lower()!="reject" and choice4.lower() != "a" and choice4.lower()!= "accept":
print "You cannot always run away from making decison. It is your obligation to decide."
choice4= raw_input("Now, pick one: a. Accept b. Reject...")
while choice4 != "a" and choice4 != "accept":
print "You should go to the park! You can't let this strange sense of familiarity to be stuck in your brain forever!"
choice4= raw_input("Now, try again: a. Accept b. Reject...")
# go to the park
if choice4.lower()== "a" or choice4.lower()== "accept":
raw_input("...")
raw_input("You arrive at the park, you do not like this place, but everything about this park is so familiar to you. You saw a bench under a huge oak tree, you want to sit on that bench.")
raw_input("Someone is already sitting on that bench. You just stand and wait until he leaves.")
raw_input("Now you are sitting on the bench under the oak tree. It is Spring right now; the sun is shining and warm wind is blowing across your face. You think BOB won't show up soon.")
raw_input("You fall asleep.")
raw_input("...")
raw_input("You are hiding behind the bushes, she is walking on the natural path. Leaves cover the ground, it must be Winter or Fall. You breathe heavily, looks like you are trying really hard to make sure she doesn't see you. When she walks pass a bench, you jumped out of the bushes. You grab her hand. She is resisting, and you try to calm her down...")
raw_input("...")
raw_input("'You are going to get sick if you sleep here, " + name+ ". Wake up'")
raw_input("You open your eyes, yet everything looks so blurry.")
raw_input("'Don't worry, it is just some privacy protection mechanism. Your vision will be restored after you win the game. See, I just want to play a small game with you, you rejected me last time, so this time I bring a different game. I spend a lot of money on this by the way.'")
choice5= raw_input("BOB wants to play a game with you decide to: a. Accept b. Reject...")
while choice5.lower() != "b" and choice5.lower()!="reject" and choice5.lower() != "a" and choice5.lower()!= "accept":
print "You cannot always run away from making decison. It is your obligation to decide."
choice5= raw_input("Now, are you going to play with BOB: a. Accept b. Reject...")
#reject the game
if choice5.lower() == "b" or choice5.lower() == "reject":
raw_input("'I remember I mentioned that your vision will be restored after you play this game with me. Since you are not playing it with me, just pray you are going to adapt to your blindness'")
raw_input("You are sitting on the bench, you can't go anywhere. BOB have already left, glad that he did not kill you.")
raw_input("While you are sitting, you feel extremely calm, now, without your vision, you are isolated from a lot of information of the outside world. You enter a simple and peaceful world. You stand up, and want to embrace this brand-new world...")
raw_input("BANG")
raw_input("You have been hit in the head by some metal rod, maybe a baseball bat. Warm liquid strains down your head. You lose conscious. It is har to believe you will wake up again.")
raw_input("I assume it is BOB, I guess you did not make the right choice.")
#accept the game
if choice5.lower() == "a" or choice5.lower() == "accept":
raw_input("'The game is really simple. Here put on this pair of glasses, after I start the game, you will enter a virtual reality. There will be three weapons lying infornt of you, just follow your heart and pick one. Then you just need to fight the monster using your weapon. There are three types of monter, you never know which monster you will encounter. If you win, I will bring peace back to your life... Now, the game starts!'")
raw_input("You are not sure about what will happen if you lose.")
import random
# (weapon, monster) -> True when that weapon defeats that monster.
directions = {
    ("bat", "little monster"): True,
    ("gun", "little monster"): False,
    ("poison", "little monster"): True,
    ("bat", "medium monster"): True,
    ("gun", "medium monster"): True,
    # BUG FIX: this key used to be ("poison'", "medium monster") -- a stray
    # apostrophe -- so picking poison against a medium monster raised a
    # KeyError and crashed the game instead of losing gracefully.
    ("poison", "medium monster"): False,
    ("bat", "big monster"): False,
    ("gun", "big monster"): True,
    ("poison", "big monster"): False
}


def game2(weapon, monster):
    """Return True when `weapon` beats `monster`, False when the player loses."""
    return directions[(weapon, monster)]


def monster():
    """Pick the monster the player will face, uniformly at random."""
    return random.choice(["big monster", "medium monster", "little monster"])
weapon = raw_input("You find a bat, a gun , and a bottle of poison. You chose(just type the name of the weapon)...")
while weapon != "bat" and weapon != "gun" and weapon != "poison":
print "You just need to type: bat, gun, or poison. "
weapon= raw_input("Quick! Pick one: ")
monster = monster()
result = game2(weapon, monster)
raw_input("You encounter a " + monster + ", you try to use your " + weapon + " to kill it...")
if result is True:
print "Congradulation! You killed the monster! You will receive a golden ring as a prize!"
raw_input("'Now take off your glasses.' You hear BOB talking to you. But you can't do what he says, there is no glasses on you.")
raw_input("'Ah, forget about the glasses now, it won't hurt you. Did you found anything when you killed the monster?")
raw_input("You tell him about the golden ring.")
raw_input("'Have you seen this ring before? Do you know the owner of this ring? Come on! You must remember something! There must be something left in your head!'")
raw_input("'No? Nothing? Oh forget about it! It is not the right path, subject's brain is not ready yet. Maybe next time, next time we will find something.'")
raw_input("'Restart BOB, quick, we don't have much time left!'")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet...")
raw_input("Even when every channel is reporting about the serial killer who has already killed 29 people, you feel isolated and bored......")
if result is False:
print "You failed to kill by the monster. I guess you are not ready yet."
raw_input("You lay on the ground, it hurts. The pain caused by the attack of the monster feel so real, it feels like you are actually bleeding. 'Hey BOB, I failed the game, what will happen to me?' You ask BOB.")
raw_input("...")
raw_input("At least one hour had passed, you start to wonder how come no one walking pass you notices something is wrong with you. You try to take off the glasses, yet, it seems like the glasses have dissappeared.")
raw_input("'BOB reloading...'")
raw_input("You see this infront of your eyes, apparently you are still in the virtual reality created by that glasses.")
raw_input("Suddenly, everything goes dark. After a few seconds, your vision becomes clearer and brighter.")
raw_input("You hear people talking... 'I guess our subject's brain is not ready yet'")
raw_input("'Apparently, that was not the right path to take.'")
raw_input("'I hope this time, we will find out the answer hidden in your head.'")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet...")
raw_input("Even when every channel is reporting about the serial killer who has already killed 29 people, you feel isolated and bored......")
#accept the game
if choice2.lower() == "a" or choice2.lower()== "accept":
raw_input("'I'm glad that you agree, things will be really messed up if you reject me. After all, you could quit anytime you want, but I won't guarantee what will happen on you and the victim.'")
raw_input("'The game is really simple, I will give you two riddles, you just have to find out the answer and send it to me. You will not be able to get the next riddle unless you answer the previous answer correctly.'")
raw_input("Don't worry you will have three chances for each riddle, and the answers are all inside your head. Seems really fair, right?")
raw_input("...")
def end1():
# BOB's riddle game: two graded riddles with 3 chances each, plus one joke
# question that cannot be failed.  Returns True when the player survives
# both riddles, False when BOB "shoots" the player after the chances run out.
# NOTE(review): each `for _ in range(4)` loop gives the initial answer plus
# two retries; the 4th iteration is the fail path (the n == 2 branch only
# scolds without re-prompting before failure) -- intentional-looking but odd.
q1= raw_input("First question: What fastens two people yet touches only one? (2 words): ")
for n in range(4):
if q1.lower() == "wedding ring" and n <= 2:
# Correct within 3 chances: immediately ask riddle 2 and leave this loop.
q2 = raw_input("Correct! See, I told you the answers are all in your brain. Next question: Almost everyone sees me without noticing me, For what is beyond is what he or she seeks. What am I? (1 word): ")
break
elif q1.lower() != "wedding ring" and n < 2:
print "Your answer is not correct " + str(2-n) + " out of 3 chances left~"
q1 = raw_input("What fastens two people yet touches only one? (2 words): ")
elif q1.lower() != "wedding ring" and n == 2:
print "Oh come on, the answer is inside your brain!"
else:
# n == 3 with a wrong answer: all chances used, the player dies.
raw_input("'Opps~ There is no chance left'")
raw_input("You feel something is pressed against your head")
raw_input("Looks like your brain is not ready yet")
print "You has been shot by someone, I assume it is BOB."
return False
# Second riddle: same 3-chance structure as above; q2 is guaranteed to be
# bound here because the only way past the first loop is via its break.
for m in range(4):
if q2.lower() == "window" and m <= 2:
# The third "riddle" is a joke -- any answer (including none) wins.
q3 = raw_input("Next question: Why was the photographer arrested? You don't have to give me the answer for this, if you don't know, just keep on going and hit Enter ;)")
if q3 != "He shot his customers and blew them up":
print "'I know you will have a hard time answering this, I will just tell you the answer. The photographer shot the customers and blew them up!'"
return True
elif q3 == "He shot his customers and blew them up":
print "Good job, I mean, the answers are all in your head. I just tried to make things easier for you."
return True
elif q2.lower() != "window" and m < 2:
print "Your answer is not correct " + str(2-m) + " out of 3 chances left~"
q2 = raw_input("Almost everyone sees me without noticing me, For what is beyond is what he or she seeks. What am I? (1 word): ")
elif q2.lower() != "window" and m == 2:
print "Oh come on, the answer is inside your brain!"
else:
# 4th iteration with a wrong answer: fail exactly like riddle 1.
raw_input("'Opps~ There is no chance left'")
raw_input("You feel something is pressed against your head")
raw_input("Looks like your brain is not ready yet")
print "You has been shot by someone, I assume it is BOB."
return False
#if player answer the riddle correctly
if end1() is True:
raw_input("'Ha! The last one is just a small joke to make you more relaxed. Tight nerves won't help anyone to get the answer. Now, I have another game for you. I promise this is the last one.'")
raw_input("'I want you to go to your bed room, open the drawer right next to your bed, and find a six-sided dice.'")
raw_input("You are surprised that BOB knows about the drawer and the dice.")
raw_input("'Quick, the game need that dice to start!'")
choice6= raw_input("Knowing that BOB knows about the structure and details of your house, you decide to: a. Accept b. Reject...")
while choice6.lower() != "b" and choice6.lower()!="reject" and choice6.lower() != "a" and choice6.lower()!= "accept":
print "You cannot always run away from making decison. It is your obligation to decide."
choice6= raw_input("Quick, make a decision, we don't have that much time left: a. Accept b. Reject...")
#if reject to find the dice
if choice6.lower() == "b" or choice6.lower() == "reject":
raw_input("You reject BOB, you do not want other people to decide what you are going to do. You have free will, you have the ability to choose whatever path you want to take.")
raw_input("There is no more text message coming from BOB. You think you are safe now.")
raw_input("You feel tired, after all of the things that have happened today. You sit on the couch, turn on the television, and fall asleep.")
raw_input("'Pause the operation!'")
raw_input("'BOB fails to interact with the subject, guys, we are stuck.")
raw_input("'I think we have encounter a BUG of this system, ha, those developers said they had removed all the BUGs. This is why I do not agree to cooperate with those people, we can do a much better job alone.'")
raw_input("'Restart BOB, quick! We don't have much time left, let's just hope next time, we will find the right path and the answer.'")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet.......")
if choice6.lower() == "a" or choice6.lower() == "accept":
raw_input("You think the safest way right now is to what BOB says. You head toward your bed room.")
raw_input("You are about to go pass your kitchen, there are knives in the kitchen...")
choice7= raw_input("a. Get a knife it may be useful later b. Keep walking you are running out of time...")
while choice7.lower() != "b" and choice7.lower() != "a":
print "You cannot always run away from making decison. It is your obligation to decide."
choice7= raw_input("Quick, make a decision, we don't have that much time left: a. Get the knife b. Keep going...")
#go to kitch
if choice7.lower() == "a":
raw_input("You open the door of the kitchen, you hear the exhasut fan running.")
raw_input("Weird, you do not remember when did you turn on the fan.")
raw_input("You saw two people standing in your kitchen, you recognize one of them.")
raw_input("'Sarah!' You yell at your ex-fiancee, but she doesn't seem to recognize you.")
raw_input("They knock you down, 'The other one must be BOB', you realize")
raw_input("They turn off the exhaust fan, you smell sulfur.")
raw_input("BOB punches you in your stomach, you are going to loose consciousness. You know they are going to blow up your house.")
raw_input("Your mind starts to wonder. You are confused why Sarah wants to kill you, you loved her so much that you even bought her a really expensive engagement ring.")
raw_input("Maybe it is because she finds out your secret, the secret you hide from the world.")
raw_input("If you could go back in time, before Sarah broke up with you in the park, things could end up differently...")
raw_input("'This is by far the biggest clue we have found!'")
raw_input("'Guys, I think we are on the right track, but we still need more information.'")
raw_input("'We should try a slightly different path, maybe we will find a better answer...'")
#keep walking
if choice7.lower() == "b":
raw_input("Forget about the knife, BOB is probably watching your action somewhere, you do not want to make him angry.")
raw_input("You enter your bedroom.")
raw_input("...")
raw_input("There is no one in your bedroom, you thought BOB will be there.")
raw_input("You open your drawer. Beside the from the dice, you also see a golden ring.")
raw_input("That was your engagment ring, your fiancee just broke up with you. What a stupid way it is to break up in your favorite park.")
raw_input("You text BOB: 'I have find the dice.")
raw_input("...")
raw_input("'Great! The game we are going to play is really simple. You just need to roll the dice and text me the number. I will also roll my six-sided dice, but my dice is slightly different than yours, there are only three different numbers. I can't tell you what those numbers are, but if the number you rolled out is the same with the number I rolled out, you will loose the game.'")
choice8= raw_input("You decide to: a. Roll the dice b. Runaway...")
while choice8.lower() != "b" and choice8.lower() != "a":
print "You cannot always run away from making decison. It is your obligation to decide."
choice8= raw_input("This is a really important! Make a decision!: a. Roll the dice b. Runaway...")
if choice8.lower() == "b":
raw_input("You runaway, everything is insane! You have to get help!")
raw_input("BANG...")
raw_input("You have been shot. You fall on the floor. You saw two people, one is holding the gun, and the other one is Sarah, you ex-fiancee.")
raw_input("The one holding the gun must be BOB. You assumed that Sarah hires BOB to kill you, becasue she found out your deepest secret. You were planning to kill her and burry her, but she has already taken action before you do...")
raw_input("...")
raw_input("'It ends here?'")
raw_input("'These are enough to proof subject's intention, but we don't know where did the subject burry the victim")
raw_input("'Run BOB again, we need to find a better path.")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet......")
if choice8.lower() == "a":
raw_input("You roll the dice.")
raw_input("...")
import random
# Dice-duel outcome table: the player loses (True) exactly when BOB's roll
# equals the player's roll.  BOB's die only shows "1", "2" or "6"; the
# player's die is an ordinary six-sider.  The comprehension reproduces the
# original 18-entry literal table exactly.
directions2 = {
    (b, d): b == d
    for b in ("1", "2", "6")
    for d in ("1", "2", "3", "4", "5", "6")
}


def game3(bob, dice):
    """Return True when BOB's roll matches the player's (the player loses)."""
    return directions2[(bob, dice)]


def bob():
    """Roll BOB's three-value die."""
    return random.choice(["1", "2", "6"])


def dice():
    """Roll the player's ordinary six-sided die."""
    return random.choice(["1", "2", "3", "4", "5", "6"])
bob = bob()
dice = dice()
result2 = game3(bob, dice)
raw_input("It is " + dice)
raw_input("You text BOB, 'I rolled out " + dice)
raw_input("...")
if result2 is True:
raw_input("'Oh~ How unlucky, I also rolled out " + bob + " I guess you lose. Try to run, someone is coming for you~")
raw_input("The bedroom door is opened, you saw someone holding a baseball bat.")
raw_input("It is Sarah, your ex-fiancee.")
raw_input("You are in extreme terror, you are afraid of Sarah, just like she is someone who has just risen from death. You cannot move.")
raw_input("She hits you with the bat, as she hits harder, her face deforms. She starts to look like other people, some of them you recognize, some of them seems familiar. Those faces all look rotten and grievous.")
raw_input("You are beaten to a point, even the weapon she holds starts to change. Gun, arrow, metal rod, knife...")
raw_input("...")
raw_input("'That's it? There must be more!")
raw_input("'I guess this is not the right path to the answer, but we find out the weapons used by the subject.'")
raw_input("'If we pick a different path, we might find out where he burried his 30th victim...'")
elif result2 is False:
raw_input("I rolled out " + bob + " How lucky! Now, go and see what is under your bed, I left you a weapon. You will need that for later. Someone is coming for you~")
raw_input("You look under your bed, there is a baseball bat.")
raw_input("Your bedroom door is opened.")
raw_input("You saw Sarah, your ex-fiancee, also holding a baseball bat.")
raw_input("She tries to attack you, but you block her attack, and knock her down to the floor.")
raw_input("You lose control, you start hitting her until she is covered with blood.")
raw_input("What have you done! You just killed your ex-fiancee!")
raw_input("You receive a text message 'Opps, seems like you killed Sarah~ Now, you have to find somewhere to hind her~ Quick, I already called the police, they will be here in about 3 minutes'")
def run():
    """Ask the player, up to 3 times, whether to bury the body or face the police.

    Returns True when the player chooses to bury the body, False once the
    chances are used up and the police arrive.  BUG FIX: the original
    `if a: ... elif != a: ... else:` chain made the else branch (police
    arrive, return False) unreachable dead code, so after three refusals the
    function silently fell through and returned None.
    """
    choice9 = raw_input("What should you do?: a. Go somewhere to burry the body b. Explain to the police...")
    for x in range(3):
        if choice9.lower() == "a":
            raw_input("You carry Sarah's body to your car.")
            return True
        raw_input("Are you kidding, you do not have time! I don't think the polices will listen to you when they see what did you do to Sarah!")
        choice9 = raw_input("Quick! Be wise!: a. Burry the body b. Wait for the police...")
    raw_input("The polices have arrived at your house.")
    return False


# BUG FIX: call run() exactly once.  The original `if run() is True: ...
# elif run() is False:` re-ran the whole prompt sequence a second time
# whenever the first call did not return True, and a None result (see above)
# skipped both endings entirely.
outcome = run()
if outcome is True:
    # "Bury the body" ending: the subject leads the investigators to the grave.
    raw_input("You start driving, you don't have a clear idea about where are you going, but you just keep driving.")
    raw_input("After around thirty minutes, you arrived at The Great Park, where Sarah broke with you. Yes! You are going to burry her in this park! Let her rots under the big oak tree, where she told you to get out of her life.")
    raw_input("You try to dig the ground under the oak tree, but no matter how hard you try, the hole just doesn't seem deep enough.")
    raw_input("Suddenly, you find something, it is a hand.")
    raw_input("You saw a golden ring in the hand, just like the one Sarah had.....")
    raw_input("...")
    raw_input("'Guys! We find the answer! Quick! Tell them the body is burried under the oak tree in The Great Park!'")
    raw_input("'I can't believe this actually works! What a crazy idea it is to interact with the criminal's brain!'")
    raw_input("What else can we do, the subject has lost most of his memory, we have to use BOB to retrieve them.")
    raw_input("Don't be so excited, those field people still have to dig under that oak tree, may be the body of the 30th victim is not burried there.....")
elif outcome is False:
    # "Caught by the police" ending: the simulation restarts for another pass.
    raw_input("You are caught by the police. You try to explain BOB to them, they think you are telling lies. They check your phone, you haven't recieve any message since last week.")
    raw_input("They identify the victim, it is the missing 30th victim of the serial killer...")
    raw_input("'I guess, our subject just killed his 30th victim again in the virtual reality created by his brain and our system.'")
    raw_input("'Now we know, what did he use to kill Sarah. However, we still have to find out where he burried her.'")
    raw_input("Quick! Restart BOB, we are really close to the answer! Next time, try a slightly different path.")
    raw_input("...")
    raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
    raw_input("But today is too quiet...")
    raw_input("Even when every channel is reporting about the serial killer who has already killed 29 people, you feel isolated and bored......")
| Jawcris/BOBadevnture | word_adventure.py | Python | apache-2.0 | 41,526 | [
"VisIt"
] | bb0276ce0fd1db4b90052295fe6abcd59ab4c49cb834f4b7c65dace5c2817aac |
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from datetime import datetime
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
# Values the profile page's privacy drop-down stores for a fully public
# profile vs. a private one (see visit_profile_page below).
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
# Profile fields expected to be visible to other users for each privacy mode.
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
# Subset of the public fields the owner can edit in place.
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
# Tracking-event name emitted when a user changes an account setting.
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
# These value_for_dropdown_field method calls used to include
# focus_out = True, but a change in selenium is focusing out of the
# drop down after selection without any more action needed.
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
# Waits here for text to appear/save on bio field
profile_page.wait_for_ajax()
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
assert profile_page.privacy == 'all_users'
else:
assert profile_page.privacy == 'private'
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now()
# Load the page
profile_page.visit()
return profile_page
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
class LearnerProfileA11yTest(LearnerProfileTestMixin, AcceptanceTest):
    """
    Class to test learner profile accessibility.
    """
    a11y = True

    def test_editable_learner_profile_a11y(self):
        """
        Test the accessibility of the editable version of the profile page
        (user viewing her own public profile).
        """
        username, _ = self.log_in_as_unique_user()
        profile_page = self.visit_profile_page(username)
        profile_page.a11y_audit.config.set_rules({
            "ignore": [
                'aria-valid-attr',  # TODO: LEARNER-6611 & LEARNER-6865
                'region',  # TODO: AC-932
            ]
        })
        # Audit the page as rendered, then again with each editable field
        # opened for editing.
        profile_page.a11y_audit.check_for_accessibility_errors()
        for editable_field in ('language_proficiencies', 'bio'):
            profile_page.make_field_editable(editable_field)
            profile_page.a11y_audit.check_for_accessibility_errors()

    def test_read_only_learner_profile_a11y(self):
        """
        Test the accessibility of the read-only version of a public profile page
        (user viewing someone else's profile page).
        """
        # initialize_different_user should cause country, language, and bio to
        # be filled out (since privacy is public). It doesn't appear that this
        # is happening, although the method works in regular bokchoy tests.
        # Perhaps a problem with phantomjs? So this test is currently only
        # looking at a read-only profile page with a username.
        other_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
        self.log_in_as_unique_user()
        profile_page = self.visit_profile_page(other_username)
        profile_page.a11y_audit.config.set_rules({
            "ignore": [
                'aria-valid-attr',  # TODO: LEARNER-6611 & LEARNER-6865
                'region',  # TODO: AC-932
            ]
        })
        profile_page.a11y_audit.check_for_accessibility_errors()

    def test_badges_accessibility(self):
        """
        Test the accessibility of the badge listings and sharing modal.
        """
        username = 'testcert'
        AutoAuthPage(self.browser, username=username).visit()
        profile_page = self.visit_profile_page(username)
        profile_page.a11y_audit.config.set_rules({
            "ignore": [
                'aria-valid-attr',  # TODO: LEARNER-6611 & LEARNER-6865
                'region',  # TODO: AC-932
                'color-contrast'  # AC-938
            ]
        })
        # Audit the accomplishments listing first, then the sharing modal for
        # the first badge.
        profile_page.display_accomplishments()
        profile_page.a11y_audit.check_for_accessibility_errors()
        profile_page.badges[0].display_modal()
        profile_page.a11y_audit.check_for_accessibility_errors()
"VisIt"
] | fe099151b69416e286ab0f25cf9580b5662e3c1d332c1d07c2eab28f7553f2c0 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
import numpy as np
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import DummySpecies, Element, Species
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Magmom
from pymatgen.io.cif import CifBlock, CifParser, CifWriter
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.util.testing import PymatgenTest
try:
import pybtex
except ImportError:
pybtex = None
class CifBlockTest(PymatgenTest):
    """Tests for CifBlock parsing and round-trip serialization."""

    def test_to_string(self):
        """Parsing a CIF block and re-serializing it should be stable."""
        with open(self.TEST_FILES_DIR / "Graphite.cif") as f:
            s = f.read()
        c = CifBlock.from_string(s)
        # Round-trip: serialize, re-parse, and serialize again.
        cif_str_2 = str(CifBlock.from_string(str(c)))
        cif_str = """data_53781-ICSD
_database_code_ICSD 53781
_audit_creation_date 2003-04-01
_audit_update_record 2013-02-01
_chemical_name_systematic Carbon
_chemical_formula_structural C
_chemical_formula_sum C1
_chemical_name_structure_type Graphite(2H)
_chemical_name_mineral 'Graphite 2H'
_exptl_crystal_density_diffrn 2.22
_publ_section_title 'Structure of graphite'
loop_
_citation_id
_citation_journal_full
_citation_year
_citation_journal_volume
_citation_page_first
_citation_page_last
_citation_journal_id_ASTM
primary 'Physical Review (1,1893-132,1963/141,1966-188,1969)'
1917 10 661 696 PHRVAO
loop_
_publ_author_name
'Hull, A.W.'
_cell_length_a 2.47
_cell_length_b 2.47
_cell_length_c 6.8
_cell_angle_alpha 90.
_cell_angle_beta 90.
_cell_angle_gamma 120.
_cell_volume 35.93
_cell_formula_units_Z 4
_symmetry_space_group_name_H-M 'P 63/m m c'
_symmetry_Int_Tables_number 194
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, x-y, -z+1/2'
2 '-x+y, y, -z+1/2'
3 '-y, -x, -z+1/2'
4 '-x+y, -x, -z+1/2'
5 '-y, x-y, -z+1/2'
6 'x, y, -z+1/2'
7 '-x, -x+y, z+1/2'
8 'x-y, -y, z+1/2'
9 'y, x, z+1/2'
10 'x-y, x, z+1/2'
11 'y, -x+y, z+1/2'
12 '-x, -y, z+1/2'
13 '-x, -x+y, -z'
14 'x-y, -y, -z'
15 'y, x, -z'
16 'x-y, x, -z'
17 'y, -x+y, -z'
18 '-x, -y, -z'
19 'x, x-y, z'
20 '-x+y, y, z'
21 '-y, -x, z'
22 '-x+y, -x, z'
23 '-y, x-y, z'
24 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
C0+ 0
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_symmetry_multiplicity
_atom_site_Wyckoff_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_B_iso_or_equiv
_atom_site_occupancy
_atom_site_attached_hydrogens
C1 C0+ 2 b 0 0 0.25 . 1. 0
C2 C0+ 2 c 0.3333 0.6667 0.25 . 1. 0"""
        # Compare parsed, reference, and round-tripped text line by line,
        # ignoring leading/trailing whitespace on each line.
        for l1, l2, l3 in zip(str(c).split("\n"), cif_str.split("\n"), cif_str_2.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())
            self.assertEqual(l2.strip(), l3.strip())

    def test_double_quotes_and_underscore_data(self):
        """Double-quoted values and values starting with '_' must parse."""
        cif_str = """data_test
_symmetry_space_group_name_H-M "P -3 m 1"
_thing '_annoying_data'"""
        cb = CifBlock.from_string(cif_str)
        self.assertEqual(cb["_symmetry_space_group_name_H-M"], "P -3 m 1")
        self.assertEqual(cb["_thing"], "_annoying_data")
        # Serialization normalizes double quotes to single quotes.
        self.assertEqual(str(cb), cif_str.replace('"', "'"))

    def test_double_quoted_data(self):
        """Quotes embedded inside quoted values must be preserved."""
        cif_str = """data_test
_thing ' '_annoying_data''
_other " "_more_annoying_data""
_more ' "even more" ' """
        cb = CifBlock.from_string(cif_str)
        self.assertEqual(cb["_thing"], " '_annoying_data'")
        self.assertEqual(cb["_other"], ' "_more_annoying_data"')
        self.assertEqual(cb["_more"], ' "even more" ')

    def test_nested_fake_multiline_quotes(self):
        """Semicolon-delimited text fields containing bare ';' lines must parse as one value."""
        cif_str = """data_test
_thing
;
long quotes
;
still in the quote
;
actually going to end now
;"""
        cb = CifBlock.from_string(cif_str)
        self.assertEqual(
            cb["_thing"],
            " long quotes ; still in the quote ; actually going to end now",
        )

    def test_long_loop(self):
        """Loop rows longer than the line limit should wrap when serialized."""
        data = {
            "_stuff1": ["A" * 30] * 2,
            "_stuff2": ["B" * 30] * 2,
            "_stuff3": ["C" * 30] * 2,
        }
        loops = [["_stuff1", "_stuff2", "_stuff3"]]
        cif_str = """data_test
loop_
_stuff1
_stuff2
_stuff3
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"""
        self.assertEqual(str(CifBlock(data, loops, "test")), cif_str)
class CifIOTest(PymatgenTest):
    """Tests for CifParser/CifWriter against a range of real-world CIF files."""

    def test_CifParser(self):
        """Parse several reference CIFs and check formulas, lattice, and bibtex."""
        parser = CifParser(self.TEST_FILES_DIR / "LiFePO4.cif")
        for s in parser.get_structures(True):
            self.assertEqual(s.formula, "Li4 Fe4 P4 O16", "Incorrectly parsed cif.")

        parser = CifParser(self.TEST_FILES_DIR / "V2O3.cif")
        for s in parser.get_structures(True):
            self.assertEqual(s.formula, "V4 O6")

        # Bibliographic data in the CIF should be exported as BibTeX.
        bibtex_str = """
@article{cifref0,
author = "Andersson, G.",
title = "Studies on vanadium oxides. I. Phase analysis",
journal = "Acta Chemica Scandinavica (1-27,1973-42,1988)",
volume = "8",
year = "1954",
pages = "1599--1606"
}
"""
        self.assertEqual(parser.get_bibtex_string().strip(), bibtex_str.strip())

        parser = CifParser(self.TEST_FILES_DIR / "Li2O.cif")
        prim = parser.get_structures(True)[0]
        self.assertEqual(prim.formula, "Li2 O1")
        conv = parser.get_structures(False)[0]
        self.assertEqual(conv.formula, "Li8 O4")

        # test for disordered structures
        parser = CifParser(self.TEST_FILES_DIR / "Li10GeP2S12.cif")
        for s in parser.get_structures(True):
            self.assertEqual(s.formula, "Li20.2 Ge2.06 P3.94 S24", "Incorrectly parsed cif.")

        cif_str = r"""#\#CIF1.1
##########################################################################
#               Crystallographic Information Format file
#               Produced by PyCifRW module
#
#  This is a CIF file.  CIF has been adopted by the International
#  Union of Crystallography as the standard for data archiving and
#  transmission.
#
#  For information on this file format, follow the CIF links at
#  http://www.iucr.org
##########################################################################

data_FePO4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 10.4117668699
_cell_length_b 6.06717187997
_cell_length_c 4.75948953998
loop_ # sometimes this is in a loop (incorrectly)
_cell_angle_alpha
91.0
_cell_angle_beta 92.0
_cell_angle_gamma 93.0
_chemical_name_systematic 'Generated by pymatgen'
_symmetry_Int_Tables_number 1
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_attached_hydrogens
_atom_site_B_iso_or_equiv
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 0 . 1
Fe JJ2 1 0.281272 0.250000 0.974867 0 . 1
# there's a typo here, parser should read the symbol from the
# _atom_site_type_symbol
Fe Fe3 1 0.718728 0.750000 0.025133 0 . 1
Fe Fe4 1 0.781272 0.250000 0.525133 0 . 1
P P5 1 0.094613 0.250000 0.418243 0 . 1
P P6 1 0.405387 0.750000 0.918243 0 . 1
P P7 1 0.594613 0.250000 0.081757 0 . 1
P P8 1 0.905387 0.750000 0.581757 0 . 1
O O9 1 0.043372 0.750000 0.707138 0 . 1
O O10 1 0.096642 0.250000 0.741320 0 . 1
O O11 1 0.165710 0.046072 0.285384 0 . 1
O O12 1 0.165710 0.453928 0.285384 0 . 1
O O13 1 0.334290 0.546072 0.785384 0 . 1
O O14 1 0.334290 0.953928 0.785384 0 . 1
O O15 1 0.403358 0.750000 0.241320 0 . 1
O O16 1 0.456628 0.250000 0.207138 0 . 1
O O17 1 0.543372 0.750000 0.792862 0 . 1
O O18 1 0.596642 0.250000 0.758680 0 . 1
O O19 1 0.665710 0.046072 0.214616 0 . 1
O O20 1 0.665710 0.453928 0.214616 0 . 1
O O21 1 0.834290 0.546072 0.714616 0 . 1
O O22 1 0.834290 0.953928 0.714616 0 . 1
O O23 1 0.903358 0.750000 0.258680 0 . 1
O O24 1 0.956628 0.250000 0.292862 0 . 1

"""
        parser = CifParser.from_string(cif_str)
        struct = parser.get_structures(primitive=False)[0]
        self.assertEqual(struct.formula, "Fe4 P4 O16")
        self.assertAlmostEqual(struct.lattice.a, 10.4117668699)
        self.assertAlmostEqual(struct.lattice.b, 6.06717187997)
        self.assertAlmostEqual(struct.lattice.c, 4.75948953998)
        self.assertAlmostEqual(struct.lattice.alpha, 91)
        self.assertAlmostEqual(struct.lattice.beta, 92)
        self.assertAlmostEqual(struct.lattice.gamma, 93)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "srycoo.cif")
            self.assertEqual(parser.get_structures()[0].formula, "Sr5.6 Y2.4 Co8 O21")

            # Test with a decimal Xyz. This should parse as two atoms in
            # conventional cell if it is correct, one if not.
            parser = CifParser(self.TEST_FILES_DIR / "Fe.cif")
            self.assertEqual(len(parser.get_structures(primitive=False)[0]), 2)
            self.assertFalse(parser.has_errors)

    def test_get_symmetrized_structure(self):
        """symmetrized=True should return a SymmetrizedStructure equal to the plain one."""
        parser = CifParser(self.TEST_FILES_DIR / "Li2O.cif")
        sym_structure = parser.get_structures(primitive=False, symmetrized=True)[0]
        structure = parser.get_structures(primitive=False, symmetrized=False)[0]
        self.assertIsInstance(sym_structure, SymmetrizedStructure)
        self.assertEqual(structure, sym_structure)
        self.assertEqual(sym_structure.equivalent_indices, [[0, 1, 2, 3], [4, 5, 6, 7, 8, 9, 10, 11]])

    def test_site_symbol_preference(self):
        """_atom_site_type_symbol should take precedence over the site label."""
        parser = CifParser(self.TEST_FILES_DIR / "site_type_symbol_test.cif")
        self.assertEqual(parser.get_structures()[0].formula, "Ge0.4 Sb0.4 Te1")

    def test_implicit_hydrogen(self):
        """Implicit hydrogens should be recorded as site properties and warned about."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "Senegalite_implicit_hydrogen.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Al8 P4 O32")
                self.assertEqual(sum(s.site_properties["implicit_hydrogens"]), 20)
            self.assertIn(
                "Structure has implicit hydrogens defined, "
                "parsed structure unlikely to be suitable for use "
                "in calculations unless hydrogens added.",
                parser.warnings,
            )
            parser = CifParser(self.TEST_FILES_DIR / "cif_implicit_hydrogens_cod_1011130.cif")
            s = parser.get_structures()[0]
            self.assertIn(
                "Structure has implicit hydrogens defined, "
                "parsed structure unlikely to be suitable for use "
                "in calculations unless hydrogens added.",
                parser.warnings,
            )

    def test_CifParserSpringerPauling(self):
        """Parse problematic CIFs from the Springer Materials/Pauling file DBs."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Below are 10 tests for CIFs from the Springer Materials/Pauling file DBs.

            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1928405.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Er1 Mn3.888 Fe2.112 Sn6")
            self.assertTrue(parser.has_errors)

            # Partial occupancy on sites, previously parsed as an ordered structure
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1011081.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Zr0.2 Nb0.8")
            self.assertTrue(parser.has_errors)

            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1615854.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Na2 Al2 Si6 O16")
            self.assertTrue(parser.has_errors)

            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1622133.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Ca0.184 Mg13.016 Fe2.8 Si16 O48")
            self.assertTrue(parser.has_errors)

            # Partial occupancy on sites, previously parsed as an ordered structure
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1908491.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Mn0.48 Zn0.52 Ga2 Se4")
            self.assertTrue(parser.has_errors)

            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1811457.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Ba2 Mg0.6 Zr0.2 Ta1.2 O6")
            self.assertTrue(parser.has_errors)

            # Incomplete powder diffraction data, previously unparsable
            # This CIF file contains the molecular species "NH3" which is
            # parsed as "N" because the label is "N{x}" (x = 1,2,..) and the
            # corresponding symbol is "NH3". Since, the label and symbol are switched
            # in CIFs from Springer Materials/Pauling file DBs, CifParser parses the
            # element as "Nh" (Nihonium).
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1002871.cif")
            self.assertEqual(parser.get_structures(True)[0].formula, "Cu1 Br2 Nh6")
            self.assertEqual(parser.get_structures(True)[1].formula, "Cu1 Br4 Nh6")
            self.assertTrue(parser.has_errors)

            # Incomplete powder diffraction data, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1704003.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Rb4 Mn2 F12")
            self.assertTrue(parser.has_errors)

            # Unparsable species 'OH/OH2', previously parsed as "O"
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1500382.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Mg6 B2 O6 F1.764")
            self.assertTrue(parser.has_errors)

            # Unparsable species 'OH/OH2', previously parsed as "O"
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1601634.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Zn1.29 Fe0.69 As2 Pb1.02 O8")

    def test_CifParserCod(self):
        """
        Parsing problematic cif files from the COD database
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Symbol in capital letters
            parser = CifParser(self.TEST_FILES_DIR / "Cod_2100513.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Ca4 Nb2.0 Al2 O12")

            # Label in capital letters
            parser = CifParser(self.TEST_FILES_DIR / "Cod_4115344.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Mo4 P2 H60 C60 I4 O4")

    def test_parse_symbol(self):
        """
        Test the _parse_symbol function with several potentially
        problematic examples of symbols and labels.
        """
        test_cases = {
            "MgT": "Mg",
            "MgT1": "Mg",
            "H(46A)": "H",
            "O(M)": "O",
            "N(Am)": "N",
            "H1N2a": "H",
            "CO(1)": "Co",
            "Wat1": "O",
            "MgM2A": "Mg",
            "CaX": "Ca",
            "X1": "X",
            "X": "X",
            "OA1": "O",
            "NaA2": "Na",
            "O-H2": "O",
            "OD2": "O",
            "OW": "O",
            "SiT": "Si",
            "SiTet": "Si",
            "Na-Int": "Na",
            "CaD1": "Ca",
            "KAm": "K",
            "D+1": "D",
            "D": "D",
            "D1-": "D",
            "D4": "D",
            "D0": "D",
            "NH": "Nh",
            "NH2": "Nh",
            "NH3": "Nh",
            "SH": "S",
        }
        # Every element name (plus common label decorations) should map to itself.
        for e in Element:
            name = e.name
            test_cases[name] = name
            if len(name) == 2:
                test_cases[name.upper()] = name
                test_cases[name.upper() + str(1)] = name
                test_cases[name.upper() + "A"] = name
            test_cases[name + str(1)] = name
            test_cases[name + str(2)] = name
            test_cases[name + str(3)] = name
            test_cases[name + str(1) + "A"] = name

        # Special water/hydroxyl labels.
        special = {"Hw": "H", "Ow": "O", "Wat": "O", "wat": "O", "OH": "", "OH2": ""}
        test_cases.update(special)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "LiFePO4.cif")
            for sym, expected_symbol in test_cases.items():
                self.assertEqual(parser._parse_symbol(sym), expected_symbol)

    def test_CifWriter(self):
        """Writing a symmetrized structure should produce the expected CIF text."""
        filepath = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(filepath)
        writer = CifWriter(poscar.structure, symprec=0.01)
        ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe0 4 0.21872822 0.75000000 0.47486711 1
P P1 4 0.09461309 0.25000000 0.41824327 1
O O2 8 0.16570974 0.04607233 0.28538394 1
O O3 4 0.04337231 0.75000000 0.70713767 1
O O4 4 0.09664244 0.25000000 0.74132035 1"""
        for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())

    def test_symmetrized(self):
        """Structures written with symprec should round-trip via StructureMatcher."""
        filepath = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
        writer = CifWriter(poscar.structure, symprec=0.1)

        cif = CifParser.from_string(str(writer))
        m = StructureMatcher()

        self.assertTrue(m.fit(cif.get_structures()[0], poscar.structure))

        # for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
        #     self.assertEqual(l1.strip(), l2.strip())

        s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
        writer = CifWriter(s, symprec=0.1)
        s2 = CifParser.from_string(str(writer)).get_structures()[0]

        self.assertTrue(m.fit(s, s2))

        s = self.get_structure("Li2O")
        writer = CifWriter(s, symprec=0.1)
        s2 = CifParser.from_string(str(writer)).get_structures()[0]
        self.assertTrue(m.fit(s, s2))

        # test angle tolerance.
        s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
        writer = CifWriter(s, symprec=0.1, angle_tolerance=0)
        d = list(writer.ciffile.data.values())[0]
        self.assertEqual(d["_symmetry_Int_Tables_number"], 14)
        s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
        writer = CifWriter(s, symprec=0.1, angle_tolerance=2)
        d = list(writer.ciffile.data.values())[0]
        self.assertEqual(d["_symmetry_Int_Tables_number"], 62)

    def test_disordered(self):
        """Partially occupied sites should be written with fractional occupancies."""
        si = Element("Si")
        n = Element("N")
        coords = []
        coords.append(np.array([0, 0, 0]))
        coords.append(np.array([0.75, 0.5, 0.75]))
        lattice = Lattice(
            np.array(
                [
                    [3.8401979337, 0.00, 0.00],
                    [1.9200989668, 3.3257101909, 0.00],
                    [0.00, -2.2171384943, 3.1355090603],
                ]
            )
        )
        # Second site is a 50/50 Si/N mixture.
        struct = Structure(lattice, [si, {si: 0.5, n: 0.5}], coords)
        writer = CifWriter(struct)
        ans = """# generated using pymatgen
data_Si1.5N0.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N0.5
_chemical_formula_sum 'Si1.5 N0.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Si Si0 1 0.00000000 0.00000000 0.00000000 1
Si Si1 1 0.75000000 0.50000000 0.75000000 0.5
N N2 1 0.75000000 0.50000000 0.75000000 0.5"""
        for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())

    def test_cifwrite_without_refinement(self):
        """refine_struct=False should keep the input cell while still detecting symmetry."""
        si2 = Structure.from_file(self.TEST_FILES_DIR / "abinit" / "si.cif")
        writer = CifWriter(si2, symprec=1e-3, significant_figures=10, refine_struct=False)
        s = str(writer)
        assert "Fd-3m" in s
        same_si2 = CifParser.from_string(s).get_structures()[0]
        assert len(si2) == len(same_si2)

    def test_specie_cifwriter(self):
        """Oxidation states and dummy species should be written and re-read correctly."""
        si4 = Species("Si", 4)
        si3 = Species("Si", 3)
        n = DummySpecies("X", -3)
        coords = []
        coords.append(np.array([0.5, 0.5, 0.5]))
        coords.append(np.array([0.75, 0.5, 0.75]))
        coords.append(np.array([0, 0, 0]))
        lattice = Lattice(
            np.array(
                [
                    [3.8401979337, 0.00, 0.00],
                    [1.9200989668, 3.3257101909, 0.00],
                    [0.00, -2.2171384943, 3.1355090603],
                ]
            )
        )
        struct = Structure(lattice, [n, {si3: 0.5, n: 0.5}, si4], coords)
        writer = CifWriter(struct)
        ans = """# generated using pymatgen
data_X1.5Si1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural X1.5Si1.5
_chemical_formula_sum 'X1.5 Si1.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
X3- -3.0
Si3+ 3.0
Si4+ 4.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
X3- X0 1 0.50000000 0.50000000 0.50000000 1
X3- X1 1 0.75000000 0.50000000 0.75000000 0.5
Si3+ Si2 1 0.75000000 0.50000000 0.75000000 0.5
Si4+ Si3 1 0.00000000 0.00000000 0.00000000 1
"""
        for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())

        # test that mixed valence works properly
        s2 = Structure.from_str(ans, "cif")
        self.assertEqual(struct.composition, s2.composition)

    def test_primes(self):
        """Primed site labels should not confuse the parser."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "C26H16BeN2O2S2.cif")
            for s in parser.get_structures(False):
                self.assertEqual(s.composition, 8 * Composition("C26H16BeN2O2S2"))

    def test_missing_atom_site_type_with_oxistates(self):
        """Oxidation-state loops without _atom_site_type_symbol should still parse."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "P24Ru4H252C296S24N16.cif")
            c = Composition({"S0+": 24, "Ru0+": 4, "H0+": 252, "C0+": 296, "N0+": 16, "P0+": 24})
            for s in parser.get_structures(False):
                self.assertEqual(s.composition, c)

    def test_no_coords_or_species(self):
        """A CIF with only '?' placeholders for sites should raise ValueError."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            string = """#generated using pymatgen
data_Si1.5N1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N1.5
_chemical_formula_sum 'Si1.5 N1.5'
_cell_volume 40.0447946443
_cell_formula_units_Z 0
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Si3+ 3.0
Si4+ 4.0
N3- -3.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
? ? ? ? ? ? ?
"""
            parser = CifParser.from_string(string)
            self.assertRaises(ValueError, parser.get_structures)

    def test_get_lattice_from_lattice_type(self):
        """The lattice can be inferred from _symmetry_cell_setting when angles are missing."""
        cif_structure = """#generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
_symmetry_cell_setting Orthorhombic
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 1
Fe Fe2 1 0.281272 0.250000 0.974867 1
Fe Fe3 1 0.718728 0.750000 0.025133 1
Fe Fe4 1 0.781272 0.250000 0.525133 1
P P5 1 0.094613 0.250000 0.418243 1
P P6 1 0.405387 0.750000 0.918243 1
P P7 1 0.594613 0.250000 0.081757 1
P P8 1 0.905387 0.750000 0.581757 1
O O9 1 0.043372 0.750000 0.707138 1
O O10 1 0.096642 0.250000 0.741320 1
O O11 1 0.165710 0.046072 0.285384 1
O O12 1 0.165710 0.453928 0.285384 1
O O13 1 0.334290 0.546072 0.785384 1
O O14 1 0.334290 0.953928 0.785384 1
O O15 1 0.403358 0.750000 0.241320 1
O O16 1 0.456628 0.250000 0.207138 1
O O17 1 0.543372 0.750000 0.792862 1
O O18 1 0.596642 0.250000 0.758680 1
O O19 1 0.665710 0.046072 0.214616 1
O O20 1 0.665710 0.453928 0.214616 1
O O21 1 0.834290 0.546072 0.714616 1
O O22 1 0.834290 0.953928 0.714616 1
O O23 1 0.903358 0.750000 0.258680 1
O O24 1 0.956628 0.250000 0.292862 1
"""
        cp = CifParser.from_string(cif_structure)
        s_test = cp.get_structures(False)[0]
        filepath = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(filepath)
        s_ref = poscar.structure

        sm = StructureMatcher(stol=0.05, ltol=0.01, angle_tol=0.1)
        self.assertTrue(sm.fit(s_ref, s_test))

    def test_empty(self):
        """Empty loop values ('' and empty multi-line fields) should round-trip."""
        # single line
        cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n ''")
        self.assertEqual(cb.data["_tag"][0], "")

        # multi line
        cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n;\n;")
        self.assertEqual(cb.data["_tag"][0], "")

        cb2 = CifBlock.from_string(str(cb))
        self.assertEqual(cb, cb2)

    def test_bad_cif(self):
        """Over-occupied sites raise by default but parse with a higher occupancy_tolerance."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            f = self.TEST_FILES_DIR / "bad_occu.cif"
            p = CifParser(f)
            self.assertRaises(ValueError, p.get_structures)
            p = CifParser(f, occupancy_tolerance=2)
            s = p.get_structures()[0]
            self.assertAlmostEqual(s[0].species["Al3+"], 0.5)

    def test_one_line_symm(self):
        """Symmetry operations given on a single line should parse."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            f = self.TEST_FILES_DIR / "OneLineSymmP1.cif"
            p = CifParser(f)
            s = p.get_structures()[0]
            self.assertEqual(s.formula, "Ga4 Pb2 O8")

    def test_no_symmops(self):
        """A CIF without symmetry operations should default to P1."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            f = self.TEST_FILES_DIR / "nosymm.cif"
            p = CifParser(f)
            s = p.get_structures()[0]
            self.assertEqual(s.formula, "H96 C60 O8")

    def test_dot_positions(self):
        """Coordinates written as '.' (special positions) should parse."""
        f = self.TEST_FILES_DIR / "ICSD59959.cif"
        p = CifParser(f)
        s = p.get_structures()[0]
        self.assertEqual(s.formula, "K1 Mn1 F3")

    def test_replacing_finite_precision_frac_coords(self):
        """Near-ideal fractional coordinates should be rounded, with a warning recorded."""
        f = self.TEST_FILES_DIR / "cif_finite_precision_frac_coord_error.cif"
        with warnings.catch_warnings():
            p = CifParser(f)
            s = p.get_structures()[0]
            self.assertEqual(str(s.composition), "N5+24")
            self.assertIn(
                "Some fractional coordinates rounded to ideal values to avoid issues with finite precision.",
                p.warnings,
            )

    def test_empty_deque(self):
        """A trailing multi-line field that swallows further data must not crash the parser."""
        s = """data_1526655
_journal_name_full
_space_group_IT_number 227
_symmetry_space_group_name_Hall 'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M 'F d -3 m :1'
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 90
_cell_formula_units_Z 8
_cell_length_a 5.381
_cell_length_b 5.381
_cell_length_c 5.381
_cell_volume 155.808
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
_iucr_refine_fcf_details
;
data_symmetries
loop_
_space_group_symop_id
_space_group_symop_operation_xyz
1 x,y,z
2 -x+1/2,y+1/2,-z+1/2
3 -x,-y,-z
4 x-1/2,-y-1/2,z-1/2
;"""
        p = CifParser.from_string(s)
        self.assertEqual(p.get_structures()[0].formula, "Si1")
        # When the multi-line field swallows the atom loop entirely, parsing
        # should fail with ValueError rather than crash on an empty deque.
        cif = """
data_1526655
_journal_name_full
_space_group_IT_number 227
_symmetry_space_group_name_Hall 'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M 'F d -3 m :1'
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 90
_cell_formula_units_Z 8
_cell_length_a 5.381
_cell_length_b 5.381
_cell_length_c 5.381
_cell_volume 155.808
_iucr_refine_fcf_details
;
data_symmetries
Some arbitrary multiline string
;
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
"""
        p = CifParser.from_string(cif)
        self.assertRaises(ValueError, p.get_structures)
class MagCifTest(PymatgenTest):
    def setUp(self):
        """Load the magnetic CIF example files used by all tests in this class."""
        # Silence the parser warnings these example files trigger.
        warnings.filterwarnings("ignore")
        self.mcif = CifParser(self.TEST_FILES_DIR / "magnetic.example.NiO.mcif")
        self.mcif_ncl = CifParser(self.TEST_FILES_DIR / "magnetic.ncl.example.GdB4.mcif")
        self.mcif_incom = CifParser(self.TEST_FILES_DIR / "magnetic.incommensurate.example.Cr.mcif")
        self.mcif_disord = CifParser(self.TEST_FILES_DIR / "magnetic.disordered.example.CuMnO2.mcif")
        self.mcif_ncl2 = CifParser(self.TEST_FILES_DIR / "Mn3Ge_IR2.mcif")
    def tearDown(self):
        # Undo the filterwarnings("ignore") installed in setUp.
        warnings.simplefilter("default")
def test_mcif_detection(self):
self.assertTrue(self.mcif.feature_flags["magcif"])
self.assertTrue(self.mcif_ncl.feature_flags["magcif"])
self.assertTrue(self.mcif_incom.feature_flags["magcif"])
self.assertTrue(self.mcif_disord.feature_flags["magcif"])
self.assertFalse(self.mcif.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_ncl.feature_flags["magcif_incommensurate"])
self.assertTrue(self.mcif_incom.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_disord.feature_flags["magcif_incommensurate"])
def test_get_structures(self):
# incommensurate structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_incom.get_structures)
# disordered magnetic structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_disord.get_structures)
# taken from self.mcif_ncl, removing explicit magnetic symmops
# so that MagneticSymmetryGroup() has to be invoked
magcifstr = """
data_5yOhtAoR
_space_group.magn_name_BNS "P 4/m' b' m' "
_cell_length_a 7.1316
_cell_length_b 7.1316
_cell_length_c 4.0505
_cell_angle_alpha 90.00
_cell_angle_beta 90.00
_cell_angle_gamma 90.00
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd1 Gd 0.31746 0.81746 0.00000 1
B1 B 0.00000 0.00000 0.20290 1
B2 B 0.17590 0.03800 0.50000 1
B3 B 0.08670 0.58670 0.50000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 5.05 5.05 0.0"""
s = self.mcif.get_structures(primitive=False)[0]
self.assertEqual(s.formula, "Ni32 O32")
self.assertTrue(Magmom.are_collinear(s.site_properties["magmom"]))
# example with non-collinear spin
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
s_ncl_from_msg = CifParser.from_string(magcifstr).get_structures(primitive=False)[0]
self.assertEqual(s_ncl.formula, "Gd4 B16")
self.assertFalse(Magmom.are_collinear(s_ncl.site_properties["magmom"]))
self.assertTrue(s_ncl.matches(s_ncl_from_msg))
def test_write(self):
cw_ref_string = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd0 1 0.31746000 0.81746000 0.00000000 1.0
Gd Gd1 1 0.18254000 0.31746000 0.00000000 1.0
Gd Gd2 1 0.81746000 0.68254000 0.00000000 1.0
Gd Gd3 1 0.68254000 0.18254000 0.00000000 1.0
B B4 1 0.00000000 0.00000000 0.20290000 1.0
B B5 1 0.50000000 0.50000000 0.79710000 1.0
B B6 1 0.00000000 0.00000000 0.79710000 1.0
B B7 1 0.50000000 0.50000000 0.20290000 1.0
B B8 1 0.17590000 0.03800000 0.50000000 1.0
B B9 1 0.96200000 0.17590000 0.50000000 1.0
B B10 1 0.03800000 0.82410000 0.50000000 1.0
B B11 1 0.67590000 0.46200000 0.50000000 1.0
B B12 1 0.32410000 0.53800000 0.50000000 1.0
B B13 1 0.82410000 0.96200000 0.50000000 1.0
B B14 1 0.53800000 0.67590000 0.50000000 1.0
B B15 1 0.46200000 0.32410000 0.50000000 1.0
B B16 1 0.08670000 0.58670000 0.50000000 1.0
B B17 1 0.41330000 0.08670000 0.50000000 1.0
B B18 1 0.58670000 0.91330000 0.50000000 1.0
B B19 1 0.91330000 0.41330000 0.50000000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd0 5.05000000 5.05000000 0.00000000
Gd1 -5.05000000 5.05000000 0.00000000
Gd2 5.05000000 -5.05000000 0.00000000
Gd3 -5.05000000 -5.05000000 0.00000000
"""
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
# from list-type magmoms
list_magmoms = [list(m) for m in s_ncl.site_properties["magmom"]]
# float magmoms (magnitude only)
float_magmoms = [float(m) for m in s_ncl.site_properties["magmom"]]
s_ncl.add_site_property("magmom", list_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
s_ncl.add_site_property("magmom", float_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
cw_ref_string_magnitudes = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd0 1 0.31746000 0.81746000 0.00000000 1.0
Gd Gd1 1 0.18254000 0.31746000 0.00000000 1.0
Gd Gd2 1 0.81746000 0.68254000 0.00000000 1.0
Gd Gd3 1 0.68254000 0.18254000 0.00000000 1.0
B B4 1 0.00000000 0.00000000 0.20290000 1.0
B B5 1 0.50000000 0.50000000 0.79710000 1.0
B B6 1 0.00000000 0.00000000 0.79710000 1.0
B B7 1 0.50000000 0.50000000 0.20290000 1.0
B B8 1 0.17590000 0.03800000 0.50000000 1.0
B B9 1 0.96200000 0.17590000 0.50000000 1.0
B B10 1 0.03800000 0.82410000 0.50000000 1.0
B B11 1 0.67590000 0.46200000 0.50000000 1.0
B B12 1 0.32410000 0.53800000 0.50000000 1.0
B B13 1 0.82410000 0.96200000 0.50000000 1.0
B B14 1 0.53800000 0.67590000 0.50000000 1.0
B B15 1 0.46200000 0.32410000 0.50000000 1.0
B B16 1 0.08670000 0.58670000 0.50000000 1.0
B B17 1 0.41330000 0.08670000 0.50000000 1.0
B B18 1 0.58670000 0.91330000 0.50000000 1.0
B B19 1 0.91330000 0.41330000 0.50000000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd0 0.00000000 0.00000000 7.14177849
Gd1 0.00000000 0.00000000 7.14177849
Gd2 0.00000000 0.00000000 -7.14177849
Gd3 0.00000000 0.00000000 -7.14177849
"""
self.assertEqual(cw.__str__().strip(), cw_ref_string_magnitudes.strip())
# test we're getting correct magmoms in ncl case
s_ncl2 = self.mcif_ncl2.get_structures()[0]
list_magmoms = [list(m) for m in s_ncl2.site_properties["magmom"]]
self.assertEqual(list_magmoms[0][0], 0.0)
self.assertAlmostEqual(list_magmoms[0][1], 5.9160793408726366)
self.assertAlmostEqual(list_magmoms[1][0], -5.1234749999999991)
self.assertAlmostEqual(list_magmoms[1][1], 2.9580396704363183)
# test creating an structure without oxidation state doesn't raise errors
s_manual = Structure(Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s_manual.add_spin_by_site([1, -1])
cw = CifWriter(s_manual, write_magmoms=True)
# check oxidation state
cw_manual_oxi_string = """# generated using pymatgen
data_CsCl
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 4.20000000
_cell_length_b 4.20000000
_cell_length_c 4.20000000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural CsCl
_chemical_formula_sum 'Cs1 Cl1'
_cell_volume 74.08800000
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Cs+ 1.0
Cl+ 1.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Cs+ Cs0 1 0.00000000 0.00000000 0.00000000 1
Cl+ Cl1 1 0.50000000 0.50000000 0.50000000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
"""
s_manual.add_oxidation_state_by_site([1, 1])
cw = CifWriter(s_manual, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_manual_oxi_string)
@unittest.skipIf(pybtex is None, "pybtex not present")
def test_bibtex(self):
ref_bibtex_string = """@article{cifref0,
author = "Blanco, J.A.",
journal = "PHYSICAL REVIEW B",
volume = "73",
year = "2006",
pages = "?--?"
}
"""
self.assertEqual(self.mcif_ncl.get_bibtex_string(), ref_bibtex_string)
if __name__ == "__main__":
    # Allow running this test module directly (python test_cif.py).
    unittest.main()
| materialsproject/pymatgen | pymatgen/io/tests/test_cif.py | Python | mit | 42,693 | [
"ABINIT",
"VASP",
"pymatgen"
] | 88e879e171a83a83775c2025a56733f31fd4042062ff31f08aacc86b447e53ba |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.